From a9a23b93bfed9331bcedfbe00f9fc448d1db26e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 10:03:59 +0200 Subject: [PATCH 01/29] Bump sqlparse from 0.4.4 to 0.5.0 (#558) Bumps [sqlparse](https://github.com/andialbrecht/sqlparse) from 0.4.4 to 0.5.0. - [Changelog](https://github.com/andialbrecht/sqlparse/blob/master/CHANGELOG) - [Commits](https://github.com/andialbrecht/sqlparse/compare/0.4.4...0.5.0) --- updated-dependencies: - dependency-name: sqlparse dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 188a4728a..e0971e36c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3408,19 +3408,18 @@ sqlcipher = ["sqlcipher3_binary"] [[package]] name = "sqlparse" -version = "0.4.4" +version = "0.5.0" description = "A non-validating SQL parser." optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "sqlparse-0.4.4-py3-none-any.whl", hash = "sha256:5430a4fe2ac7d0f93e66f1efc6e1338a41884b7ddf2a350cedd20ccc4d9d28f3"}, - {file = "sqlparse-0.4.4.tar.gz", hash = "sha256:d446183e84b8349fa3061f0fe7f06ca94ba65b426946ffebe6e3e8295332420c"}, + {file = "sqlparse-0.5.0-py3-none-any.whl", hash = "sha256:c204494cd97479d0e39f28c93d46c0b2d5959c7b9ab904762ea6c7af211c8663"}, + {file = "sqlparse-0.5.0.tar.gz", hash = "sha256:714d0a4932c059d16189f58ef5411ec2287a4360f17cdd0edd2d09d4c5087c93"}, ] [package.extras] -dev = ["build", "flake8"] +dev = ["build", "hatch"] doc = ["sphinx"] -test = ["pytest", "pytest-cov"] [[package]] name = "sympy" From 36eb46f371616b35921aebfc6362726c586b0754 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 May 2024 09:50:19 +0200 Subject: [PATCH 02/29] Bump tqdm from 4.66.1 to 4.66.3 (#569) Bumps [tqdm](https://github.com/tqdm/tqdm) from 4.66.1 to 4.66.3. - [Release notes](https://github.com/tqdm/tqdm/releases) - [Commits](https://github.com/tqdm/tqdm/compare/v4.66.1...v4.66.3) --- updated-dependencies: - dependency-name: tqdm dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index e0971e36c..64d65d07e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3667,13 +3667,13 @@ scipy = ["scipy"] [[package]] name = "tqdm" -version = "4.66.1" +version = "4.66.3" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, - {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, + {file = "tqdm-4.66.3-py3-none-any.whl", hash = "sha256:4f41d54107ff9a223dca80b53efe4fb654c67efaba7f47bada3ee9d50e05bd53"}, + {file = "tqdm-4.66.3.tar.gz", hash = "sha256:23097a41eba115ba99ecae40d06444c15d1c0c698d527a01c6c8bd1c5d0647e5"}, ] [package.dependencies] From fa7f0f1f21578fc56410b593782f3aacae182a48 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 08:51:38 +0200 Subject: [PATCH 03/29] Bump werkzeug from 3.0.1 to 3.0.3 (#570) Bumps [werkzeug](https://github.com/pallets/werkzeug) from 3.0.1 to 3.0.3. - [Release notes](https://github.com/pallets/werkzeug/releases) - [Changelog](https://github.com/pallets/werkzeug/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/werkzeug/compare/3.0.1...3.0.3) --- updated-dependencies: - dependency-name: werkzeug dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 64d65d07e..80ed6f4c9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3889,13 +3889,13 @@ watchmedo = ["PyYAML (>=3.10)"] [[package]] name = "werkzeug" -version = "3.0.1" +version = "3.0.3" description = "The comprehensive WSGI web application library." optional = false python-versions = ">=3.8" files = [ - {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, - {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, + {file = "werkzeug-3.0.3-py3-none-any.whl", hash = "sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8"}, + {file = "werkzeug-3.0.3.tar.gz", hash = "sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18"}, ] [package.dependencies] From a05fcd5963fd5135e799b408ff7094ae8d95eac5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 08:52:55 +0200 Subject: [PATCH 04/29] Bump jinja2 from 3.1.3 to 3.1.4 (#571) Bumps [jinja2](https://github.com/pallets/jinja) from 3.1.3 to 3.1.4. - [Release notes](https://github.com/pallets/jinja/releases) - [Changelog](https://github.com/pallets/jinja/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/jinja/compare/3.1.3...3.1.4) --- updated-dependencies: - dependency-name: jinja2 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 80ed6f4c9..04501eadb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1095,13 +1095,13 @@ files = [ [[package]] name = "jinja2" -version = "3.1.3" +version = "3.1.4" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, - {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, ] [package.dependencies] From b2fc3e6d975a47a9d2897aad4cf8cf8bcb8244bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 May 2024 09:14:52 +0200 Subject: [PATCH 05/29] Bump mlflow from 2.10.1 to 2.12.1 (#575) Bumps [mlflow](https://github.com/mlflow/mlflow) from 2.10.1 to 2.12.1. - [Release notes](https://github.com/mlflow/mlflow/releases) - [Changelog](https://github.com/mlflow/mlflow/blob/master/CHANGELOG.md) - [Commits](https://github.com/mlflow/mlflow/compare/v2.10.1...v2.12.1) --- updated-dependencies: - dependency-name: mlflow dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 138 +++++++++++++++++++++++++++------------------------- 1 file changed, 73 insertions(+), 65 deletions(-) diff --git a/poetry.lock b/poetry.lock index 04501eadb..4b48efdc8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -32,6 +32,20 @@ typing-extensions = ">=4" [package.extras] tz = ["backports.zoneinfo"] +[[package]] +name = "aniso8601" +version = "9.0.1" +description = "A library for parsing ISO 8601 strings." 
+optional = false +python-versions = "*" +files = [ + {file = "aniso8601-9.0.1-py2.py3-none-any.whl", hash = "sha256:1d2b7ef82963909e93c4f24ce48d4de9e66009a21bf1c1e1c85bdd0812fe412f"}, + {file = "aniso8601-9.0.1.tar.gz", hash = "sha256:72e3117667eedf66951bb2d93f4296a56b94b078a8a95905a052611fb3f1b973"}, +] + +[package.extras] +dev = ["black", "coverage", "isort", "pre-commit", "pyenchant", "pylint"] + [[package]] name = "appdirs" version = "1.4.4" @@ -456,26 +470,6 @@ files = [ docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] tests = ["pytest", "pytest-cov", "pytest-xdist"] -[[package]] -name = "databricks-cli" -version = "0.18.0" -description = "A command line interface for Databricks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "databricks-cli-0.18.0.tar.gz", hash = "sha256:87569709eda9af3e9db8047b691e420b5e980c62ef01675575c0d2b9b4211eb7"}, - {file = "databricks_cli-0.18.0-py2.py3-none-any.whl", hash = "sha256:1176a5f42d3e8af4abfc915446fb23abc44513e325c436725f5898cbb9e3384b"}, -] - -[package.dependencies] -click = ">=7.0" -oauthlib = ">=3.1.0" -pyjwt = ">=1.7.0" -requests = ">=2.17.3" -six = ">=1.10.0" -tabulate = ">=0.7.7" -urllib3 = ">=1.26.7,<3" - [[package]] name = "deprecated" version = "1.2.14" @@ -806,6 +800,51 @@ requests-oauthlib = ">=0.7.0" [package.extras] tool = ["click (>=6.0.0)"] +[[package]] +name = "graphene" +version = "3.3" +description = "GraphQL Framework for Python" +optional = false +python-versions = "*" +files = [ + {file = "graphene-3.3-py2.py3-none-any.whl", hash = "sha256:bb3810be33b54cb3e6969506671eb72319e8d7ba0d5ca9c8066472f75bf35a38"}, + {file = "graphene-3.3.tar.gz", hash = "sha256:529bf40c2a698954217d3713c6041d69d3f719ad0080857d7ee31327112446b0"}, +] + +[package.dependencies] +aniso8601 = ">=8,<10" +graphql-core = ">=3.1,<3.3" +graphql-relay = ">=3.1,<3.3" + +[package.extras] +dev = ["black (==22.3.0)", "coveralls (>=3.3,<4)", "flake8 (>=4,<5)", "iso8601 (>=1,<2)", "mock (>=4,<5)", "pytest (>=6,<7)", "pytest-asyncio (>=0.16,<2)", "pytest-benchmark (>=3.4,<4)", "pytest-cov (>=3,<4)", "pytest-mock (>=3,<4)", "pytz (==2022.1)", "snapshottest (>=0.6,<1)"] +test = ["coveralls (>=3.3,<4)", "iso8601 (>=1,<2)", "mock (>=4,<5)", "pytest (>=6,<7)", "pytest-asyncio (>=0.16,<2)", "pytest-benchmark (>=3.4,<4)", "pytest-cov (>=3,<4)", "pytest-mock (>=3,<4)", "pytz (==2022.1)", "snapshottest (>=0.6,<1)"] + +[[package]] +name = "graphql-core" +version = "3.2.3" +description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." 
+optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "graphql-core-3.2.3.tar.gz", hash = "sha256:06d2aad0ac723e35b1cb47885d3e5c45e956a53bc1b209a9fc5369007fe46676"}, + {file = "graphql_core-3.2.3-py3-none-any.whl", hash = "sha256:5766780452bd5ec8ba133f8bf287dc92713e3868ddd83aee4faab9fc3e303dc3"}, +] + +[[package]] +name = "graphql-relay" +version = "3.2.0" +description = "Relay library for graphql-core" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "graphql-relay-3.2.0.tar.gz", hash = "sha256:1ff1c51298356e481a0be009ccdff249832ce53f30559c1338f22a0e0d17250c"}, + {file = "graphql_relay-3.2.0-py3-none-any.whl", hash = "sha256:c9b22bd28b170ba1fe674c74384a8ff30a76c8e26f88ac3aa1584dd3179953e5"}, +] + +[package.dependencies] +graphql-core = ">=3.2,<3.3" + [[package]] name = "greenlet" version = "3.0.3" @@ -1635,25 +1674,25 @@ files = [ [[package]] name = "mlflow" -version = "2.10.1" -description = "MLflow: A Platform for ML Development and Productionization" +version = "2.12.2" +description = "MLflow is an open source platform for the complete machine learning lifecycle" optional = false python-versions = ">=3.8" files = [ - {file = "mlflow-2.10.1-py3-none-any.whl", hash = "sha256:3dddb8a011ab3671d0c6da806549fdc84d39eb853b1bc29e8b3df50115ba5b6c"}, - {file = "mlflow-2.10.1.tar.gz", hash = "sha256:d534e658a979517f56478fc7f0b1a19451700078a725242e789fe63c87d46815"}, + {file = "mlflow-2.12.2-py3-none-any.whl", hash = "sha256:38dd04710fe64ee8229b7233b4d91db32c3ff887934c40d926246a566c886c0b"}, + {file = "mlflow-2.12.2.tar.gz", hash = "sha256:d712f1af9d44f1eb9e1baee8ca64f7311e185b7572fc3c1e0a83a4c8ceff6aad"}, ] [package.dependencies] alembic = "<1.10.0 || >1.10.0,<2" click = ">=7.0,<9" cloudpickle = "<4" -databricks-cli = ">=0.8.7,<1" docker = ">=4.0.0,<8" entrypoints = "<1" Flask = "<4" -gitpython = ">=2.1.0,<4" -gunicorn = {version = "<22", markers = "platform_system != \"Windows\""} +gitpython = ">=3.1.9,<4" +graphene = "<4" +gunicorn = {version = "<23", markers = "platform_system != \"Windows\""} importlib-metadata = ">=3.7.0,<4.7.0 || >4.7.0,<8" Jinja2 = [ {version = ">=2.11,<4", markers = "platform_system != \"Windows\""}, @@ -1662,11 +1701,11 @@ Jinja2 = [ markdown = ">=3.3,<4" matplotlib = "<4" numpy = "<2" -packaging = "<24" +packaging = "<25" pandas = "<3" protobuf = ">=3.12.0,<5" pyarrow = ">=4.0.0,<16" -pytz = "<2024" +pytz = "<2025" pyyaml = ">=5.1,<7" querystring-parser = "<2" requests = ">=2.17.3,<3" @@ -1674,14 +1713,14 @@ scikit-learn = "<2" scipy = "<2" sqlalchemy = ">=1.4.0,<3" sqlparse = ">=0.4.0,<1" -waitress = {version = "<3", markers = "platform_system == \"Windows\""} +waitress = {version = "<4", markers = "platform_system == \"Windows\""} [package.extras] aliyun-oss = ["aliyunstoreplugin"] -databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore (>1.34)", "google-cloud-storage (>=1.30.0)"] -extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] -gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] -genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +databricks = ["azure-storage-file-datalake 
(>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] sqlserver = ["mlflow-dbstore"] xethub = ["mlflow-xethub"] @@ -2373,23 +2412,6 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] -[[package]] -name = "pyjwt" -version = "2.8.0" -description = "JSON Web Token implementation in Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, - {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, -] - -[package.extras] -crypto = ["cryptography (>=3.4.0)"] -dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] - [[package]] name = "pymdown-extensions" version = "10.7" @@ -3435,20 +3457,6 @@ files = [ [package.dependencies] mpmath = ">=0.19" -[[package]] -name = "tabulate" -version = "0.9.0" -description = "Pretty-print tabular data" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, - {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, -] - -[package.extras] -widechars = ["wcwidth"] - [[package]] name = "tensorboard" version = "2.14.0" From 495d5b9454becba63fb4ba23b1852cc05f210307 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 May 2024 09:35:45 +0200 Subject: [PATCH 06/29] Bump gunicorn from 21.2.0 to 22.0.0 (#576) Bumps [gunicorn](https://github.com/benoitc/gunicorn) from 21.2.0 to 22.0.0. - [Release notes](https://github.com/benoitc/gunicorn/releases) - [Commits](https://github.com/benoitc/gunicorn/compare/21.2.0...22.0.0) --- updated-dependencies: - dependency-name: gunicorn dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4b48efdc8..2f2b3b9b9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -984,22 +984,23 @@ protobuf = ["grpcio-tools (>=1.60.1)"] [[package]] name = "gunicorn" -version = "21.2.0" +version = "22.0.0" description = "WSGI HTTP Server for UNIX" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" files = [ - {file = "gunicorn-21.2.0-py3-none-any.whl", hash = "sha256:3213aa5e8c24949e792bcacfc176fef362e7aac80b76c56f6b5122bf350722f0"}, - {file = "gunicorn-21.2.0.tar.gz", hash = "sha256:88ec8bff1d634f98e61b9f65bc4bf3cd918a90806c6f5c48bc5603849ec81033"}, + {file = "gunicorn-22.0.0-py3-none-any.whl", hash = "sha256:350679f91b24062c86e386e198a15438d53a7a8207235a78ba1b53df4c4378d9"}, + {file = "gunicorn-22.0.0.tar.gz", hash = "sha256:4a0b436239ff76fb33f11c07a16482c521a7e09c1ce3cc293c2330afe01bec63"}, ] [package.dependencies] packaging = "*" [package.extras] -eventlet = ["eventlet (>=0.24.1)"] +eventlet = ["eventlet (>=0.24.1,!=0.36.0)"] gevent = ["gevent (>=1.4.0)"] setproctitle = ["setproctitle"] +testing = ["coverage", "eventlet", "gevent", "pytest", "pytest-cov"] tornado = ["tornado (>=0.2)"] [[package]] From bdd102a6e42197a5a416625225798f47bf8314b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 09:14:39 +0200 Subject: [PATCH 07/29] Bump requests from 2.31.0 to 2.32.0 (#578) updated-dependencies: - dependency-name: requests dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2f2b3b9b9..7ff18c8fc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2909,13 +2909,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.0" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.0-py3-none-any.whl", hash = "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5"}, + {file = "requests-2.32.0.tar.gz", hash = "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"}, ] [package.dependencies] From beccd4cddccee82ea305537489323e1df8237d82 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Wed, 22 May 2024 15:35:13 +0200 Subject: [PATCH 08/29] [CI] Run tests through GitHub Actions (#573) * try a simple workflow first * try running on new ubuntu VM * fixes * bump poetry version to 1.8.3 * try removing caching.. 
* add workflow for testing tsv tools --- .github/workflows/test_cli.yml | 46 +++++++++++++++++++++++++++ .github/workflows/test_tsvtools.yml | 48 +++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 .github/workflows/test_cli.yml create mode 100644 .github/workflows/test_tsvtools.yml diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml new file mode 100644 index 000000000..d8309b2e1 --- /dev/null +++ b/.github/workflows/test_cli.yml @@ -0,0 +1,46 @@ +name: CLI Tests + +on: + push: + branches: ["dev"] + pull_request: + branches: ["dev"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-cli: + runs-on: + - self-hosted + - Linux + - ubuntu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run CLI tests + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_cli_report.xml \ + --disable-warnings \ + --verbose \ + test_cli.py diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml new file mode 100644 index 000000000..bddbb80d2 --- /dev/null +++ b/.github/workflows/test_tsvtools.yml @@ -0,0 +1,48 @@ +name: TSV Tools Tests + +on: + push: + branches: ["dev"] + pull_request: + branches: ["dev"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-tsvtools: + runs-on: + - self-hosted + - Linux + - ubuntu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for TSV tools + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_tsvtools_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp \ + --input_data_directory=/mnt/data/data_ci \ + test_tsvtools.py From 2861e9d8da889f7546be9776e1a496bb8cd83e61 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 23 May 2024 14:22:24 +0200 Subject: [PATCH 09/29] [CI] Skip tests when PR is in draft mode (#592) * try skipping test_tsvtools when PR is in draft mode * trigger CI * add a cpu tag to avoid running cpu tests on gpu machines * run also on refactoring branch --- .github/workflows/test_cli.yml | 6 ++++-- .github/workflows/test_tsvtools.yml | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml index d8309b2e1..750f1cd00 100644 --- a/.github/workflows/test_cli.yml +++ b/.github/workflows/test_cli.yml @@ -2,9 +2,9 @@ name: CLI Tests on: push: - branches: ["dev"] + branches: ["dev", "refactoring"] pull_request: - branches: ["dev"] + branches: ["dev", "refactoring"] 
permissions: contents: read @@ -19,10 +19,12 @@ env: jobs: test-cli: + if: github.event.pull_request.draft == false runs-on: - self-hosted - Linux - ubuntu + - cpu steps: - uses: actions/checkout@v4 - uses: snok/install-poetry@v1 diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index bddbb80d2..5a8c7896a 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -2,9 +2,9 @@ name: TSV Tools Tests on: push: - branches: ["dev"] + branches: ["dev", "refactoring"] pull_request: - branches: ["dev"] + branches: ["dev", "refactoring"] permissions: contents: read @@ -19,10 +19,12 @@ env: jobs: test-tsvtools: + if: github.event.pull_request.draft == false runs-on: - self-hosted - Linux - ubuntu + - cpu steps: - uses: actions/checkout@v4 - uses: snok/install-poetry@v1 From f5de25105e2db3e87619b7782eb6873a0066c3c6 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 23 May 2024 14:28:57 +0200 Subject: [PATCH 10/29] [CI] Test train workflow on GPU machine (#590) * add test workflow on GPU for train * fix conda path * fix conflicting workdir * only run on non-draft PRs * run also on refactoring branch --- .github/workflows/test_train.yml | 53 ++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 .github/workflows/test_train.yml diff --git a/.github/workflows/test_train.yml b/.github/workflows/test_train.yml new file mode 100644 index 000000000..a65a92a56 --- /dev/null +++ b/.github/workflows/test_train.yml @@ -0,0 +1,53 @@ +name: Train Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-train-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Train on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_train_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/train \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + -k test_train + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/train/* From 69b3538d5397c94e0c3b7e306648ca1dd0720b7a Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 23 May 2024 15:51:54 +0200 Subject: [PATCH 11/29] [CI] Port remaining GPU tests to GitHub Actions (#593) * add workflow for testing interpretation task * add workflow for testing random search task * add workflow for testing resume task * add workflow for testing transfer learning task * trigger CI * trigger CI --- .github/workflows/test_interpret.yml | 53 ++++++++++++++++++++ .github/workflows/test_random_search.yml | 53 ++++++++++++++++++++ .github/workflows/test_resume.yml | 53 ++++++++++++++++++++ .github/workflows/test_transfer_learning.yml | 53 ++++++++++++++++++++ 4 files changed, 212 insertions(+) create mode 100644 .github/workflows/test_interpret.yml 
create mode 100644 .github/workflows/test_random_search.yml create mode 100644 .github/workflows/test_resume.yml create mode 100644 .github/workflows/test_transfer_learning.yml diff --git a/.github/workflows/test_interpret.yml b/.github/workflows/test_interpret.yml new file mode 100644 index 000000000..0163bf583 --- /dev/null +++ b/.github/workflows/test_interpret.yml @@ -0,0 +1,53 @@ +name: Interpretation Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-interpret-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Interpret task on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_interpret_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/interpret \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_interpret.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/interpret/* diff --git a/.github/workflows/test_random_search.yml b/.github/workflows/test_random_search.yml new file mode 100644 index 000000000..529f1fda1 --- /dev/null +++ b/.github/workflows/test_random_search.yml @@ -0,0 +1,53 @@ +name: Random Search Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-random-search-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run Random Search tests on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_random_search_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/random_search \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_random_search.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/random_search/* diff --git a/.github/workflows/test_resume.yml b/.github/workflows/test_resume.yml new file mode 100644 index 000000000..b789a21f6 --- /dev/null +++ b/.github/workflows/test_resume.yml @@ -0,0 +1,53 @@ +name: Resume Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: 
read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-resume-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run resume tests on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_resume_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/resume \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_resume.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/resume/* diff --git a/.github/workflows/test_transfer_learning.yml b/.github/workflows/test_transfer_learning.yml new file mode 100644 index 000000000..61238d4e1 --- /dev/null +++ b/.github/workflows/test_transfer_learning.yml @@ -0,0 +1,53 @@ +name: Transfer Learning Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-transfer-learning-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Transfer Learning on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_transfer_learning_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/transfer_learning \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_transfer_learning.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/transfer_learning/* From c9d9252ae4436a7a17d8812fdea97f2b01e0c0cb Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 24 May 2024 09:43:01 +0200 Subject: [PATCH 12/29] [CI] Remove GPU pipeline from Jenkinsfile (#594) --- .jenkins/Jenkinsfile | 207 ------------------------------------------- 1 file changed, 207 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index f7bd3dafb..033182681 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -252,214 +252,7 @@ pipeline { } } } - stage('GPU') { - agent { - label 'gpu' - } - environment { - CONDA_HOME = "$HOME/miniconda3" - CONDA_ENV = "$WORKSPACE/env" - PATH = "$HOME/.local/bin:$PATH" - TMP_DIR = "$HOME/tmp" - INPUT_DATA_DIR = '/mnt/data/clinicadl_data_ci/data_ci' - } - stages { - stage('Build Env') { - steps { - echo 'Installing clinicadl sources in Linux...' 
- echo "My branch name is ${BRANCH_NAME}" - sh "echo 'My branch name is ${BRANCH_NAME}'" - sh 'printenv' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - make env.conda - conda activate "${CONDA_ENV}" - conda info - echo "Install clinicadl using poetry..." - cd $WORKSPACE - make env - # Show clinicadl help message - echo "Display clinicadl help message" - clinicadl --help - conda deactivate - ''' - } - } - stage('Train tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing train task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_train_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - -k "test_train" - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_train_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Transfer learning tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing transfer learning...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_transfer_learning_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_transfer_learning.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_transfer_learning_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Resume tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing resume...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_resume_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_resume.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_resume_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Interpretation tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing interpret task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - set +x - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_interpret_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_interpret.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_interpret_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Random search tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing random search...' 
- sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - set +x - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_random_search_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_random_search.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_random_search_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - } - post { - // Clean after build - cleanup { - cleanWs(deleteDirs: true, - notFailBuild: true, - patterns: [[pattern: 'env', type: 'INCLUDE']]) - } - } - } } } } -// post { -// failure { -// mail to: 'clinicadl-ci@inria.fr', -// subject: "Failed Pipeline: ${currentBuild.fullDisplayName}", -// body: "Something is wrong with ${env.BUILD_URL}" -// mattermostSend( -// color: "#FF0000", -// message: "ClinicaDL Build FAILED: ${env.JOB_NAME} #${env.BUILD_NUMBER} (<${env.BUILD_URL}|Link to build>)" -// ) -// } -// } } From 753f04e49e266ec3767cd91bcefda18370718fec Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 24 May 2024 12:06:06 +0200 Subject: [PATCH 13/29] [CI] Port remaining non GPU tests to GitHub Actions (#581) * add cleaning step to test_tsvtools pipeline * add test_generate pipeline * add test_predict pipeline * add test_prepare_data pipeline * add test_quality_checks pipeline * add refactoring target branch, cpu tag, and draft PR filter * trigger CI --- .github/workflows/test_generate.yml | 53 +++++++++++++++++++++++ .github/workflows/test_predict.yml | 53 +++++++++++++++++++++++ .github/workflows/test_prepare_data.yml | 53 +++++++++++++++++++++++ .github/workflows/test_quality_checks.yml | 53 +++++++++++++++++++++++ .github/workflows/test_tsvtools.yml | 5 ++- 5 files changed, 216 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/test_generate.yml create mode 100644 .github/workflows/test_predict.yml create mode 100644 .github/workflows/test_prepare_data.yml create mode 100644 .github/workflows/test_quality_checks.yml diff --git a/.github/workflows/test_generate.yml b/.github/workflows/test_generate.yml new file mode 100644 index 000000000..51ac863b2 --- /dev/null +++ b/.github/workflows/test_generate.yml @@ -0,0 +1,53 @@ +name: Generate Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-generate: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for generate task + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_generate_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/generate \ + --input_data_directory=/mnt/data/data_ci \ + test_generate.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/generate diff 
--git a/.github/workflows/test_predict.yml b/.github/workflows/test_predict.yml new file mode 100644 index 000000000..8ec5976e4 --- /dev/null +++ b/.github/workflows/test_predict.yml @@ -0,0 +1,53 @@ +name: Predict Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-predict: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for predict task + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_predict_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/predict \ + --input_data_directory=/mnt/data/data_ci \ + test_predict.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/predict/* diff --git a/.github/workflows/test_prepare_data.yml b/.github/workflows/test_prepare_data.yml new file mode 100644 index 000000000..8dccd217f --- /dev/null +++ b/.github/workflows/test_prepare_data.yml @@ -0,0 +1,53 @@ +name: Prepare data Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-prepare-data: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for prepare data task + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_prepare_data_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/prepare_data \ + --input_data_directory=/mnt/data/data_ci \ + test_prepare_data.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/prepare_data/* diff --git a/.github/workflows/test_quality_checks.yml b/.github/workflows/test_quality_checks.yml new file mode 100644 index 000000000..1cf0414e2 --- /dev/null +++ b/.github/workflows/test_quality_checks.yml @@ -0,0 +1,53 @@ +name: Quality Check Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-quality-check: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - 
Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Quality Check + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_quality_check_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/quality_checks \ + --input_data_directory=/mnt/data/data_ci \ + test_qc.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/quality_checks/* diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index 5a8c7896a..811c6d4f4 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -45,6 +45,9 @@ jobs: --junitxml=./test-reports/test_tsvtools_report.xml \ --disable-warnings \ --verbose \ - --basetemp=$HOME/tmp \ + --basetemp=$HOME/tmp/tsv_tools \ --input_data_directory=/mnt/data/data_ci \ test_tsvtools.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/tsv_tools/* From c424d77f2273966d89571f5c9a0da08fffc5dff4 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 24 May 2024 13:07:56 +0200 Subject: [PATCH 14/29] [CI] Remove jenkins related things (#595) --- .jenkins/Jenkinsfile | 258 ---------------------------- .jenkins/scripts/find_env.sh | 39 ----- .jenkins/scripts/generate_wheels.sh | 31 ---- 3 files changed, 328 deletions(-) delete mode 100644 .jenkins/Jenkinsfile delete mode 100755 .jenkins/scripts/find_env.sh delete mode 100755 .jenkins/scripts/generate_wheels.sh diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile deleted file mode 100644 index 033182681..000000000 --- a/.jenkins/Jenkinsfile +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env groovy - -// Continuous Integration script for clinicadl -// Author: mauricio.diaz@inria.fr - -pipeline { - options { - timeout(time: 1, unit: 'HOURS') - disableConcurrentBuilds(abortPrevious: true) - } - agent none - stages { - stage('Functional tests') { - failFast false - parallel { - stage('No GPU') { - agent { - label 'cpu' - } - environment { - CONDA_HOME = "$HOME/miniconda" - CONDA_ENV = "$WORKSPACE/env" - PATH = "$HOME/.local/bin:$PATH" - TMP_DIR = "$HOME/tmp" - INPUT_DATA_DIR = '/mnt/data/clinicadl_data_ci/data_ci' - } - stages { - stage('Build Env') { - steps { - echo 'Installing clinicadl sources in Linux...' - echo "My branch name is ${BRANCH_NAME}" - sh "echo 'My branch name is ${BRANCH_NAME}'" - sh 'printenv' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - set +x - source "${CONDA_HOME}/etc/profile.d/conda.sh" - make env.conda - conda activate "${CONDA_ENV}" - conda info - echo "Install clinicadl using poetry..." - cd $WORKSPACE - make env - # Show clinicadl help message - echo "Display clinicadl help message" - clinicadl --help - conda deactivate - ''' - } - } - stage('CLI tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing pipeline instantiation...' 
- sh 'echo "Agent name: ${NODE_NAME}"' - sh ''' - set +x - echo $WORKSPACE - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - conda list - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_cli_report.xml \ - --verbose \ - --disable-warnings \ - test_cli.py - conda deactivate - ''' - } - } - } - stage('tsvtools tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing tsvtool tasks...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_tsvtool_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_tsvtools.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_tsvtool_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Quality check tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing quality check tasks...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_quality_check_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_qc.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_quality_check_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Generate tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing generate task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_generate_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_generate.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_generate_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Prepare data tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing prepare_data task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_prepare_data_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_prepare_data.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_prepare_data_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Predict tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing predict...' 
- sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_predict_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_predict.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_predict_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - // stage('Meta-maps analysis') { - // environment { - // PATH = "$HOME/miniconda3/bin:$HOME/miniconda/bin:$PATH" - // } - // steps { - // echo 'Testing maps-analysis task...' - // sh 'echo "Agent name: ${NODE_NAME}"' - // sh '''#!/usr/bin/env bash - // set +x - // eval "$(conda shell.bash hook)" - // conda activate "${WORKSPACE}/env" - // cd $WORKSPACE/tests - // pytest \ - // --junitxml=./test-reports/test_meta-analysis_report.xml \ - // --verbose \ - // --disable-warnings \ - // test_meta_maps.py - // conda deactivate - // ''' - // } - // post { - // always { - // junit 'tests/test-reports/test_meta-analysis_report.xml' - // sh 'rm -rf $WORKSPACE/tests/data/dataset' - // } - // } - // } - } - post { - // Clean after build - cleanup { - cleanWs(deleteDirs: true, - notFailBuild: true, - patterns: [[pattern: 'env', type: 'INCLUDE']]) - } - } - } - } - } - } -} diff --git a/.jenkins/scripts/find_env.sh b/.jenkins/scripts/find_env.sh deleted file mode 100755 index a68fff821..000000000 --- a/.jenkins/scripts/find_env.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# A shell script to launch clinica in CI machines - -# Name of the Conda environment according to the branch -CLINICA_ENV_BRANCH="clinicadl_test" - -set -e -set +x - -ENV_EXISTS=0 -# Verify that the conda environment corresponding to the branch exists, otherwise -# create it. -ENVS=$(conda env list | awk '{print $1}' ) -echo $ENVS - -for ENV in $ENVS -do - if [[ "$ENV " == *"$CLINICA_ENV_BRANCH "* ]] - then - echo "Find Conda environment named $ENV, continue." - conda activate $CLINICA_ENV_BRANCH - cd $WORKSPACE/ - poetry install - conda deactivate - ENV_EXISTS=1 - break - fi; -done -if [ "$ENV_EXISTS" = 0 ]; then - echo "Conda env $CLINICA_ENV_BRANCH not found... Creating" - conda create -y -f environment.yml - echo "Conda env $CLINICA_ENV_BRANCH was created." - conda activate $CLINICA_ENV_BRANCH - cd $WORKSPACE/ - poetry install - echo "ClinicaDL has been installed in $CLINICA_ENV_BRANCH." - conda deactivate - cd $WORKSPACE -fi diff --git a/.jenkins/scripts/generate_wheels.sh b/.jenkins/scripts/generate_wheels.sh deleted file mode 100755 index 326d55074..000000000 --- a/.jenkins/scripts/generate_wheels.sh +++ /dev/null @@ -1,31 +0,0 @@ -#! /bin/sh - -#--------------------------------------# -# ClinicaDL package creations ( wheel) -#--------------------------------------# -# -# WARNING: Activate a conda environment with the right pip version. -# Use at your own risk. 
- - -CURRENT_DIR=$(pwd) -echo $CURRENT_DIR - -# ensure we are in the right dir -SCRIPT_DIR=`(dirname $0)` -cd "$SCRIPT_DIR" -echo "Entering ${SCRIPT_DIR}/../../" -cd "${SCRIPT_DIR}/../../" -ls - -# clean pycache stuff -rm -rf dist build clinicadl.egg-info/ -find -name "*__pycache__*" -exec rm {} \-rf \; -find -name "*.pyc*" -exec rm {} \-rf \; - -set -o errexit -set -e -# generate wheel -poetry build -# come back to directory of -cd $CURRENT_DIR From 4281c73a96a9b6188059e1285421202bc5f979e1 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 28 May 2024 10:42:45 +0200 Subject: [PATCH 15/29] add simulate-gpu option --- tests/conftest.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 7251b5b8f..3a603f44f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,10 +14,19 @@ def pytest_addoption(parser): action="store", help="Directory for (only-read) inputs for tests", ) + parser.addoption( + "--simulate-gpu", + action="store_true", + help="""To simulate the presence of a gpu on a cpu-only device. Default is False. + To use carefully, only to run tests locally. Should not be used in final CI tests. + Concretely, the tests won't fail if gpu option is false in the output MAPS whereas + it should be true.""", + ) @pytest.fixture def cmdopt(request): config_param = {} config_param["input"] = request.config.getoption("--input_data_directory") + config_param["simulate gpu"] = request.config.getoption("--simulate-gpu") return config_param From 52d7561f8b6aab078d6ef2c33a6a9b97ad3e852f Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Thu, 30 May 2024 11:34:05 +0200 Subject: [PATCH 16/29] Add flags to run CI tests locally (#596) --- tests/conftest.py | 19 +++++++++++++++++++ tests/test_train_ae.py | 14 ++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 7251b5b8f..e5a4a7302 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,10 +14,29 @@ def pytest_addoption(parser): action="store", help="Directory for (only-read) inputs for tests", ) + parser.addoption( + "--no-gpu", + action="store_true", + help="""To run tests on cpu. Default is False. + To use carefully, only to run tests locally. Should not be used in final CI tests. + Concretely, the tests won't fail if gpu option is false in the output MAPS whereas + it is true in the reference MAPS.""", + ) + parser.addoption( + "--adapt-base-dir", + action="store_true", + help="""To virtually change the base directory in the paths stored in the MAPS of the CI data. + Default is False. + To use carefully, only to run tests locally. Should not be used in final CI tests.
+ Concretely, the tests won't fail if only the base directories differ in the paths stored + in the output and reference MAPS.""", + ) @pytest.fixture def cmdopt(request): config_param = {} config_param["input"] = request.config.getoption("--input_data_directory") + config_param["no-gpu"] = request.config.getoption("--no-gpu") + config_param["adapt-base-dir"] = request.config.getoption("--adapt-base-dir") return config_param diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py index ab9c057ff..311e145d0 100644 --- a/tests/test_train_ae.py +++ b/tests/test_train_ae.py @@ -88,6 +88,9 @@ def test_train_ae(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + test_input.append("--no-gpu") + if tmp_out_dir.is_dir(): shutil.rmtree(tmp_out_dir) @@ -101,6 +104,17 @@ def test_train_ae(cmdopt, tmp_path, test_name): if test_name == "patch_multi_ae": json_data_out["multi_network"] = True + if cmdopt["no-gpu"]: + json_data_ref["gpu"] = False + if cmdopt["adapt-base-dir"]: + base_dir = base_dir.resolve() + ref_base_dir = Path(json_data_ref["caps_directory"]).parents[2] + json_data_ref["caps_directory"] = str( + base_dir / Path(json_data_ref["caps_directory"]).relative_to(ref_base_dir) + ) + json_data_ref["tsv_path"] = str( + base_dir / Path(json_data_ref["tsv_path"]).relative_to(ref_base_dir) + ) assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( From 39d22fddbdf613ea1b793e6593d8b84d4440db0a Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 30 May 2024 13:43:39 +0200 Subject: [PATCH 17/29] [CI] Remove duplicated verbose flag in test pipelines (#598) --- .github/workflows/test_cli.yml | 2 +- .github/workflows/test_generate.yml | 2 +- .github/workflows/test_interpret.yml | 2 +- .github/workflows/test_predict.yml | 2 +- .github/workflows/test_prepare_data.yml | 2 +- .github/workflows/test_quality_checks.yml | 2 +- .github/workflows/test_random_search.yml | 2 +- .github/workflows/test_resume.yml | 2 +- .github/workflows/test_train.yml | 2 +- .github/workflows/test_transfer_learning.yml | 2 +- .github/workflows/test_tsvtools.yml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml index 750f1cd00..802511c9d 100644 --- a/.github/workflows/test_cli.yml +++ b/.github/workflows/test_cli.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_cli_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_generate.yml b/.github/workflows/test_generate.yml index 51ac863b2..e0149760f 100644 --- a/.github/workflows/test_generate.yml +++ b/.github/workflows/test_generate.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_generate_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_interpret.yml b/.github/workflows/test_interpret.yml index 0163bf583..7cfd9d56b 100644 --- a/.github/workflows/test_interpret.yml +++ b/.github/workflows/test_interpret.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_interpret_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_predict.yml 
b/.github/workflows/test_predict.yml index 8ec5976e4..f12740dac 100644 --- a/.github/workflows/test_predict.yml +++ b/.github/workflows/test_predict.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_predict_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_prepare_data.yml b/.github/workflows/test_prepare_data.yml index 8dccd217f..8209b9fa2 100644 --- a/.github/workflows/test_prepare_data.yml +++ b/.github/workflows/test_prepare_data.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_prepare_data_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_quality_checks.yml b/.github/workflows/test_quality_checks.yml index 1cf0414e2..701460266 100644 --- a/.github/workflows/test_quality_checks.yml +++ b/.github/workflows/test_quality_checks.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_quality_check_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_random_search.yml b/.github/workflows/test_random_search.yml index 529f1fda1..314afc353 100644 --- a/.github/workflows/test_random_search.yml +++ b/.github/workflows/test_random_search.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_random_search_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_resume.yml b/.github/workflows/test_resume.yml index b789a21f6..78a229913 100644 --- a/.github/workflows/test_resume.yml +++ b/.github/workflows/test_resume.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_resume_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_train.yml b/.github/workflows/test_train.yml index a65a92a56..599725225 100644 --- a/.github/workflows/test_train.yml +++ b/.github/workflows/test_train.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_train_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_transfer_learning.yml b/.github/workflows/test_transfer_learning.yml index 61238d4e1..4664a97a3 100644 --- a/.github/workflows/test_transfer_learning.yml +++ b/.github/workflows/test_transfer_learning.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_transfer_learning_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index 811c6d4f4..9f3bfeb02 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_tsvtools_report.xml \ --disable-warnings \ --verbose \ From 
571662c4598101fd969158b1d337ff5046974a1d Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 30 May 2024 15:54:16 +0200 Subject: [PATCH 18/29] [DOC] Update the Python version used for creating the conda environment in README (#600) * update python version used for creating conda env in README * investigate * fix --- .github/workflows/test_cli.yml | 2 +- .github/workflows/test_generate.yml | 2 +- .github/workflows/test_interpret.yml | 2 +- .github/workflows/test_predict.yml | 2 +- .github/workflows/test_prepare_data.yml | 2 +- .github/workflows/test_quality_checks.yml | 2 +- .github/workflows/test_random_search.yml | 2 +- .github/workflows/test_resume.yml | 2 +- .github/workflows/test_train.yml | 2 +- .github/workflows/test_transfer_learning.yml | 2 +- .github/workflows/test_tsvtools.yml | 2 +- README.md | 19 +++++++++---------- 12 files changed, 20 insertions(+), 21 deletions(-) diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml index 802511c9d..4efd2b024 100644 --- a/.github/workflows/test_cli.yml +++ b/.github/workflows/test_cli.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run CLI tests run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_generate.yml b/.github/workflows/test_generate.yml index e0149760f..8fc54c025 100644 --- a/.github/workflows/test_generate.yml +++ b/.github/workflows/test_generate.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for generate task run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_interpret.yml b/.github/workflows/test_interpret.yml index 7cfd9d56b..13db354a9 100644 --- a/.github/workflows/test_interpret.yml +++ b/.github/workflows/test_interpret.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Interpret task on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_predict.yml b/.github/workflows/test_predict.yml index f12740dac..e6d790b2a 100644 --- a/.github/workflows/test_predict.yml +++ b/.github/workflows/test_predict.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for predict task run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_prepare_data.yml b/.github/workflows/test_prepare_data.yml index 8209b9fa2..753634f76 100644 --- a/.github/workflows/test_prepare_data.yml +++ b/.github/workflows/test_prepare_data.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for prepare data task run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_quality_checks.yml b/.github/workflows/test_quality_checks.yml index 701460266..25d1bc752 100644 --- a/.github/workflows/test_quality_checks.yml +++ b/.github/workflows/test_quality_checks.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Quality Check run: | - make 
env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_random_search.yml b/.github/workflows/test_random_search.yml index 314afc353..78ddc2df0 100644 --- a/.github/workflows/test_random_search.yml +++ b/.github/workflows/test_random_search.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run Random Search tests on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_resume.yml b/.github/workflows/test_resume.yml index 78a229913..6d145339b 100644 --- a/.github/workflows/test_resume.yml +++ b/.github/workflows/test_resume.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run resume tests on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_train.yml b/.github/workflows/test_train.yml index 599725225..b3852bb09 100644 --- a/.github/workflows/test_train.yml +++ b/.github/workflows/test_train.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Train on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_transfer_learning.yml b/.github/workflows/test_transfer_learning.yml index 4664a97a3..9a3a583a3 100644 --- a/.github/workflows/test_transfer_learning.yml +++ b/.github/workflows/test_transfer_learning.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Transfer Learning on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index 9f3bfeb02..9e388b8e7 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for TSV tools run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/README.md b/README.md index 05b5f3a09..dae486a72 100755 --- a/README.md +++ b/README.md @@ -33,34 +33,33 @@ ## About the project This repository hosts ClinicaDL, the deep learning extension of [Clinica](https://github.com/aramis-lab/clinica), -a python library to process neuroimaging data in [BIDS](https://bids.neuroimaging.io/index.html) format. +a Python library to process neuroimaging data in [BIDS](https://bids.neuroimaging.io/index.html) format. > **Disclaimer:** this software is **under development**. Some features can change between different releases and/or commits. -To access the full documentation of the project, follow the link -[https://clinicadl.readthedocs.io/](https://clinicadl.readthedocs.io/). +To access the full documentation of the project, follow the link [https://clinicadl.readthedocs.io/](https://clinicadl.readthedocs.io/). 
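A quick interpreter check can save a failed install when following the updated instructions; a minimal sketch (the 3.10 floor mirrors the conda example below and is an assumption, not a documented minimum):

```python
import sys

# Sanity check matching the conda example below (README now uses python=3.10);
# treat the exact version floor as an assumption, not a stated requirement.
if sys.version_info < (3, 10):
    raise RuntimeError(f"Python 3.10+ expected, found {sys.version.split()[0]}")
```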
If you find a problem when using it or if you want to provide us feedback, please [open an issue](https://github.com/aramis-lab/ad-dl/issues) or write on the [forum](https://groups.google.com/forum/#!forum/clinica-user). ## Getting started + ClinicaDL currently supports macOS and Linux. We recommend to use `conda` or `virtualenv` for the installation of ClinicaDL -as it guarantees the correct management of libraries depending on common -packages: +as it guarantees the correct management of libraries depending on common packages: ```{.sourceCode .bash} -conda create --name ClinicaDL python=3.8 +conda create --name ClinicaDL python=3.10 conda activate ClinicaDL pip install clinicadl ``` -## Tutorial -Visit our [hands-on tutorial web -site](https://aramislab.paris.inria.fr/clinicadl/tuto) to start -using **ClinicaDL** directly in a Google Colab instance! +## Tutorial + +Visit our [hands-on tutorial web site](https://aramislab.paris.inria.fr/clinicadl/tuto) +to start using **ClinicaDL** directly in a Google Colab instance! ## Related Repositories From d54d59cfd1bc954ff026cf2cbba9a0ba9647c4ea Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Fri, 31 May 2024 13:38:12 +0200 Subject: [PATCH 19/29] Flag for local tests (#608) * add no-gpu and adapt-base-dir flag --- tests/test_interpret.py | 3 ++ tests/test_random_search.py | 12 ++++--- tests/test_resume.py | 26 ++++++++++++-- tests/test_train_ae.py | 25 +++++-------- tests/test_train_cnn.py | 13 +++++-- tests/test_train_from_json.py | 28 ++++++++++----- tests/test_transfer_learning.py | 42 +++++++++++++++------- tests/testing_tools.py | 62 ++++++++++++++++++++++++++++++++- 8 files changed, 164 insertions(+), 47 deletions(-) diff --git a/tests/test_interpret.py b/tests/test_interpret.py index 8030e4c98..d84147e97 100644 --- a/tests/test_interpret.py +++ b/tests/test_interpret.py @@ -61,6 +61,9 @@ def test_interpret(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + cnn_input.append("--no-gpu") + run_interpret(cnn_input, tmp_out_dir, ref_dir) diff --git a/tests/test_random_search.py b/tests/test_random_search.py index e1c530513..5b68787e8 100644 --- a/tests/test_random_search.py +++ b/tests/test_random_search.py @@ -1,6 +1,5 @@ # coding: utf8 -import json import os import shutil from os.path import join @@ -8,7 +7,7 @@ import pytest -from tests.testing_tools import compare_folders +from .testing_tools import change_gpu_in_toml, compare_folders # random searxh for ROI with CNN @@ -34,10 +33,12 @@ def test_random_search(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") - run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir) + run_test_random_search( + toml_path, generate_input, tmp_out_dir, ref_dir, cmdopt["no-gpu"] + ) -def run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir): +def run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir, no_gpu): if os.path.exists(tmp_out_dir): shutil.rmtree(tmp_out_dir) @@ -45,6 +46,9 @@ def run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir): os.makedirs(tmp_out_dir, exist_ok=True) shutil.copy(toml_path, tmp_out_dir) + if no_gpu: + change_gpu_in_toml(tmp_out_dir / "random_search.toml") + flag_error_generate = not os.system("clinicadl " + " ".join(generate_input)) performances_flag = os.path.exists( tmp_out_dir / "job-1" / "split-0" / "best-loss" / "train" diff --git 
a/tests/test_resume.py b/tests/test_resume.py index 3cf883c32..cdf6031ee 100644 --- a/tests/test_resume.py +++ b/tests/test_resume.py @@ -1,15 +1,14 @@ # coding: utf8 import json -import os import shutil from os import system -from os.path import join from pathlib import Path import pytest from clinicadl import MapsManager -from tests.testing_tools import compare_folders + +from .testing_tools import modify_maps @pytest.fixture( @@ -33,6 +32,18 @@ def test_resume(cmdopt, tmp_path, test_name): shutil.copytree(input_dir / test_name, tmp_out_dir / test_name) maps_stopped = tmp_out_dir / test_name + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: # modify the input MAPS + with open(maps_stopped / "maps.json", "r") as f: + config = json.load(f) + config = modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(maps_stopped / "maps.json", "w") as f: + json.dump(config, f) + flag_error = not system(f"clinicadl -vv train resume {maps_stopped}") assert flag_error @@ -48,4 +59,13 @@ def test_resume(cmdopt, tmp_path, test_name): json_data_out = json.load(out) with open(ref_dir / "maps_image_cnn" / "maps.json", "r") as ref: json_data_ref = json.load(ref) + + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + assert json_data_ref == json_data_out diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py index 311e145d0..b20749258 100644 --- a/tests/test_train_ae.py +++ b/tests/test_train_ae.py @@ -3,12 +3,11 @@ import json import os import shutil -from os.path import join from pathlib import Path import pytest -from tests.testing_tools import clean_folder, compare_folders +from .testing_tools import clean_folder, compare_folders, modify_maps @pytest.fixture( @@ -27,8 +26,8 @@ def test_train_ae(cmdopt, tmp_path, test_name): base_dir = Path(cmdopt["input"]) input_dir = base_dir / "train" / "in" ref_dir = base_dir / "train" / "ref" - tmp_out_dir = base_dir / "train" / "out" - # tmp_out_dir.mkdir(parents=True) + tmp_out_dir = tmp_path / "train" / "out" + tmp_out_dir.mkdir(parents=True) clean_folder(tmp_out_dir, recreate=True) @@ -102,18 +101,12 @@ def test_train_ae(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_" + test_name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) - if test_name == "patch_multi_ae": - json_data_out["multi_network"] = True - if cmdopt["no-gpu"]: - json_data_ref["gpu"] = False - if cmdopt["adapt-base-dir"]: - base_dir = base_dir.resolve() - ref_base_dir = Path(json_data_ref["caps_directory"]).parents[2] - json_data_ref["caps_directory"] = str( - base_dir / Path(json_data_ref["caps_directory"]).relative_to(ref_base_dir) - ) - json_data_ref["tsv_path"] = str( - base_dir / Path(json_data_ref["tsv_path"]).relative_to(ref_base_dir) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], ) assert json_data_out == json_data_ref # ["mode"] == mode diff --git a/tests/test_train_cnn.py b/tests/test_train_cnn.py index da5b3a3f1..761fedbee 100644 --- a/tests/test_train_cnn.py +++ b/tests/test_train_cnn.py @@ -3,12 +3,11 @@ import json import os import shutil -from os.path import join from pathlib import Path import pytest -from tests.testing_tools import compare_folders +from .testing_tools import 
compare_folders, modify_maps @pytest.fixture( @@ -101,6 +100,9 @@ def test_train_cnn(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + test_input.append("--no-gpu") + if tmp_out_dir.is_dir(): shutil.rmtree(tmp_out_dir) @@ -117,6 +119,13 @@ def test_train_cnn(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_" + test_name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( diff --git a/tests/test_train_from_json.py b/tests/test_train_from_json.py index f48791d31..363af9aff 100644 --- a/tests/test_train_from_json.py +++ b/tests/test_train_from_json.py @@ -1,17 +1,14 @@ -import os -import pathlib +import json import shutil -from os import path, system -from os.path import join +from os import system from pathlib import Path -from .testing_tools import compare_folders_with_hashes, create_hashes_dict, models_equal +from .testing_tools import compare_folders_with_hashes, create_hashes_dict, modify_maps def test_json_compatibility(cmdopt, tmp_path): base_dir = Path(cmdopt["input"]) input_dir = base_dir / "train_from_json" / "in" - ref_dir = base_dir / "train_from_json" / "ref" tmp_out_dir = tmp_path / "train_from_json" / "out" tmp_out_dir.mkdir(parents=True) @@ -22,6 +19,19 @@ def test_json_compatibility(cmdopt, tmp_path): if reproduced_maps_dir.exists(): shutil.rmtree(reproduced_maps_dir) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: # virtually modify the input MAPS + with open(config_json, "r") as f: + config = json.load(f) + config_json = tmp_out_dir / "modified_maps.json" + config = modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(config_json, "w+") as f: + json.dump(config, f) + flag_error = not system( f"clinicadl train from_json {str(config_json)} {str(reproduced_maps_dir)} -s {split}" ) @@ -31,7 +41,6 @@ def test_json_compatibility(cmdopt, tmp_path): def test_determinism(cmdopt, tmp_path): base_dir = Path(cmdopt["input"]) input_dir = base_dir / "train_from_json" / "in" - ref_dir = base_dir / "train_from_json" / "ref" tmp_out_dir = tmp_path / "train_from_json" / "out" tmp_out_dir.mkdir(parents=True) @@ -50,8 +59,11 @@ def test_determinism(cmdopt, tmp_path): str(maps_dir), "-c", str(input_dir / "reproducibility_config.toml"), - "--no-gpu", ] + + if cmdopt["no-gpu"]: + test_input.append("--no-gpu") + # Run first experiment flag_error = not system("clinicadl " + " ".join(test_input)) assert flag_error diff --git a/tests/test_transfer_learning.py b/tests/test_transfer_learning.py index 95713d7ad..b9c3f999b 100644 --- a/tests/test_transfer_learning.py +++ b/tests/test_transfer_learning.py @@ -1,12 +1,11 @@ import json import os import shutil -from os.path import join from pathlib import Path import pytest -from tests.testing_tools import compare_folders +from .testing_tools import compare_folders, modify_maps # Everything is tested on roi except for cnn --> multicnn (patch) as multicnn is not implemented for roi. 
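The `--adapt-base-dir` handling used throughout these tests relies on one re-rooting trick: strip the absolute prefix recorded on the CI machine and graft the suite-relative part onto the local data directory. A minimal standalone sketch of that logic, assuming the `<base>/<suite>/in/...` layout implied by `parents[2]`; the helper name is illustrative, not part of the codebase:

```python
from pathlib import Path

def rebase(stored: str, local_base: Path) -> str:
    # e.g. stored = "/ci/data/train/in/caps"; parents[2] is "/ci/data",
    # so the suite-relative part "train/in/caps" is kept and re-rooted.
    old_base = Path(stored).parents[2]
    return str(local_base.resolve() / Path(stored).relative_to(old_base))

# rebase("/ci/data/train/in/caps", Path("/home/me/data"))
# -> "/home/me/data/train/in/caps"
```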
@@ -41,7 +40,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), "-c", str(config_path), ] @@ -55,7 +54,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), ] name = "aeTOae" elif test_name == "transfer_ae_cnn": @@ -65,7 +64,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), "-c", str(config_path), ] @@ -79,7 +78,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), ] name = "aeTOcnn" elif test_name == "transfer_cnn_cnn": @@ -89,7 +88,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), "-c", str(config_path), ] @@ -103,7 +102,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), ] name = "cnnTOcnn" elif test_name == "transfer_cnn_multicnn": @@ -113,7 +112,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), "-c", str(config_path), ] @@ -127,12 +126,17 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), + "--multi_network", ] - name = "cnnTOcnn" + name = "cnnTOmulticnn" else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + source_task.append("--no-gpu") + target_task.append("--no-gpu") + if tmp_out_dir.exists(): shutil.rmtree(tmp_out_dir) if tmp_target_dir.exists(): @@ -148,9 +152,21 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_roi_" + name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) - json_data_ref["transfer_path"] = json_data_out["transfer_path"] - json_data_ref["gpu"] = json_data_out["gpu"] + ref_source_dir = Path(json_data_ref["transfer_path"]).parent + json_data_ref["transfer_path"] = str( + tmp_out_dir / Path(json_data_ref["transfer_path"]).relative_to(ref_source_dir) + ) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + # TODO: remove and update data json_data_ref["caps_directory"] = json_data_out["caps_directory"] + json_data_ref["gpu"] = json_data_out["gpu"] + ### assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( diff --git a/tests/testing_tools.py b/tests/testing_tools.py index d4cb29c8a..ff7eb97b1 100644 --- a/tests/testing_tools.py +++ b/tests/testing_tools.py @@ -1,7 +1,7 @@ import pathlib from os import PathLike from pathlib import Path -from typing import Dict, List +from typing import Any, Dict, List def ignore_pattern(file_path: pathlib.Path, ignore_pattern_list: List[str]) -> bool: @@ -166,3 +166,63 @@ def clean_folder(path, recreate=True): rmtree(abs_path) if recreate: makedirs(abs_path) + + +def modify_maps( + maps: Dict[str, Any], + base_dir: Path, + no_gpu: bool = False, + adapt_base_dir: 
bool = False, +) -> Dict[str, Any]: + """ + Modifies a MAPS dictionary if the user passed --no-gpu or --adapt-base-dir flags. + + Parameters + ---------- + maps : Dict[str, Any] + The MAPS dictionary. + base_dir : Path + The base directory, where CI data are stored. + no_gpu : bool (optional, default=False) + Whether the user activated the --no-gpu flag. + adapt_base_dir : bool (optional, default=False) + Whether the user activated the --adapt-base-dir flag. + + Returns + ------- + Dict[str, Any] + The modified MAPS dictionary. + """ + if no_gpu: + maps["gpu"] = False + if adapt_base_dir: + base_dir = base_dir.resolve() + ref_base_dir = Path(maps["caps_directory"]).parents[2] + maps["caps_directory"] = str( + base_dir / Path(maps["caps_directory"]).relative_to(ref_base_dir) + ) + maps["tsv_path"] = str( + base_dir / Path(maps["tsv_path"]).relative_to(ref_base_dir) + ) + return maps + + +def change_gpu_in_toml(toml_path: Path) -> None: + """ + Changes GPU to false in a TOML config file. + + Parameters + ---------- + toml_path : Path + The TOML file. + """ + import toml + + config = toml.load(toml_path) + try: + config["Computational"]["gpu"] = False + except KeyError: + config["Computational"] = {"gpu": False} + f = open(toml_path, "w") + toml.dump(config, f) + f.close() From f641f30f62ea14ae87cdcc0095570fcff71fffc5 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Fri, 31 May 2024 16:56:17 +0200 Subject: [PATCH 20/29] add whole resume pipeline in trainer --- .../commandline/pipelines/train/resume/cli.py | 6 +- clinicadl/trainer/trainer.py | 97 +++++++++++++++++-- clinicadl/trainer/trainer_utils.py | 16 +++ clinicadl/utils/maps_manager/maps_manager.py | 40 +++++++- 4 files changed, 145 insertions(+), 14 deletions(-) diff --git a/clinicadl/commandline/pipelines/train/resume/cli.py b/clinicadl/commandline/pipelines/train/resume/cli.py index ee5cea61e..88c4f6bc0 100644 --- a/clinicadl/commandline/pipelines/train/resume/cli.py +++ b/clinicadl/commandline/pipelines/train/resume/cli.py @@ -4,6 +4,7 @@ from clinicadl.commandline.modules_options import ( cross_validation, ) +from clinicadl.trainer import Trainer @click.command(name="resume", no_args_is_help=True) @@ -14,6 +15,5 @@ def cli(input_maps_directory, split): INPUT_MAPS_DIRECTORY is the path to the MAPS folder where training job has started. 
""" - from clinicadl.train.resume import automatic_resume - - automatic_resume(input_maps_directory, user_split_list=split) + trainer = Trainer.from_maps(input_maps_directory) + trainer.resume(split) diff --git a/clinicadl/trainer/trainer.py b/clinicadl/trainer/trainer.py index 05768b2b0..64f7d532f 100644 --- a/clinicadl/trainer/trainer.py +++ b/clinicadl/trainer/trainer.py @@ -14,24 +14,26 @@ from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler +from clinicadl.transforms.transforms import get_transforms from clinicadl.utils.caps_dataset.data import return_dataset from clinicadl.utils.early_stopping import EarlyStopping from clinicadl.utils.exceptions import MAPSError from clinicadl.utils.maps_manager.ddp import DDP, cluster from clinicadl.utils.maps_manager.logwriter import LogWriter +from clinicadl.utils.maps_manager.maps_manager_utils import read_json from clinicadl.utils.metric_module import RetainBest from clinicadl.utils.seed import pl_worker_init_function, seed_everything -from clinicadl.transforms.transforms import get_transforms from clinicadl.utils.maps_manager import MapsManager from clinicadl.utils.seed import get_seed - from clinicadl.utils.enum import Task -from .trainer_utils import create_parameters_dict +from .trainer_utils import create_parameters_dict, patch_to_read_json +from clinicadl.train.tasks_utils import create_training_config if TYPE_CHECKING: from clinicadl.callbacks.callbacks import Callback from clinicadl.config.config.pipelines.train import TrainConfig + logger = getLogger("clinicadl.trainer") @@ -41,7 +43,6 @@ class Trainer: def __init__( self, config: TrainConfig, - maps_manager: Optional[MapsManager] = None, ) -> None: """ Parameters @@ -49,10 +50,7 @@ def __init__( config : BaseTaskConfig """ self.config = config - if maps_manager: - self.maps_manager = maps_manager - else: - self.maps_manager = self._init_maps_manager(config) + self.maps_manager = self._init_maps_manager(config) self._check_args() def _init_maps_manager(self, config) -> MapsManager: @@ -64,6 +62,87 @@ def _init_maps_manager(self, config) -> MapsManager: maps_path, parameters, verbose=None ) # TODO : precise which parameters in config are useful + @classmethod + def from_json(cls, config_file: Path, maps_path: Path) -> Trainer: + """ + Creates a Trainer from a json configuration file. + + Parameters + ---------- + config_file : Path + The parameters, stored in a json files. + maps_path : Path + The folder where the results of a futur training will be stored. + + Returns + ------- + Trainer + The Trainer object, instantiated with parameters found in config_file. + + Raises + ------ + FileNotFoundError + If config_file doesn't exist. + """ + if not (config_file).is_file(): + raise FileNotFoundError(f"No file found at {config_file}.") + config_dict = patch_to_read_json(read_json(config_file)) # TODO : remove patch + config_object = create_training_config(config_dict["network_task"])( + output_maps_directory=maps_path, **config_dict + ) + return cls(config_object) + + @classmethod + def from_maps(cls, maps_path: Path) -> Trainer: + """ + Creates a Trainer from a json configuration file. + + Parameters + ---------- + maps_path : Path + The path of the MAPS folder. + + Returns + ------- + Trainer + The Trainer object, instantiated with parameters found in maps_path. + + Raises + ------ + MAPSError + If maps_path folder doesn't exist or there is no maps.json file in it. 
+ """ + if not (maps_path / "maps.json").is_file(): + raise MAPSError( + f"MAPS was not found at {maps_path}." + f"To initiate a new MAPS please give a train_dict." + ) + return cls.from_json(maps_path / "maps.json", maps_path) + + def resume(self, splits: List[int]) -> None: + """ + Resume a prematurely stopped training. + + Parameters + ---------- + splits : List[int] + The splits that must be resumed. + """ + stopped_splits = set(self.maps_manager.find_stopped_splits()) + finished_splits = set(self.maps_manager.find_finished_splits()) + absent_splits = set(splits) - stopped_splits - finished_splits + + logger.info( + f"Finished splits {finished_splits}\n" + f"Stopped splits {stopped_splits}\n" + f"Absent splits {absent_splits}" + ) + + if len(stopped_splits) > 0: + self._resume(list(stopped_splits)) + if len(absent_splits) > 0: + self.train(list(absent_splits), overwrite=True) + def _check_args(self): self.config.reproducibility.seed = get_seed(self.config.reproducibility.seed) # if len(self.config.data.label_code) == 0: @@ -120,7 +199,7 @@ def train( else: self._train_single(split_list, resume=False) - def resume( + def _resume( self, split_list: Optional[List[int]] = None, ) -> None: diff --git a/clinicadl/trainer/trainer_utils.py b/clinicadl/trainer/trainer_utils.py index eb6451124..2f327014a 100644 --- a/clinicadl/trainer/trainer_utils.py +++ b/clinicadl/trainer/trainer_utils.py @@ -61,3 +61,19 @@ def create_parameters_dict(config): parameters["normalization"] = "batch" return parameters, maps_path + + +def patch_to_read_json(config_dict): + config_dict["tsv_directory"] = config_dict["tsv_path"] + if ("track_exp" in config_dict) and (config_dict["track_exp"] == ""): + config_dict["track_exp"] = None + config_dict["preprocessing_json"] = config_dict["preprocessing_dict"][ + "extract_json" + ] + if "label_code" not in config_dict or config_dict["label_code"] is None: + config_dict["label_code"] = {} + if "preprocessing_json" not in config_dict: + config_dict["preprocessing_json"] = config_dict["preprocessing_dict"][ + "extract_json" + ] + return config_dict diff --git a/clinicadl/utils/maps_manager/maps_manager.py b/clinicadl/utils/maps_manager/maps_manager.py index e2dfbaa3b..3efd55407 100644 --- a/clinicadl/utils/maps_manager/maps_manager.py +++ b/clinicadl/utils/maps_manager/maps_manager.py @@ -332,14 +332,50 @@ def _compute_output_tensors( torch.save(output, tensor_path / output_filename) logger.debug(f"File saved at {[input_filename, output_filename]}") - def _find_splits(self): - """Find which splits were trained in the MAPS.""" + def find_splits(self): + """Find which splits that were trained in the MAPS.""" return [ int(split.name.split("-")[1]) for split in list(self.maps_path.iterdir()) if split.name.startswith(f"{self.split_name}-") ] + def find_stopped_splits(self): + """Find which splits for which training was not completed.""" + existing_split_list = self.find_splits() + stopped_splits = [ + split + for split in existing_split_list + if (self.maps_path / f"{self.split_name}-{split}" / "tmp") + in list((self.maps_path / f"{self.split_name}-{split}").iterdir()) + ] + return stopped_splits + + def find_finished_splits(self): + """Find which splits for which training was completed.""" + finished_splits = list() + existing_split_list = self.find_splits() + stopped_splits = self.find_stopped_splits() + for split in existing_split_list: + if split not in stopped_splits: + performance_dir_list = [ + performance_dir + for performance_dir in list( + (self.maps_path / 
f"{self.split_name}-{split}").iterdir() + ) + if "best-" in performance_dir.name + ] + if len(performance_dir_list) > 0: + finished_splits.append(split) + return finished_splits + + def find_missing_splits(self): + missing_splits = [ + split + for split in split_iterator + if split not in finished_splits and split not in stopped_splits + ] + def _ensemble_prediction( self, data_group, From a6c336f64d70b6647f53482b599100b0e718cafb Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 10:51:08 +0200 Subject: [PATCH 21/29] change from_json cli --- .../pipelines/train/from_json/cli.py | 22 ++++-------------- clinicadl/trainer/trainer.py | 23 +++++++++++-------- 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/clinicadl/commandline/pipelines/train/from_json/cli.py b/clinicadl/commandline/pipelines/train/from_json/cli.py index b37d2e253..ab613d330 100644 --- a/clinicadl/commandline/pipelines/train/from_json/cli.py +++ b/clinicadl/commandline/pipelines/train/from_json/cli.py @@ -1,14 +1,12 @@ from logging import getLogger -from pathlib import Path import click from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import ( cross_validation, - reproducibility, ) -from clinicadl.train.tasks_utils import create_training_config +from clinicadl.trainer.trainer import Trainer @click.command(name="from_json", no_args_is_help=True) @@ -24,23 +22,11 @@ def cli(**kwargs): OUTPUT_MAPS_DIRECTORY is the path to the MAPS folder where outputs and results will be saved. """ - from clinicadl.trainer.trainer import Trainer - from clinicadl.utils.maps_manager.maps_manager_utils import read_json logger = getLogger("clinicadl") logger.info(f"Reading JSON file at path {kwargs['config_file']}...") - config_dict = read_json(kwargs["config_file"]) - # temporary - config_dict["tsv_directory"] = config_dict["tsv_path"] - if ("track_exp" in config_dict) and (config_dict["track_exp"] == ""): - config_dict["track_exp"] = None - config_dict["maps_dir"] = kwargs["output_maps_directory"] - config_dict["preprocessing_json"] = config_dict["preprocessing_dict"][ - "extract_json" - ] - ### - config = create_training_config(config_dict["network_task"])( - output_maps_directory=kwargs["output_maps_directory"], **config_dict + + trainer = Trainer.from_json( + config_file=kwargs["config_file"], maps_path=kwargs["output_maps_directory"] ) - trainer = Trainer(config) trainer.train(split_list=kwargs["split"], overwrite=True) diff --git a/clinicadl/trainer/trainer.py b/clinicadl/trainer/trainer.py index 64f7d532f..a09c55a34 100644 --- a/clinicadl/trainer/trainer.py +++ b/clinicadl/trainer/trainer.py @@ -47,7 +47,7 @@ def __init__( """ Parameters ---------- - config : BaseTaskConfig + config : TrainConfig """ self.config = config self.maps_manager = self._init_maps_manager(config) @@ -63,15 +63,15 @@ def _init_maps_manager(self, config) -> MapsManager: ) # TODO : precise which parameters in config are useful @classmethod - def from_json(cls, config_file: Path, maps_path: Path) -> Trainer: + def from_json(cls, config_file: str | Path, maps_path: str | Path) -> Trainer: """ Creates a Trainer from a json configuration file. Parameters ---------- - config_file : Path + config_file : str | Path The parameters, stored in a json files. - maps_path : Path + maps_path : str | Path The folder where the results of a futur training will be stored. 
Returns @@ -84,22 +84,25 @@ def from_json(cls, config_file: Path, maps_path: Path) -> Trainer: FileNotFoundError If config_file doesn't exist. """ + config_file = Path(config_file) + if not (config_file).is_file(): - raise FileNotFoundError(f"No file found at {config_file}.") + raise FileNotFoundError(f"No file found at {str(config_file)}.") config_dict = patch_to_read_json(read_json(config_file)) # TODO : remove patch + config_dict["maps_dir"] = maps_path config_object = create_training_config(config_dict["network_task"])( - output_maps_directory=maps_path, **config_dict + **config_dict ) return cls(config_object) @classmethod - def from_maps(cls, maps_path: Path) -> Trainer: + def from_maps(cls, maps_path: str | Path) -> Trainer: """ Creates a Trainer from a json configuration file. Parameters ---------- - maps_path : Path + maps_path : str | Path The path of the MAPS folder. Returns @@ -112,9 +115,11 @@ def from_maps(cls, maps_path: Path) -> Trainer: MAPSError If maps_path folder doesn't exist or there is no maps.json file in it. """ + maps_path = Path(maps_path) + if not (maps_path / "maps.json").is_file(): raise MAPSError( - f"MAPS was not found at {maps_path}." + f"MAPS was not found at {str(maps_path)}." f"To initiate a new MAPS please give a train_dict." ) return cls.from_json(maps_path / "maps.json", maps_path) From f20e7fb31abe444d9204ce92665af43265f284d9 Mon Sep 17 00:00:00 2001 From: HuguesRoy <149707970+HuguesRoy@users.noreply.github.com> Date: Tue, 4 Jun 2024 13:53:33 +0200 Subject: [PATCH 22/29] Update quality_check.py (#609) * Update quality_check.py --- clinicadl/quality_check/t1_linear/quality_check.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clinicadl/quality_check/t1_linear/quality_check.py b/clinicadl/quality_check/t1_linear/quality_check.py index c684858ea..86d85366d 100755 --- a/clinicadl/quality_check/t1_linear/quality_check.py +++ b/clinicadl/quality_check/t1_linear/quality_check.py @@ -141,7 +141,10 @@ def quality_check( qc_df = pd.DataFrame(columns=columns) qc_df["pass"] = qc_df["pass"].astype(bool) softmax = torch.nn.Softmax(dim=1) - logger.info(f"Quality check will be performed over {len(dataloader)} images.") + + logger.info( + f"Quality check will be performed over {len(dataloader.dataset)} images." 
+ ) for data in dataloader: logger.debug(f"Processing subject {data['participant_id']}.") From f6f382aba02e4cb8cc3a2f79f355ea7e8f1f54ed Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:22:11 +0200 Subject: [PATCH 23/29] Fix issue in compare_folders (#610) * add FileNotFound error in tree --- tests/test_generate.py | 4 +- tests/test_predict.py | 69 ++++++++++++++++++++++----------- tests/test_qc.py | 14 +++---- tests/test_random_search.py | 24 ++++++------ tests/test_resume.py | 2 +- tests/test_train_ae.py | 13 +++---- tests/test_train_from_json.py | 2 +- tests/test_transfer_learning.py | 25 ++++++------ tests/testing_tools.py | 51 ++++++++++++++++++------ 9 files changed, 128 insertions(+), 76 deletions(-) diff --git a/tests/test_generate.py b/tests/test_generate.py index 78ad55156..9fc03535b 100644 --- a/tests/test_generate.py +++ b/tests/test_generate.py @@ -46,12 +46,12 @@ def test_generate(cmdopt, tmp_path, test_name): "t1-linear", ] elif test_name == "hypometabolic_example": - output_folder = str(tmp_out_dir / test_name) + output_folder = tmp_out_dir / test_name test_input = [ "generate", "hypometabolic", data_caps_pet, - output_folder, + str(output_folder), "--n_subjects", "2", "--pathology", diff --git a/tests/test_predict.py b/tests/test_predict.py index 34427eeeb..c6b6a39fa 100644 --- a/tests/test_predict.py +++ b/tests/test_predict.py @@ -1,6 +1,5 @@ # coding: utf8 import json -import os import shutil from os.path import exists from pathlib import Path @@ -8,7 +7,8 @@ import pytest from clinicadl import MapsManager -from tests.testing_tools import clean_folder, compare_folders + +from .testing_tools import compare_folders, modify_maps @pytest.fixture( @@ -33,46 +33,71 @@ def test_predict(cmdopt, tmp_path, test_name): tmp_out_dir.mkdir(parents=True) if test_name == "predict_image_classification": - model_folder = input_dir / "maps_image_cnn" + maps_name = "maps_image_cnn" modes = ["image"] use_labels = True elif test_name == "predict_slice_classification": - model_folder = input_dir / "maps_slice_cnn" + maps_name = "maps_slice_cnn" modes = ["image", "slice"] use_labels = True elif test_name == "predict_patch_regression": - model_folder = input_dir / "maps_patch_cnn" + maps_name = "maps_patch_cnn" modes = ["image", "patch"] use_labels = False elif test_name == "predict_roi_regression": - model_folder = input_dir / "maps_roi_cnn" + maps_name = "maps_roi_cnn" modes = ["image", "roi"] use_labels = False elif test_name == "predict_patch_multi_classification": - model_folder = input_dir / "maps_patch_multi_cnn" + maps_name = "maps_patch_multi_cnn" modes = ["image", "patch"] use_labels = False elif test_name == "predict_roi_reconstruction": - model_folder = input_dir / "maps_roi_ae" + maps_name = "maps_roi_ae" modes = ["roi"] use_labels = False else: raise NotImplementedError(f"Test {test_name} is not implemented.") - out_dir = str(model_folder / "split-0/best-loss/test-RANDOM") + shutil.copytree(input_dir / maps_name, tmp_out_dir / maps_name) + model_folder = tmp_out_dir / maps_name + + if cmdopt["adapt-base-dir"]: + with open(model_folder / "maps.json", "r") as f: + config = json.load(f) + config = modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(model_folder / "maps.json", "w") as f: + json.dump(config, f, skipkeys=True, indent=4) + + with open(model_folder / "groups/test-RANDOM/maps.json", "r") as f: + config = json.load(f) + config 
= modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=False, + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(model_folder / "groups/test-RANDOM/maps.json", "w") as f: + json.dump(config, f, skipkeys=True, indent=4) - if exists(out_dir): - shutil.rmtree(out_dir) + tmp_out_subdir = str(model_folder / "split-0/best-loss/test-RANDOM") + if exists(tmp_out_subdir): + shutil.rmtree(tmp_out_subdir) - # Correction of JSON file for ROI - if "roi" in modes: - json_path = model_folder / "maps.json" - with open(json_path, "r") as f: - parameters = json.load(f) - parameters["roi_list"] = ["leftHippocampusBox", "rightHippocampusBox"] - json_data = json.dumps(parameters, skipkeys=True, indent=4) - with open(json_path, "w") as f: - f.write(json_data) + # # Correction of JSON file for ROI + # if "roi" in modes: + # json_path = model_folder / "maps.json" + # with open(json_path, "r") as f: + # parameters = json.load(f) + # parameters["roi_list"] = ["leftHippocampusBox", "rightHippocampusBox"] + # json_data = json.dumps(parameters, skipkeys=True, indent=4) + # with open(json_path, "w") as f: + # f.write(json_data) maps_manager = MapsManager(model_folder, verbose="debug") maps_manager.predict( @@ -91,7 +116,7 @@ def test_predict(cmdopt, tmp_path, test_name): maps_manager.get_metrics(data_group="test-RANDOM", mode=mode) assert compare_folders( - tmp_out_dir / test_name, - ref_dir / test_name, + tmp_out_dir / maps_name, + input_dir / maps_name, tmp_out_dir, ) diff --git a/tests/test_qc.py b/tests/test_qc.py index 910c357d4..9b03c2151 100644 --- a/tests/test_qc.py +++ b/tests/test_qc.py @@ -22,29 +22,29 @@ def test_qc(cmdopt, tmp_path, test_name): tmp_out_dir.mkdir(parents=True) if test_name == "t1-linear": - out_tsv = str(tmp_out_dir / "QC.tsv") + out_tsv = tmp_out_dir / "QC.tsv" test_input = [ "t1-linear", str(input_dir / "caps"), - out_tsv, + str(out_tsv), "--no-gpu", ] elif test_name == "t1-volume": - out_dir = str(tmp_out_dir / "QC_T1V") + out_dir = tmp_out_dir / "QC_T1V" test_input = [ "t1-volume", str(input_dir / "caps_T1V"), - out_dir, + str(out_dir), "Ixi549Space", ] elif test_name == "pet-linear": - out_tsv = str(tmp_out_dir / "QC_pet.tsv") + out_tsv = tmp_out_dir / "QC_pet.tsv" test_input = [ "pet-linear", str(input_dir / "caps_pet"), - out_tsv, + str(out_tsv), "18FFDG", "cerebellumPons2", "--threshold", @@ -73,7 +73,7 @@ def test_qc(cmdopt, tmp_path, test_name): assert out_df.equals(ref_df) elif test_name == "t1-volume": - assert compare_folders(out_dir, str(ref_dir / "QC_T1V"), tmp_out_dir) + assert compare_folders(out_dir, ref_dir / "QC_T1V", tmp_out_dir) elif test_name == "pet-linear": out_df = pd.read_csv(out_tsv, sep="\t") diff --git a/tests/test_random_search.py b/tests/test_random_search.py index 5b68787e8..864f8b1fa 100644 --- a/tests/test_random_search.py +++ b/tests/test_random_search.py @@ -7,7 +7,7 @@ import pytest -from .testing_tools import change_gpu_in_toml, compare_folders +from .testing_tools import compare_folders, modify_toml # random searxh for ROI with CNN @@ -25,6 +25,9 @@ def test_random_search(cmdopt, tmp_path, test_name): input_dir = base_dir / "randomSearch" / "in" ref_dir = base_dir / "randomSearch" / "ref" tmp_out_dir = tmp_path / "randomSearch" / "out" + + if os.path.exists(tmp_out_dir): + shutil.rmtree(tmp_out_dir) tmp_out_dir.mkdir(parents=True) if test_name == "rs_roi_cnn": @@ -33,21 +36,16 @@ def test_random_search(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") - run_test_random_search( - 
toml_path, generate_input, tmp_out_dir, ref_dir, cmdopt["no-gpu"] - ) - - -def run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir, no_gpu): - if os.path.exists(tmp_out_dir): - shutil.rmtree(tmp_out_dir) - # Write random_search.toml file - os.makedirs(tmp_out_dir, exist_ok=True) shutil.copy(toml_path, tmp_out_dir) - if no_gpu: - change_gpu_in_toml(tmp_out_dir / "random_search.toml") + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + modify_toml( + toml_path=tmp_out_dir / "random_search.toml", + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) flag_error_generate = not os.system("clinicadl " + " ".join(generate_input)) performances_flag = os.path.exists( diff --git a/tests/test_resume.py b/tests/test_resume.py index cdf6031ee..5827bda0f 100644 --- a/tests/test_resume.py +++ b/tests/test_resume.py @@ -42,7 +42,7 @@ def test_resume(cmdopt, tmp_path, test_name): adapt_base_dir=cmdopt["adapt-base-dir"], ) with open(maps_stopped / "maps.json", "w") as f: - json.dump(config, f) + json.dump(config, f, skipkeys=True, indent=4) flag_error = not system(f"clinicadl -vv train resume {maps_stopped}") assert flag_error diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py index b20749258..c7fbcb276 100644 --- a/tests/test_train_ae.py +++ b/tests/test_train_ae.py @@ -33,8 +33,10 @@ def test_train_ae(cmdopt, tmp_path, test_name): labels_path = str(input_dir / "labels_list" / "2_fold") config_path = str(input_dir / "train_config.toml") + split = 0 + if test_name == "image_ae": - split = [0, 0] + split = 1 test_input = [ "train", "reconstruction", @@ -45,10 +47,9 @@ def test_train_ae(cmdopt, tmp_path, test_name): "-c", config_path, "--split", - "1", + str(split), ] elif test_name == "patch_multi_ae": - split = [0, 0] test_input = [ "train", "reconstruction", @@ -61,7 +62,6 @@ def test_train_ae(cmdopt, tmp_path, test_name): "--multi_network", ] elif test_name == "roi_ae": - split = [0, 0] test_input = [ "train", "reconstruction", @@ -73,7 +73,6 @@ def test_train_ae(cmdopt, tmp_path, test_name): config_path, ] elif test_name == "slice_ae": - split = [0, 0] test_input = [ "train", "reconstruction", @@ -116,7 +115,7 @@ def test_train_ae(cmdopt, tmp_path, test_name): tmp_path, ) assert compare_folders( - tmp_out_dir / f"split-{split[0]}" / "best-loss", - ref_dir / ("maps_" + test_name) / f"split-{split[1]}" / "best-loss", + tmp_out_dir / f"split-{split}" / "best-loss", + ref_dir / ("maps_" + test_name) / f"split-{split}" / "best-loss", tmp_path, ) diff --git a/tests/test_train_from_json.py b/tests/test_train_from_json.py index 363af9aff..06b307b0f 100644 --- a/tests/test_train_from_json.py +++ b/tests/test_train_from_json.py @@ -30,7 +30,7 @@ def test_json_compatibility(cmdopt, tmp_path): adapt_base_dir=cmdopt["adapt-base-dir"], ) with open(config_json, "w+") as f: - json.dump(config, f) + json.dump(config, f, skipkeys=True, indent=4) flag_error = not system( f"clinicadl train from_json {str(config_json)} {str(reproduced_maps_dir)} -s {split}" diff --git a/tests/test_transfer_learning.py b/tests/test_transfer_learning.py index b9c3f999b..d49cbd61f 100644 --- a/tests/test_transfer_learning.py +++ b/tests/test_transfer_learning.py @@ -152,20 +152,23 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_roi_" + name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) - ref_source_dir = Path(json_data_ref["transfer_path"]).parent - json_data_ref["transfer_path"] = str( - tmp_out_dir / 
Path(json_data_ref["transfer_path"]).relative_to(ref_source_dir) - ) - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: - json_data_ref = modify_maps( - maps=json_data_ref, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) + # TODO : uncomment when CI data are correct + # ref_source_dir = Path(json_data_ref["transfer_path"]).parent + # json_data_ref["transfer_path"] = str( + # tmp_out_dir / Path(json_data_ref["transfer_path"]).relative_to(ref_source_dir) + # ) + # if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + # json_data_ref = modify_maps( + # maps=json_data_ref, + # base_dir=base_dir, + # no_gpu=cmdopt["no-gpu"], + # adapt_base_dir=cmdopt["adapt-base-dir"], + # ) # TODO: remove and update data json_data_ref["caps_directory"] = json_data_out["caps_directory"] json_data_ref["gpu"] = json_data_out["gpu"] + json_data_ref["transfer_path"] = json_data_out["transfer_path"] + json_data_ref["tsv_path"] = json_data_out["tsv_path"] ### assert json_data_out == json_data_ref # ["mode"] == mode diff --git a/tests/testing_tools.py b/tests/testing_tools.py index ff7eb97b1..4044d1022 100644 --- a/tests/testing_tools.py +++ b/tests/testing_tools.py @@ -95,6 +95,9 @@ def tree(dir_: PathLike, file_out: PathLike): """ from pathlib import Path + if not dir_.is_dir(): + raise FileNotFoundError(f"No directory found at {dir_}.") + file_content = "" for path in sorted(Path(dir_).rglob("*")): @@ -104,8 +107,6 @@ def tree(dir_: PathLike, file_out: PathLike): spacer = " " * depth file_content = file_content + f"{spacer}+ {path.name}\n" - print(file_content) - Path(file_out).write_text(file_content) @@ -201,28 +202,54 @@ def modify_maps( maps["caps_directory"] = str( base_dir / Path(maps["caps_directory"]).relative_to(ref_base_dir) ) - maps["tsv_path"] = str( - base_dir / Path(maps["tsv_path"]).relative_to(ref_base_dir) - ) + try: + maps["tsv_path"] = str( + base_dir / Path(maps["tsv_path"]).relative_to(ref_base_dir) + ) + except KeyError: # maps with only caps directory + pass return maps -def change_gpu_in_toml(toml_path: Path) -> None: +def modify_toml( + toml_path: Path, + base_dir: Path, + no_gpu: bool = False, + adapt_base_dir: bool = False, +) -> None: """ - Changes GPU to false in a TOML config file. + Modifies a TOML file if the user passed --no-gpu or --adapt-base-dir flags. Parameters ---------- toml_path : Path - The TOML file. + The path of the TOML file. + base_dir : Path + The base directory, where CI data are stored. + no_gpu : bool (optional, default=False) + Whether the user activated the --no-gpu flag. + adapt_base_dir : bool (optional, default=False) + Whether the user activated the --adapt-base-dir flag. 
""" import toml config = toml.load(toml_path) - try: - config["Computational"]["gpu"] = False - except KeyError: - config["Computational"] = {"gpu": False} + if no_gpu: + try: + config["Computational"]["gpu"] = False + except KeyError: + config["Computational"] = {"gpu": False} + if adapt_base_dir: + random_search_config = config["Random_Search"] + base_dir = base_dir.resolve() + ref_base_dir = Path(random_search_config["caps_directory"]).parents[2] + random_search_config["caps_directory"] = str( + base_dir + / Path(random_search_config["caps_directory"]).relative_to(ref_base_dir) + ) + random_search_config["tsv_path"] = str( + base_dir / Path(random_search_config["tsv_path"]).relative_to(ref_base_dir) + ) f = open(toml_path, "w") toml.dump(config, f) f.close() From 523563d985f1ad28e97e754d6a0d10ea9799e263 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:35:56 +0200 Subject: [PATCH 24/29] revert change on poetry --- poetry.lock | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/poetry.lock b/poetry.lock index c9b87b84f..eafdc75ff 100644 --- a/poetry.lock +++ b/poetry.lock @@ -46,6 +46,20 @@ files = [ [package.extras] dev = ["black", "coverage", "isort", "pre-commit", "pyenchant", "pylint"] +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "appdirs" version = "1.4.4" From 4971fa7b77b8dcf5bc742b59b328d26ab2dbca2e Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:43:48 +0200 Subject: [PATCH 25/29] correction of wrong conflict choice in rebasing --- tests/test_predict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_predict.py b/tests/test_predict.py index ebc9d0fd1..93d3a4a38 100644 --- a/tests/test_predict.py +++ b/tests/test_predict.py @@ -6,7 +6,7 @@ import pytest -from clinicadl import MapsManager +from clinicadl.predict.predict_manager import PredictManager from .testing_tools import compare_folders, modify_maps From fdae3dda371526c4b028f00c223d1ab242d0213e Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 18:35:43 +0200 Subject: [PATCH 26/29] restore split_manager --- clinicadl/trainer/trainer.py | 22 +++++++++++++++----- clinicadl/utils/maps_manager/maps_manager.py | 16 +++++--------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/clinicadl/trainer/trainer.py b/clinicadl/trainer/trainer.py index a09c55a34..7b6b19382 100644 --- a/clinicadl/trainer/trainer.py +++ b/clinicadl/trainer/trainer.py @@ -57,10 +57,14 @@ def _init_maps_manager(self, config) -> MapsManager: # temporary: to match CLI data. 
TODO : change CLI data parameters, maps_path = create_parameters_dict(config) - - return MapsManager( - maps_path, parameters, verbose=None - ) # TODO : precise which parameters in config are useful + if maps_path.is_dir(): + return MapsManager( + maps_path, verbose=None + ) # TODO : precise which parameters in config are useful + else: + return MapsManager( + maps_path, parameters, verbose=None + ) # TODO : precise which parameters in config are useful @classmethod def from_json(cls, config_file: str | Path, maps_path: str | Path) -> Trainer: @@ -135,7 +139,11 @@ def resume(self, splits: List[int]) -> None: """ stopped_splits = set(self.maps_manager.find_stopped_splits()) finished_splits = set(self.maps_manager.find_finished_splits()) - absent_splits = set(splits) - stopped_splits - finished_splits + # TODO : check these two lines. Why do we need a split_manager? + split_manager = self.maps_manager._init_split_manager(split_list=splits) + split_iterator = split_manager.split_iterator() + ### + absent_splits = set(split_iterator) - stopped_splits - finished_splits logger.info( f"Finished splits {finished_splits}\n" @@ -143,6 +151,10 @@ def resume(self, splits: List[int]) -> None: f"Absent splits {absent_splits}" ) + if len(stopped_splits) == 0 and len(absent_splits) == 0: + raise ValueError( + "Training has been completed on all the splits you passed." + ) if len(stopped_splits) > 0: self._resume(list(stopped_splits)) if len(absent_splits) > 0: diff --git a/clinicadl/utils/maps_manager/maps_manager.py b/clinicadl/utils/maps_manager/maps_manager.py index 3efd55407..462a74e41 100644 --- a/clinicadl/utils/maps_manager/maps_manager.py +++ b/clinicadl/utils/maps_manager/maps_manager.py @@ -332,15 +332,16 @@ def _compute_output_tensors( torch.save(output, tensor_path / output_filename) logger.debug(f"File saved at {[input_filename, output_filename]}") - def find_splits(self): + def find_splits(self) -> List[int]: """Find which splits that were trained in the MAPS.""" - return [ + splits = [ int(split.name.split("-")[1]) for split in list(self.maps_path.iterdir()) if split.name.startswith(f"{self.split_name}-") ] + return splits - def find_stopped_splits(self): + def find_stopped_splits(self) -> List[int]: """Find which splits for which training was not completed.""" existing_split_list = self.find_splits() stopped_splits = [ @@ -351,7 +352,7 @@ def find_stopped_splits(self): ] return stopped_splits - def find_finished_splits(self): + def find_finished_splits(self) -> List[int]: """Find which splits for which training was completed.""" finished_splits = list() existing_split_list = self.find_splits() @@ -369,13 +370,6 @@ def find_finished_splits(self): finished_splits.append(split) return finished_splits - def find_missing_splits(self): - missing_splits = [ - split - for split in split_iterator - if split not in finished_splits and split not in stopped_splits - ] - def _ensemble_prediction( self, data_group, From 9a57198ce07eb1d83a93ddcb2b09ae196eeeea65 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 18:39:14 +0200 Subject: [PATCH 27/29] trigger tests --- clinicadl/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clinicadl/trainer/trainer.py b/clinicadl/trainer/trainer.py index 7b6b19382..5ff3714d0 100644 --- a/clinicadl/trainer/trainer.py +++ b/clinicadl/trainer/trainer.py @@ -164,7 +164,7 @@ def _check_args(self): self.config.reproducibility.seed = get_seed(self.config.reproducibility.seed) # 
From 9a57198ce07eb1d83a93ddcb2b09ae196eeeea65 Mon Sep 17 00:00:00 2001
From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com>
Date: Tue, 4 Jun 2024 18:39:14 +0200
Subject: [PATCH 27/29] trigger tests

---
 clinicadl/trainer/trainer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clinicadl/trainer/trainer.py b/clinicadl/trainer/trainer.py
index 7b6b19382..5ff3714d0 100644
--- a/clinicadl/trainer/trainer.py
+++ b/clinicadl/trainer/trainer.py
@@ -164,7 +164,7 @@ def _check_args(self):
         self.config.reproducibility.seed = get_seed(self.config.reproducibility.seed)
         # if len(self.config.data.label_code) == 0:
         #     self.config.data.label_code = self.maps_manager.label_code
-        # TODO : deal with label_code and replace self.maps_manager.label_code
+        # TODO: deal with label_code and replace self.maps_manager.label_code

     def train(
         self,

From 375e67e7bc76bee12c027fbc99ac1cfecf778ea8 Mon Sep 17 00:00:00 2001
From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com>
Date: Wed, 5 Jun 2024 09:40:24 +0200
Subject: [PATCH 28/29] delete automatic resume function

---
 clinicadl/train/resume.py | 82 ---------------------------------------
 1 file changed, 82 deletions(-)
 delete mode 100644 clinicadl/train/resume.py

diff --git a/clinicadl/train/resume.py b/clinicadl/train/resume.py
deleted file mode 100644
index f80f5791b..000000000
--- a/clinicadl/train/resume.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Automatic relaunch of jobs that were stopped before the end of training.
-Unfinished splits are detected as they do not contain a "performances" sub-folder
-"""
-# TODO: Remove this file and put everything in trainer.resume() ??
-from logging import getLogger
-from pathlib import Path
-
-from clinicadl import MapsManager
-from clinicadl.train.tasks_utils import create_training_config
-from clinicadl.trainer.trainer import Trainer
-
-
-def replace_arg(options, key_name, value):
-    if value is not None:
-        setattr(options, key_name, value)
-
-
-def automatic_resume(model_path: Path, user_split_list=None, verbose=0):
-    logger = getLogger("clinicadl")
-
-    verbose_list = ["warning", "info", "debug"]
-    maps_manager = MapsManager(model_path, verbose=verbose_list[verbose])
-    config_dict = maps_manager.get_parameters()
-    # temporary, TODO
-    config_dict["tsv_directory"] = config_dict["tsv_path"]
-    if config_dict["track_exp"] == "":
-        config_dict["track_exp"] = None
-    if "label_code" not in config_dict or config_dict["label_code"] is None:
-        config_dict["label_code"] = {}
-    if "preprocessing_json" not in config_dict:
-        config_dict["preprocessing_json"] = config_dict["preprocessing_dict"][
-            "extract_json"
-        ]
-    config_dict["maps_dir"] = model_path
-    ###
-    config = create_training_config(config_dict["network_task"])(
-        output_maps_directory=model_path, **config_dict
-    )
-    trainer = Trainer(config, maps_manager=maps_manager)
-
-    existing_split_list = maps_manager._find_splits()
-    stopped_splits = [
-        split
-        for split in existing_split_list
-        if (model_path / f"{maps_manager.split_name}-{split}" / "tmp")
-        in list((model_path / f"{maps_manager.split_name}-{split}").iterdir())
-    ]
-
-    # Find finished split
-    finished_splits = list()
-    for split in existing_split_list:
-        if split not in stopped_splits:
-            performance_dir_list = [
-                performance_dir
-                for performance_dir in list(
-                    (model_path / f"{maps_manager.split_name}-{split}").iterdir()
-                )
-                if "best-" in performance_dir.name
-            ]
-            if len(performance_dir_list) > 0:
-                finished_splits.append(split)
-
-    split_manager = maps_manager._init_split_manager(split_list=user_split_list)
-    split_iterator = split_manager.split_iterator()
-
-    absent_splits = [
-        split
-        for split in split_iterator
-        if split not in finished_splits and split not in stopped_splits
-    ]
-
-    # To ensure retro-compatibility with random search
-    logger.info(
-        f"Finished splits {finished_splits}\n"
-        f"Stopped splits {stopped_splits}\n"
-        f"Absent splits {absent_splits}"
-    )
-    if len(stopped_splits) > 0:
-        trainer.resume(stopped_splits)
-    if len(absent_splits) > 0:
-        trainer.train(absent_splits, overwrite=True)
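With automatic_resume deleted, the same behaviour now lives in Trainer.resume() from PATCH 26: stopped splits are resumed, absent splits are trained, and a ValueError is raised when every requested split is already finished. A hedged usage sketch of the replacement flow, assuming the APIs visible in the diffs above; the MAPS directory and the config-file location are hypothetical:

    from pathlib import Path

    from clinicadl.trainer.trainer import Trainer

    maps_dir = Path("results/maps")  # hypothetical MAPS directory
    trainer = Trainer.from_json(
        config_file=maps_dir / "maps.json",  # hypothetical config location
        maps_path=maps_dir,
    )
    # Partitions the requested splits itself; raises ValueError if all are done.
    trainer.resume(splits=[0, 1, 2])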
From 919f93086e7f34db0dd92603adec6b9323c5bcde Mon Sep 17 00:00:00 2001
From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com>
Date: Wed, 5 Jun 2024 09:40:44 +0200
Subject: [PATCH 29/29] change _find_splits to find_splits

---
 clinicadl/config/config/cross_validation.py | 2 +-
 clinicadl/predict/predict_manager.py        | 2 +-
 clinicadl/utils/meta_maps/getter.py         | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/clinicadl/config/config/cross_validation.py b/clinicadl/config/config/cross_validation.py
index fd2b4cb40..3441d72d1 100644
--- a/clinicadl/config/config/cross_validation.py
+++ b/clinicadl/config/config/cross_validation.py
@@ -34,5 +34,5 @@ def validator_split(cls, v):
     def adapt_cross_val_with_maps_manager_info(self, maps_manager: MapsManager):
         # TEMPORARY
         if not self.split:
-            self.split = maps_manager._find_splits()
+            self.split = maps_manager.find_splits()
         logger.debug(f"List of splits {self.split}")

diff --git a/clinicadl/predict/predict_manager.py b/clinicadl/predict/predict_manager.py
index 6684cb149..01df83dd8 100644
--- a/clinicadl/predict/predict_manager.py
+++ b/clinicadl/predict/predict_manager.py
@@ -791,7 +791,7 @@ def _check_data_group(
                 raise MAPSError("Cannot overwrite train or validation data group.")
             else:
                 # if not split_list:
-                #     split_list = self.maps_manager._find_splits()
+                #     split_list = self.maps_manager.find_splits()
                 assert self._config.split
                 for split in self._config.split:
                     selection_metrics = self.maps_manager._find_selection_metrics(

diff --git a/clinicadl/utils/meta_maps/getter.py b/clinicadl/utils/meta_maps/getter.py
index 2f400ffc3..ea75e4da5 100644
--- a/clinicadl/utils/meta_maps/getter.py
+++ b/clinicadl/utils/meta_maps/getter.py
@@ -34,7 +34,7 @@ def meta_maps_analysis(launch_dir: Path, evaluation_metric="loss"):
     for job in jobs_list:
         performances_dict[job] = dict()
         maps_manager = MapsManager(launch_dir / job)
-        split_list = maps_manager._find_splits()
+        split_list = maps_manager.find_splits()
         split_set = split_set | set(split_list)
     for split in split_set:
         performances_dict[job][split] = dict()
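The rename makes split discovery part of the public MapsManager surface, matching how cross_validation.py, predict_manager.py, and getter.py consume it. A small usage sketch, with a hypothetical MAPS path and the import path used elsewhere in the tree:

    from pathlib import Path

    from clinicadl import MapsManager

    maps_manager = MapsManager(Path("launch_dir/job_1"))  # hypothetical MAPS path
    print(maps_manager.find_splits())  # e.g. [0, 1, 2] for split-0/ ... split-2/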