From f5f8573ba53414072213890ec442e1008eed4192 Mon Sep 17 00:00:00 2001 From: ioangatop Date: Mon, 18 Mar 2024 17:51:26 +0100 Subject: [PATCH] move from `pytorch_lightning` to `lightning` --- configs/core/tests/offline/embeddings.yaml | 4 +- configs/vision/dino_vit/offline/bach.yaml | 8 ++-- configs/vision/dino_vit/offline/crc.yaml | 8 ++-- configs/vision/dino_vit/offline/mhist.yaml | 8 ++-- .../dino_vit/offline/patch_camelyon.yaml | 8 ++-- configs/vision/dino_vit/online/bach.yaml | 8 ++-- configs/vision/dino_vit/online/crc.yaml | 8 ++-- configs/vision/dino_vit/online/mhist.yaml | 8 ++-- .../dino_vit/online/patch_camelyon.yaml | 8 ++-- configs/vision/owkin/phikon/offline/bach.yaml | 8 ++-- configs/vision/owkin/phikon/offline/crc.yaml | 8 ++-- .../vision/owkin/phikon/offline/mhist.yaml | 8 ++-- .../owkin/phikon/offline/patch_camelyon.yaml | 8 ++-- .../vision/tests/offline/patch_camelyon.yaml | 4 +- pdm.lock | 39 +++++++++++++++---- pyproject.toml | 8 ++-- src/eva/core/callbacks/writers/embeddings.py | 4 +- src/eva/core/data/datamodules/datamodule.py | 4 +- src/eva/core/models/modules/head.py | 4 +- src/eva/core/models/modules/inference.py | 2 +- src/eva/core/models/modules/module.py | 6 +-- src/eva/core/models/modules/typings.py | 2 +- .../models/modules/utils/batch_postprocess.py | 2 +- src/eva/core/trainers/_recorder.py | 2 +- src/eva/core/trainers/functional.py | 2 +- src/eva/core/trainers/trainer.py | 14 +++---- .../core/callbacks/writers/test_embeddings.py | 4 +- .../models/networks/wrappers/test_onnx.py | 2 +- tests/eva/core/trainers/test_recorder.py | 2 +- 29 files changed, 112 insertions(+), 89 deletions(-) diff --git a/configs/core/tests/offline/embeddings.yaml b/configs/core/tests/offline/embeddings.yaml index 6e1d31ba..b10881f9 100644 --- a/configs/core/tests/offline/embeddings.yaml +++ b/configs/core/tests/offline/embeddings.yaml @@ -7,11 +7,11 @@ trainer: limit_train_batches: 2 limit_val_batches: 2 callbacks: - - class_path: 
pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_DIR name: "" diff --git a/configs/vision/dino_vit/offline/bach.yaml b/configs/vision/dino_vit/offline/bach.yaml index 3a6b6a06..3d1dd721 100644 --- a/configs/vision/dino_vit/offline/bach.yaml +++ b/configs/vision/dino_vit/offline/bach.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 400 @@ -39,7 +39,7 @@ trainer: force_reload: ${oc.env:FORCE_RELOAD, false} checkpoint_path: ${oc.env:CHECKPOINT_PATH, null} logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/dino_vit/offline/crc.yaml b/configs/vision/dino_vit/offline/crc.yaml index 1aafaa41..1790d610 100644 --- a/configs/vision/dino_vit/offline/crc.yaml +++ b/configs/vision/dino_vit/offline/crc.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, 
dino_vits16}/offline/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 24 @@ -39,7 +39,7 @@ trainer: force_reload: ${oc.env:FORCE_RELOAD, false} checkpoint_path: ${oc.env:CHECKPOINT_PATH, null} logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/dino_vit/offline/mhist.yaml b/configs/vision/dino_vit/offline/mhist.yaml index 2d0bdca4..2fac6964 100644 --- a/configs/vision/dino_vit/offline/mhist.yaml +++ b/configs/vision/dino_vit/offline/mhist.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: 
lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 51 @@ -39,7 +39,7 @@ trainer: force_reload: ${oc.env:FORCE_RELOAD, false} checkpoint_path: ${oc.env:CHECKPOINT_PATH, null} logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/dino_vit/offline/patch_camelyon.yaml b/configs/vision/dino_vit/offline/patch_camelyon.yaml index 8bcc3f99..1a6a7b98 100644 --- a/configs/vision/dino_vit/offline/patch_camelyon.yaml +++ b/configs/vision/dino_vit/offline/patch_camelyon.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/offline/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 9 @@ -40,7 +40,7 @@ trainer: force_reload: ${oc.env:FORCE_RELOAD, false} checkpoint_path: ${oc.env:CHECKPOINT_PATH, null} logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/dino_vit/online/bach.yaml b/configs/vision/dino_vit/online/bach.yaml index 3ec211d7..6171eda2 100644 --- a/configs/vision/dino_vit/online/bach.yaml +++ b/configs/vision/dino_vit/online/bach.yaml @@ -5,24 +5,24 @@ 
trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 400 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/dino_vit/online/crc.yaml b/configs/vision/dino_vit/online/crc.yaml index 2bb0b6c7..f90c0cfc 100644 --- a/configs/vision/dino_vit/online/crc.yaml +++ b/configs/vision/dino_vit/online/crc.yaml @@ -5,24 +5,24 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: 
lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 24 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/dino_vit/online/mhist.yaml b/configs/vision/dino_vit/online/mhist.yaml index b3b09c03..cf4c6770 100644 --- a/configs/vision/dino_vit/online/mhist.yaml +++ b/configs/vision/dino_vit/online/mhist.yaml @@ -5,24 +5,24 @@ trainer: default_root_dir: &LIGHTNING_ROOT ${oc.env:LIGHTNING_ROOT, logs/dino_vits16/online/mhist} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 51 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *LIGHTNING_ROOT name: "" diff --git a/configs/vision/dino_vit/online/patch_camelyon.yaml b/configs/vision/dino_vit/online/patch_camelyon.yaml index 1708d944..0f3d2e2c 100644 --- a/configs/vision/dino_vit/online/patch_camelyon.yaml +++ b/configs/vision/dino_vit/online/patch_camelyon.yaml @@ -5,24 +5,24 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, dino_vits16}/online/patch_camelyon} max_steps: &MAX_STEPS 
${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 9 monitor: *MONITOR_METRIC mode: *MONITOR_METRIC_MODE logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/owkin/phikon/offline/bach.yaml b/configs/vision/owkin/phikon/offline/bach.yaml index 34305d67..35fe73e6 100644 --- a/configs/vision/owkin/phikon/offline/bach.yaml +++ b/configs/vision/owkin/phikon/offline/bach.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/bach} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 400 @@ -35,7 +35,7 @@ trainer: tensor_transforms: 
class_path: eva.core.models.networks.transforms.ExtractCLSFeatures logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/owkin/phikon/offline/crc.yaml b/configs/vision/owkin/phikon/offline/crc.yaml index 93135777..a1abcca6 100644 --- a/configs/vision/owkin/phikon/offline/crc.yaml +++ b/configs/vision/owkin/phikon/offline/crc.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/crc} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/MulticlassAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 24 @@ -35,7 +35,7 @@ trainer: tensor_transforms: class_path: eva.core.models.networks.transforms.ExtractCLSFeatures logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/owkin/phikon/offline/mhist.yaml b/configs/vision/owkin/phikon/offline/mhist.yaml index b9dcd6e7..a4dbf234 100644 --- a/configs/vision/owkin/phikon/offline/mhist.yaml +++ b/configs/vision/owkin/phikon/offline/mhist.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/mhist} max_steps: &MAX_STEPS 
${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 51 @@ -35,7 +35,7 @@ trainer: tensor_transforms: class_path: eva.core.models.networks.transforms.ExtractCLSFeatures logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/owkin/phikon/offline/patch_camelyon.yaml b/configs/vision/owkin/phikon/offline/patch_camelyon.yaml index a9d6fe01..be2bc4a7 100644 --- a/configs/vision/owkin/phikon/offline/patch_camelyon.yaml +++ b/configs/vision/owkin/phikon/offline/patch_camelyon.yaml @@ -6,17 +6,17 @@ trainer: default_root_dir: &OUTPUT_ROOT ${oc.env:OUTPUT_ROOT, logs/${oc.env:DINO_BACKBONE, owkin/phikon}/offline/patch_camelyon} max_steps: &MAX_STEPS ${oc.env:MAX_STEPS, 12500} callbacks: - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: logging_interval: epoch - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true save_top_k: 1 monitor: &MONITOR_METRIC ${oc.env:MONITOR_METRIC, val/BinaryAccuracy} mode: &MONITOR_METRIC_MODE ${oc.env:MONITOR_METRIC_MODE, max} - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: 
lightning.pytorch.callbacks.EarlyStopping init_args: min_delta: 0 patience: 9 @@ -36,7 +36,7 @@ trainer: tensor_transforms: class_path: eva.core.models.networks.transforms.ExtractCLSFeatures logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *OUTPUT_ROOT name: "" diff --git a/configs/vision/tests/offline/patch_camelyon.yaml b/configs/vision/tests/offline/patch_camelyon.yaml index 912bc915..2b8e106f 100644 --- a/configs/vision/tests/offline/patch_camelyon.yaml +++ b/configs/vision/tests/offline/patch_camelyon.yaml @@ -23,7 +23,7 @@ trainer: model: dino_vits16 pretrained: false checkpoint_path: &CHECKPOINT_PATH ${oc.env:CHECKPOINT_PATH, null} - - class_path: pytorch_lightning.callbacks.ModelCheckpoint + - class_path: lightning.pytorch.callbacks.ModelCheckpoint init_args: filename: best save_last: true @@ -31,7 +31,7 @@ trainer: monitor: &MONITOR_METRIC val/BinaryAccuracy mode: &MONITOR_METRIC_MODE max logger: - - class_path: pytorch_lightning.loggers.TensorBoardLogger + - class_path: lightning.pytorch.loggers.TensorBoardLogger init_args: save_dir: *LIGHTNING_ROOT name: "" diff --git a/pdm.lock b/pdm.lock index 189d276a..35e81f65 100644 --- a/pdm.lock +++ b/pdm.lock @@ -2,10 +2,10 @@ # It is not intended for manual editing. 
[metadata] -groups = ["default", "dev", "docs", "test", "lint", "all", "vision", "typecheck"] +groups = ["default", "dev", "docs", "all", "typecheck", "lint", "vision", "test"] strategy = ["cross_platform", "inherit_metadata"] lock_version = "4.4.1" -content_hash = "sha256:3447a7b87556f973900f21e9b85ef3ecd39631d2092584cf8411f29f6fde2a7f" +content_hash = "sha256:745291ea57d3487b9569e739876731c5fd5442288e01d53da2de8ff86bb0d909" [[package]] name = "absl-py" @@ -733,6 +733,29 @@ files = [ {file = "jsonargparse-4.27.6.tar.gz", hash = "sha256:ebd2e0a4faef85a075bb6ef79c6b2f03f57a5f8e3db26c911b55518a1bca68ad"}, ] +[[package]] +name = "lightning" +version = "2.2.1" +requires_python = ">=3.8" +summary = "The Deep Learning framework to train, deploy, and ship AI products Lightning fast." +groups = ["default"] +dependencies = [ + "PyYAML<8.0,>=5.4", + "fsspec[http]<2025.0,>=2022.5.0", + "lightning-utilities<2.0,>=0.8.0", + "numpy<3.0,>=1.17.2", + "packaging<25.0,>=20.0", + "pytorch-lightning", + "torch<4.0,>=1.13.0", + "torchmetrics<3.0,>=0.7.0", + "tqdm<6.0,>=4.57.0", + "typing-extensions<6.0,>=4.4.0", +] +files = [ + {file = "lightning-2.2.1-py3-none-any.whl", hash = "sha256:fec9b49d29a6019e8fe49e825082bab8d5ea3fde8e4b36dcf5c8896c2bdb86c3"}, + {file = "lightning-2.2.1.tar.gz", hash = "sha256:b3e46d596b32cafd1fb9b21fdba1b1767df97b1af5cc702693d1c51df60b19aa"}, +] + [[package]] name = "lightning-utilities" version = "0.10.1" @@ -895,7 +918,7 @@ files = [ [[package]] name = "mkdocs-material" -version = "9.5.13" +version = "9.5.14" requires_python = ">=3.8" summary = "Documentation that simply works" groups = ["dev", "docs"] @@ -913,8 +936,8 @@ dependencies = [ "requests~=2.26", ] files = [ - {file = "mkdocs_material-9.5.13-py3-none-any.whl", hash = "sha256:5cbe17fee4e3b4980c8420a04cc762d8dc052ef1e10532abd4fce88e5ea9ce6a"}, - {file = "mkdocs_material-9.5.13.tar.gz", hash = "sha256:d8e4caae576312a88fd2609b81cf43d233cdbe36860d67a68702b018b425bd87"}, + {file = 
"mkdocs_material-9.5.14-py3-none-any.whl", hash = "sha256:a45244ac221fda46ecf8337f00ec0e5cb5348ab9ffb203ca2a0c313b0d4dbc27"}, + {file = "mkdocs_material-9.5.14.tar.gz", hash = "sha256:2a1f8e67cda2587ab93ecea9ba42d0ca61d1d7b5fad8cf690eeaeb39dcd4b9af"}, ] [[package]] @@ -2244,7 +2267,7 @@ files = [ [[package]] name = "torchmetrics" -version = "1.3.1" +version = "1.3.2" requires_python = ">=3.8" summary = "PyTorch native Metrics" groups = ["default"] @@ -2255,8 +2278,8 @@ dependencies = [ "torch>=1.10.0", ] files = [ - {file = "torchmetrics-1.3.1-py3-none-any.whl", hash = "sha256:a44bd1edee629bbf463eb81bfba8300b3785d8b3b8d758bdcafa862b80955b4f"}, - {file = "torchmetrics-1.3.1.tar.gz", hash = "sha256:8d371f7597a1a5eb02d5f2ed59642d6fef09093926997ce91e18b1147cc8defa"}, + {file = "torchmetrics-1.3.2-py3-none-any.whl", hash = "sha256:44ca3a9f86dc050cb3f554836ef291698ea797778457195b4f685fce8e2e64a3"}, + {file = "torchmetrics-1.3.2.tar.gz", hash = "sha256:0a67694a4c4265eeb54cda741eaf5cb1f3a71da74b7e7e6215ad156c9f2379f6"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index a19fa396..668fa8ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ build-backend = "pdm.backend" [project] name = "kaiko-eva" -version = "0.0.0.dev4" +version = "0.0.0.dev5" description = "Evaluation Framework for oncology foundation models." 
readme = "README.md" authors = [ @@ -21,14 +21,14 @@ maintainers = [ ] requires-python = ">=3.10" dependencies = [ - "pytorch-lightning>=2.1.4", - "loguru>=0.7.2", "jsonargparse[omegaconf]>=4.27.4", + "lightning>=2.2.1", + "tensorboard>=2.16.2", + "loguru>=0.7.2", "pandas>=2.2.0", "pyarrow>=15.0.0", "nibabel>=5.2.0", "timm>=0.9.12", - "tensorboard>=2.16.2", "transformers>=4.38.2", "onnxruntime>=1.17.1", "onnx>=1.15.0", diff --git a/src/eva/core/callbacks/writers/embeddings.py b/src/eva/core/callbacks/writers/embeddings.py index b6b3188c..40e706f1 100644 --- a/src/eva/core/callbacks/writers/embeddings.py +++ b/src/eva/core/callbacks/writers/embeddings.py @@ -5,10 +5,10 @@ import os from typing import Any, Dict, Sequence -import pytorch_lightning as pl +import lightning.pytorch as pl import torch from loguru import logger -from pytorch_lightning import callbacks +from lightning.pytorch import callbacks from torch import multiprocessing, nn from typing_extensions import override diff --git a/src/eva/core/data/datamodules/datamodule.py b/src/eva/core/data/datamodules/datamodule.py index ca049208..1f050ec7 100644 --- a/src/eva/core/data/datamodules/datamodule.py +++ b/src/eva/core/data/datamodules/datamodule.py @@ -2,8 +2,8 @@ from typing import List -import pytorch_lightning as pl -from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS +import lightning.pytorch as pl +from lightning.pytorch.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS from typing_extensions import override from eva.core.data import dataloaders as dataloaders_lib diff --git a/src/eva/core/models/modules/head.py b/src/eva/core/models/modules/head.py index 14e91648..0976e8f2 100644 --- a/src/eva/core/models/modules/head.py +++ b/src/eva/core/models/modules/head.py @@ -3,8 +3,8 @@ from typing import Any, Callable import torch -from pytorch_lightning.cli import LRSchedulerCallable, OptimizerCallable -from pytorch_lightning.utilities.types import STEP_OUTPUT +from 
lightning.pytorch.cli import LRSchedulerCallable, OptimizerCallable +from lightning.pytorch.utilities.types import STEP_OUTPUT from torch import optim from torch.optim import lr_scheduler from typing_extensions import override diff --git a/src/eva/core/models/modules/inference.py b/src/eva/core/models/modules/inference.py index d41f584f..2d0d9a3d 100644 --- a/src/eva/core/models/modules/inference.py +++ b/src/eva/core/models/modules/inference.py @@ -1,7 +1,7 @@ """Model inference module.""" import torch -from pytorch_lightning.utilities.types import STEP_OUTPUT +from lightning.pytorch.utilities.types import STEP_OUTPUT from typing_extensions import override from eva.core.models.modules import module diff --git a/src/eva/core/models/modules/module.py b/src/eva/core/models/modules/module.py index 7a25d185..cb5e222a 100644 --- a/src/eva/core/models/modules/module.py +++ b/src/eva/core/models/modules/module.py @@ -2,10 +2,10 @@ from typing import Any, Mapping -import pytorch_lightning as pl +import lightning.pytorch as pl import torch -from pytorch_lightning.utilities import memory -from pytorch_lightning.utilities.types import STEP_OUTPUT +from lightning.pytorch.utilities import memory +from lightning.pytorch.utilities.types import STEP_OUTPUT from typing_extensions import override from eva.core.metrics import structs as metrics_lib diff --git a/src/eva/core/models/modules/typings.py b/src/eva/core/models/modules/typings.py index 07289517..fa476bd1 100644 --- a/src/eva/core/models/modules/typings.py +++ b/src/eva/core/models/modules/typings.py @@ -2,7 +2,7 @@ from typing import Any, Dict, NamedTuple -import pytorch_lightning as pl +import lightning.pytorch as pl import torch from torch import nn diff --git a/src/eva/core/models/modules/utils/batch_postprocess.py b/src/eva/core/models/modules/utils/batch_postprocess.py index d0024943..902cb5c0 100644 --- a/src/eva/core/models/modules/utils/batch_postprocess.py +++ 
b/src/eva/core/models/modules/utils/batch_postprocess.py @@ -5,7 +5,7 @@ from typing import Callable, List import torch -from pytorch_lightning.utilities.types import STEP_OUTPUT +from lightning.pytorch.utilities.types import STEP_OUTPUT Transform = Callable[[torch.Tensor], torch.Tensor] """Post-process transform type.""" diff --git a/src/eva/core/trainers/_recorder.py b/src/eva/core/trainers/_recorder.py index 5d52d897..502d9497 100644 --- a/src/eva/core/trainers/_recorder.py +++ b/src/eva/core/trainers/_recorder.py @@ -10,7 +10,7 @@ from lightning_fabric.utilities import cloud_io from loguru import logger from omegaconf import OmegaConf -from pytorch_lightning.utilities.types import _EVALUATE_OUTPUT +from lightning.pytorch.utilities.types import _EVALUATE_OUTPUT from toolz import dicttoolz SESSION_METRICS = Mapping[str, List[float]] diff --git a/src/eva/core/trainers/functional.py b/src/eva/core/trainers/functional.py index 8a8a28d7..00f81f5e 100644 --- a/src/eva/core/trainers/functional.py +++ b/src/eva/core/trainers/functional.py @@ -2,7 +2,7 @@ from typing import Tuple -from pytorch_lightning.utilities.types import _EVALUATE_OUTPUT +from lightning.pytorch.utilities.types import _EVALUATE_OUTPUT from eva.core.data import datamodules from eva.core.models import modules diff --git a/src/eva/core/trainers/trainer.py b/src/eva/core/trainers/trainer.py index 168bcdf2..7b50d51a 100644 --- a/src/eva/core/trainers/trainer.py +++ b/src/eva/core/trainers/trainer.py @@ -3,9 +3,9 @@ import os from typing import Any -from pytorch_lightning import loggers as pl_loggers -from pytorch_lightning import trainer as pl_trainer -from pytorch_lightning.utilities import argparse +from lightning.pytorch import loggers as pl_loggers +from lightning.pytorch import trainer as pl_trainer +from lightning.pytorch.utilities import argparse from typing_extensions import override from eva.core.data import datamodules @@ -29,15 +29,15 @@ def __init__( ) -> None: """Initializes the trainer. 
- For the input arguments, refer to ::class::`pytorch_lightning.Trainer`. + For the input arguments, refer to ::class::`lightning.pytorch.Trainer`. Args: - args: Positional arguments of ::class::`pytorch_lightning.Trainer`. + args: Positional arguments of ::class::`lightning.pytorch.Trainer`. default_root_dir: The default root directory to store the output logs. - Unlike in ::class::`pytorch_lightning.Trainer`, this path would be the + Unlike in ::class::`lightning.pytorch.Trainer`, this path would be the prioritized destination point. n_runs: The amount of runs (fit and evaluate) to perform in an evaluation session. - kwargs: Kew-word arguments of ::class::`pytorch_lightning.Trainer`. + kwargs: Keyword arguments of ::class::`lightning.pytorch.Trainer`. """ super().__init__(*args, default_root_dir=default_root_dir, **kwargs) diff --git a/tests/eva/core/callbacks/writers/test_embeddings.py b/tests/eva/core/callbacks/writers/test_embeddings.py index 0256a2ba..ef0ea6e8 100644 --- a/tests/eva/core/callbacks/writers/test_embeddings.py +++ b/tests/eva/core/callbacks/writers/test_embeddings.py @@ -8,8 +8,8 @@ import pandas as pd import pytest -import pytorch_lightning as pl -from pytorch_lightning.demos import boring_classes +import lightning.pytorch as pl +from lightning.pytorch.demos import boring_classes from torch import nn from typing_extensions import override diff --git a/tests/eva/core/models/networks/wrappers/test_onnx.py b/tests/eva/core/models/networks/wrappers/test_onnx.py index cfcdaaab..de1d35c5 100644 --- a/tests/eva/core/models/networks/wrappers/test_onnx.py +++ b/tests/eva/core/models/networks/wrappers/test_onnx.py @@ -6,7 +6,7 @@ import pytest import torch -from pytorch_lightning.demos import boring_classes +from lightning.pytorch.demos import boring_classes from eva.core.models.networks import wrappers diff --git a/tests/eva/core/trainers/test_recorder.py b/tests/eva/core/trainers/test_recorder.py index 6e3f30e3..75924627 100644 ---
a/tests/eva/core/trainers/test_recorder.py +++ b/tests/eva/core/trainers/test_recorder.py @@ -5,7 +5,7 @@ from unittest import mock import pytest -from pytorch_lightning.utilities.types import _EVALUATE_OUTPUT +from lightning.pytorch.utilities.types import _EVALUATE_OUTPUT from eva.core.trainers import _recorder