From 90c7526be86013561d5f044e5f709911beee5286 Mon Sep 17 00:00:00 2001
From: camillebrianceau
Date: Fri, 11 Oct 2024 14:42:44 +0200
Subject: [PATCH 01/16] add API

---
 clinicadl/API_test_v2.py | 102 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)
 create mode 100644 clinicadl/API_test_v2.py

diff --git a/clinicadl/API_test_v2.py b/clinicadl/API_test_v2.py
new file mode 100644
index 000000000..de24565c7
--- /dev/null
+++ b/clinicadl/API_test_v2.py
@@ -0,0 +1,102 @@
# %% class
class MapsIO:
    pass


class CapsDataset:
    pass


class Splitter:
    pass


class ClinicaDLModels:
    pass


class Networks:
    pass


class VAE(Networks):
    pass


class Optimizer:
    pass


class Loss:
    pass


class Metrics:
    pass


class Trainer:
    pass


class Validator:
    pass


# %% maps
maps = MapsIO("/path/to/maps")  # creates a folder

# %% Dataset
data_config = {
    "caps_dir": "",
    "tsv": "",
    "mode": "",
}
capsdataset = CapsDataset(data_config, maps)  # torch.dataset

# %% Model
network = VAE()  # nn.module
loss = Loss()
optimizer = ClinicaDLOptim(
    Adam()
)  # get_optimizer({"name": "Adam", "par1": 0.5})  # torch.optim
model = ClinicaDLModels(
    network,
    loss,
    optimizer,
)

# %% Cross val
split_config = SplitterConfig()
splitter = Splitter(split_config, capsdataset)

# %% Metrics
metrics1 = Metrics("MAE")  # monai.metric
metrics2 = Metrics("MSE")  # monai.metric


# %% Option 1
for split in splitter.iterate():
    trainer = Trainer(split, maps, optimizer)
    validator = Validator(split, [metrics1, metrics2], maps)

    trainer.train(validator, model)


# %% Option 2
val = Validator([metrics1, metrics2], maps)
trainer = Trainer(val, maps)
for split in splitter.iterate():
    trainer.train(model, split)

# %% Option 3
trainer = Trainer(
    maps, [metrics1, metrics2]
)  # initializes a MAPS manager + initializes a validator
for split in splitter.iterate():
    model = ClinicaDLModels(
        network,
        loss,
        optimizer,
    )
    trainer.train(model, split)

From b0d54d06e6d550c562f8b1028d15e1ea225d69bc Mon Sep 17 00:00:00 2001
From: camillebrianceau <57992134+camillebrianceau@users.noreply.github.com>
Date: Tue, 29 Oct 2024 17:46:13 +0100
Subject: [PATCH 02/16] Update clinicadl/API_test_v2.py

---
 clinicadl/API_test_v2.py | 142 +++++++++++++++++++++------------------
 1 file changed, 76 insertions(+), 66 deletions(-)

diff --git a/clinicadl/API_test_v2.py b/clinicadl/API_test_v2.py
index de24565c7..0bf730d3c 100644
--- a/clinicadl/API_test_v2.py
+++ b/clinicadl/API_test_v2.py
@@ -1,102 +1,112 @@
from pathlib import Path

from clinicadl.caps_dataset2.config.preprocessing import PreprocessingConfig
from clinicadl.caps_dataset2.config.extraction import ExtractionConfig
from clinicadl.caps_dataset2.data import CapsDatasetRoi, CapsDatasetPatch, CapsDatasetSlice
from clinicadl.transforms.config import TransformsConfig
import torchio
from clinicadl import tsvtools
from clinicadl.trainer.trainer import Trainer


class ExperimentManager:
    pass


class CapsReader:
    pass


class Transforms:
    pass


class Predictor:
    pass


class ClinicaDLModel:
    pass


class KFolder:
    pass


def get_loss_function():
    pass


def get_network_from_config():
    pass


def create_network_config():
    pass


def get_single_split():
    pass


# stubs for names used below that this sketch does not define yet
# (assumed future ClinicaDL objects)
class CrossEntropyLossConfig:
    pass


class AdamConfig:
    pass


class ImplementedNetworks:
    CNN = "CNN"


class ConvEncoderOptions:
    pass


def concat_dataset(*datasets):
    pass


# Create the MAPS manager / read-write manager
maps_path = Path("/")
manager = ExperimentManager(maps_path, overwrite=False)

caps_directory = Path("caps_directory")  # output of clinica pipelines
caps_reader = CapsReader(caps_directory, manager=manager)

preprocessing_1: PreprocessingConfig = caps_reader.get_preprocessing("t1-linear")
extraction_1: ExtractionConfig = caps_reader.extract_slice(
    preprocessing=preprocessing_1, arg_slice=2
)
transforms_1 = Transforms(
    data_augmentation=[torchio.t1, torchio.t2],
    image_transforms=[torchio.t1, torchio.t2],
    object_transforms=[torchio.t1, torchio.t2],
)  # not mandatory

preprocessing_2: PreprocessingConfig = caps_reader.get_preprocessing("pet-linear")
extraction_2: ExtractionConfig = caps_reader.extract_patch(
    preprocessing=preprocessing_2, arg_patch=2
)
transforms_2 = Transforms(
    data_augmentation=[torchio.t2],
    image_transforms=[torchio.t1],
    object_transforms=[torchio.t1, torchio.t2],
)

sub_ses_tsv = Path("")
split_dir = tsvtools.split_tsv(sub_ses_tsv)  # -> creates a test.tsv and a train.tsv

dataset_t1_roi: CapsDatasetRoi = caps_reader.get_dataset(
    extraction=extraction_1,
    preprocessing=preprocessing_1,
    sub_ses_tsv=split_dir / "train.tsv",
    transforms=transforms_1,
)
dataset_pet_patch: CapsDatasetPatch = caps_reader.get_dataset(
    extraction=extraction_2,
    preprocessing=preprocessing_2,
    sub_ses_tsv=split_dir / "train.tsv",
    transforms=transforms_2,
)

# two train.tsv files come in and must be concatenated; the same care is
# needed for the transforms (a hypothetical pairing sketch follows below)
dataset_multi_modality_multi_extract = concat_dataset(dataset_t1_roi, dataset_pet_patch)
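# --- A minimal sketch (an assumption, not part of this patch) of what
# concat_dataset could do: pair two CapsDatasets item-wise, assuming both
# cover the same (participant, session) list in the same order.
# PairedCapsDataset is a hypothetical name, not an existing ClinicaDL API.
from torch.utils.data import Dataset


class PairedCapsDataset(Dataset):
    """Yields one sample per (participant, session) with both modalities."""

    def __init__(self, dataset_a, dataset_b):
        if len(dataset_a) != len(dataset_b):
            raise ValueError("datasets must cover the same subjects/sessions")
        self.dataset_a = dataset_a
        self.dataset_b = dataset_b

    def __len__(self):
        return len(self.dataset_a)

    def __getitem__(self, idx):
        # each item bundles the ROI sample and the patch sample
        return self.dataset_a[idx], self.dataset_b[idx]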
config_file = Path("config_file")
trainer = Trainer.from_json(config_file=config_file, manager=manager)

# CROSS-VALIDATION CASE
splitter = KFolder(
    n_splits=3, caps_dataset=dataset_multi_modality_multi_extract, manager=manager
)

for split in splitter.split_iterator(split_list=[0, 1]):
    # still to be defined precisely: what the split object contains

    loss, loss_config = get_loss_function(CrossEntropyLossConfig())
    network_config = create_network_config(ImplementedNetworks.CNN)(
        in_shape=[2, 2, 2],
        num_outputs=1,
        conv_args=ConvEncoderOptions(channels=[3, 2, 2]),
    )
    network = get_network_from_config(network_config)

    model = ClinicaDLModel(
        network=network,
        loss=loss,
        optimizer=AdamConfig(),
    )

    trainer.train(model, split)
    # the trainer will instantiate a predictor/validator in train() or in
    # __init__() (see the sketch below)
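# --- A hypothetical illustration (an assumption, not part of this patch) of
# the design mentioned above: the Trainer owns its validator and builds it in
# __init__, so callers never wire one manually. All names are placeholders.
class _ValidatorSketch:
    def __init__(self, metrics, manager):
        self.metrics = metrics
        self.manager = manager

    def evaluate(self, model, split):
        """Compute self.metrics for the model on the split's validation data."""


class _TrainerSketch:
    def __init__(self, manager, metrics):
        # the validator is created here, not passed in by the caller
        self.validator = _ValidatorSketch(metrics, manager)

    def train(self, model, split):
        # fit the model on the split's training data, then validate
        self.validator.evaluate(model, split)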
# SINGLE-SPLIT CASE
split = get_single_split(
    n_subject_validation=0,
    caps_dataset=dataset_multi_modality_multi_extract,
    manager=manager,
)

loss, loss_config = get_loss_function(CrossEntropyLossConfig())
network_config = create_network_config(ImplementedNetworks.CNN)(
    in_shape=[2, 2, 2],
    num_outputs=1,
    conv_args=ConvEncoderOptions(channels=[3, 2, 2]),
)
network = get_network_from_config(network_config)

model = ClinicaDLModel(
    network=network,
    loss=loss,
    optimizer=AdamConfig(),
)
trainer.train(model, split)
# the trainer will instantiate a predictor/validator in train() or in __init__()


# TEST
preprocessing_test: PreprocessingConfig = caps_reader.get_preprocessing("pet-linear")
extraction_test: ExtractionConfig = caps_reader.extract_patch(
    preprocessing=preprocessing_test, arg_patch=2
)
transforms_test = Transforms(
    data_augmentation=[torchio.t2],
    image_transforms=[torchio.t1],
    object_transforms=[torchio.t1, torchio.t2],
)

dataset_test: CapsDatasetPatch = caps_reader.get_dataset(
    extraction=extraction_test,
    preprocessing=preprocessing_test,
    sub_ses_tsv=split_dir / "test.tsv",
    transforms=transforms_test,
)

predictor = Predictor(manager=manager)
predictor.predict(dataset_test=dataset_test, split=2)

From ce26efb9ae82c051376d002cd64105ce08a139a0 Mon Sep 17 00:00:00 2001
From: thibaultdvx
Date: Wed, 16 Oct 2024 14:29:21 +0200
Subject: [PATCH 03/16] reset to solve rebase issue

---
 .github/workflows/test.yml |    2 +-
 docs/Installation.md       |    2 +-
 environment.yml            |    2 +-
 poetry.lock                | 3224 ++++++++++++++++++------------------
 pyproject.toml             |   12 +-
 5 files changed, 1632 insertions(+), 1610 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 219e86c2b..05af7550c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -17,7 +17,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
-        python-version: ['3.8', '3.9', '3.10', '3.11']
+        python-version: ['3.9', '3.10', '3.11', '3.12']
    steps:
      - uses: actions/checkout@v4
      - uses: snok/install-poetry@v1

diff --git a/docs/Installation.md b/docs/Installation.md
index 3f8c7dc22..4ccf75640 100644
--- a/docs/Installation.md
+++ b/docs/Installation.md
@@ -30,7 +30,7 @@ bash /tmp/miniconda-installer.sh
The latest release of ClinicaDL can be installed using `pip` as follows:

```{.sourceCode .bash}
-conda create --name clinicadlEnv python=3.8
+conda create --name clinicadlEnv python=3.11
conda activate clinicadlEnv
pip install clinicadl
```

diff --git a/environment.yml b/environment.yml
index 34f633da4..cc36c6c17 100644
--- a/environment.yml
+++ b/environment.yml
@@ -3,4 +3,4 @@ channels:
  - defaults
  - conda-forge
dependencies:
-  - python=3.9
+  - python=3.11

diff --git a/poetry.lock b/poetry.lock
index 34b282b1c..71072daf2 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -13,18 +13,16 @@ files = [

[[package]]
name = "alembic"
-version = "1.13.2"
+version = "1.13.3"
description = "A database migration tool for SQLAlchemy."
optional = false python-versions = ">=3.8" files = [ - {file = "alembic-1.13.2-py3-none-any.whl", hash = "sha256:6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953"}, - {file = "alembic-1.13.2.tar.gz", hash = "sha256:1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef"}, + {file = "alembic-1.13.3-py3-none-any.whl", hash = "sha256:908e905976d15235fae59c9ac42c4c5b75cfcefe3d27c0fbf7ae15a37715d80e"}, + {file = "alembic-1.13.3.tar.gz", hash = "sha256:203503117415561e203aa14541740643a611f641517f0209fcae63e9fa09f1a2"}, ] [package.dependencies] -importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} -importlib-resources = {version = "*", markers = "python_version < \"3.9\""} Mako = "*" SQLAlchemy = ">=1.3.0" typing-extensions = ">=4" @@ -57,9 +55,6 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "appdirs" version = "1.4.4" @@ -101,9 +96,6 @@ files = [ {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, ] -[package.dependencies] -pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} - [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] @@ -153,101 +145,116 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file 
= "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, 
- {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = 
"sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + 
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = 
"sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = 
"charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = 
"charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -285,24 +292,24 @@ tests-cov = ["coverage", "coveralls", "pytest", "pytest-cov"] [[package]] name = "cloudpickle" -version = "3.0.0" +version = "3.1.0" description = "Pickler class to extend the standard pickle.Pickler functionality" optional = false python-versions = ">=3.8" files = [ - {file = "cloudpickle-3.0.0-py3-none-any.whl", hash = "sha256:246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7"}, - {file = "cloudpickle-3.0.0.tar.gz", hash = "sha256:996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882"}, + {file = "cloudpickle-3.1.0-py3-none-any.whl", hash = "sha256:fe11acda67f61aaaec473e3afe030feb131d78a43461b718185363384f1ba12e"}, + {file = "cloudpickle-3.1.0.tar.gz", hash = "sha256:81a929b6e3c7335c863c771d673d105f02efdb89dfaba0c90495d1c64796601b"}, ] [[package]] name = "codecarbon" -version = "2.6.0" +version = "2.7.1" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "codecarbon-2.6.0-py3-none-any.whl", hash = "sha256:4e11467f7f844894512dd4cd623da27598c5e803941b45eed8438b06e9410c55"}, - {file = "codecarbon-2.6.0.tar.gz", hash = "sha256:05b0d39c60650ffa2e4b3eb72fdcbfe29c7853aa2e4c8ab37abea264a749057b"}, + {file = "codecarbon-2.7.1-py3-none-any.whl", hash = "sha256:d056e3a422b956f7902bb1b7c910487134d6079cdd42f0f6b99abaa9b7302d09"}, + {file = "codecarbon-2.7.1.tar.gz", hash = "sha256:b1e15cb6746a1b3760719f3534c458f59f22ee0f644a6d0070011093b1c84ee1"}, ] [package.dependencies] @@ -335,154 +342,157 @@ files = [ [[package]] name = "contourpy" -version = "1.1.1" +version = "1.3.0" 
description = "Python library for calculating contours of 2D quadrilateral grids" optional = false -python-versions = ">=3.8" -files = [ - {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"}, - {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"}, - {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"}, - {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"}, - {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"}, - {file = "contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"}, - {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"}, - {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"}, - {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"}, - {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"}, - {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"}, - {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"}, - {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"}, - {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"}, - {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"}, - {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"}, - {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"}, - {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"}, - {file = "contourpy-1.1.1-cp38-cp38-win32.whl", hash = "sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"}, - {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"}, - {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"}, - {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"}, - {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"}, - {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"}, - {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"}, - {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"}, +python-versions = ">=3.9" +files = [ + {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, + {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, + {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, + {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, + {file = 
"contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, + {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, + {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, + {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, + {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, + {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, + {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"}, + {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"}, + {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, + {file = 
"contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"}, + {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, ] [package.dependencies] -numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""} +numpy = ">=1.23" [package.extras] bokeh = ["bokeh", "selenium"] docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] -mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"] test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] -test-no-images = ["pytest", "pytest-cov", "wurlitzer"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] [[package]] name = "coverage" -version = "7.6.1" +version = "7.6.3" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.8" -files = [ - {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, - {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, - {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, - {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, - {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, - {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, - {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, - {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, - {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, - {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, - {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, - {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, - {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, - {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, - {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, - {file = 
"coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, - {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, - {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, - {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, - {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, - {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, - {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, - {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, - {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, - {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, - {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, - {file = 
"coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, - {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, - {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, - {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, - {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, - {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, - {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, - {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, - {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, - {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, - {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, +python-versions = ">=3.9" +files = [ + {file = "coverage-7.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6da42bbcec130b188169107ecb6ee7bd7b4c849d24c9370a0c884cf728d8e976"}, + {file = "coverage-7.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c222958f59b0ae091f4535851cbb24eb57fc0baea07ba675af718fb5302dddb2"}, + {file = "coverage-7.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab84a8b698ad5a6c365b08061920138e7a7dd9a04b6feb09ba1bfae68346ce6d"}, + {file = "coverage-7.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70a6756ce66cd6fe8486c775b30889f0dc4cb20c157aa8c35b45fd7868255c5c"}, + {file = "coverage-7.6.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c2e6fa98032fec8282f6b27e3f3986c6e05702828380618776ad794e938f53a"}, + {file = "coverage-7.6.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:921fbe13492caf6a69528f09d5d7c7d518c8d0e7b9f6701b7719715f29a71e6e"}, + {file = "coverage-7.6.3-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:6d99198203f0b9cb0b5d1c0393859555bc26b548223a769baf7e321a627ed4fc"}, + {file = "coverage-7.6.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:87cd2e29067ea397a47e352efb13f976eb1b03e18c999270bb50589323294c6e"}, + {file = "coverage-7.6.3-cp310-cp310-win32.whl", hash = "sha256:a3328c3e64ea4ab12b85999eb0779e6139295bbf5485f69d42cf794309e3d007"}, + {file = "coverage-7.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:bca4c8abc50d38f9773c1ec80d43f3768df2e8576807d1656016b9d3eeaa96fd"}, + {file = "coverage-7.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c51ef82302386d686feea1c44dbeef744585da16fcf97deea2a8d6c1556f519b"}, + {file = "coverage-7.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0ca37993206402c6c35dc717f90d4c8f53568a8b80f0bf1a1b2b334f4d488fba"}, + {file = "coverage-7.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c77326300b839c44c3e5a8fe26c15b7e87b2f32dfd2fc9fee1d13604347c9b38"}, + {file = "coverage-7.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e484e479860e00da1f005cd19d1c5d4a813324e5951319ac3f3eefb497cc549"}, + {file = "coverage-7.6.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c6c0f4d53ef603397fc894a895b960ecd7d44c727df42a8d500031716d4e8d2"}, + {file = "coverage-7.6.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:37be7b5ea3ff5b7c4a9db16074dc94523b5f10dd1f3b362a827af66a55198175"}, + {file = "coverage-7.6.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:43b32a06c47539fe275106b376658638b418c7cfdfff0e0259fbf877e845f14b"}, + {file = "coverage-7.6.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ee77c7bef0724165e795b6b7bf9c4c22a9b8468a6bdb9c6b4281293c6b22a90f"}, + {file = "coverage-7.6.3-cp311-cp311-win32.whl", hash = "sha256:43517e1f6b19f610a93d8227e47790722c8bf7422e46b365e0469fc3d3563d97"}, + {file = "coverage-7.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:04f2189716e85ec9192df307f7c255f90e78b6e9863a03223c3b998d24a3c6c6"}, + {file = "coverage-7.6.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27bd5f18d8f2879e45724b0ce74f61811639a846ff0e5c0395b7818fae87aec6"}, + {file = "coverage-7.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d546cfa78844b8b9c1c0533de1851569a13f87449897bbc95d698d1d3cb2a30f"}, + {file = "coverage-7.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9975442f2e7a5cfcf87299c26b5a45266ab0696348420049b9b94b2ad3d40234"}, + {file = "coverage-7.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:583049c63106c0555e3ae3931edab5669668bbef84c15861421b94e121878d3f"}, + {file = "coverage-7.6.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2341a78ae3a5ed454d524206a3fcb3cec408c2a0c7c2752cd78b606a2ff15af4"}, + {file = "coverage-7.6.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a4fb91d5f72b7e06a14ff4ae5be625a81cd7e5f869d7a54578fc271d08d58ae3"}, + {file = "coverage-7.6.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e279f3db904e3b55f520f11f983cc8dc8a4ce9b65f11692d4718ed021ec58b83"}, + {file = "coverage-7.6.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aa23ce39661a3e90eea5f99ec59b763b7d655c2cada10729ed920a38bfc2b167"}, + {file = "coverage-7.6.3-cp312-cp312-win32.whl", hash = "sha256:52ac29cc72ee7e25ace7807249638f94c9b6a862c56b1df015d2b2e388e51dbd"}, + {file = 
"coverage-7.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:40e8b1983080439d4802d80b951f4a93d991ef3261f69e81095a66f86cf3c3c6"}, + {file = "coverage-7.6.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9134032f5aa445ae591c2ba6991d10136a1f533b1d2fa8f8c21126468c5025c6"}, + {file = "coverage-7.6.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:99670790f21a96665a35849990b1df447993880bb6463a0a1d757897f30da929"}, + {file = "coverage-7.6.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc7d6b380ca76f5e817ac9eef0c3686e7834c8346bef30b041a4ad286449990"}, + {file = "coverage-7.6.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7b26757b22faf88fcf232f5f0e62f6e0fd9e22a8a5d0d5016888cdfe1f6c1c4"}, + {file = "coverage-7.6.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c59d6a4a4633fad297f943c03d0d2569867bd5372eb5684befdff8df8522e39"}, + {file = "coverage-7.6.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f263b18692f8ed52c8de7f40a0751e79015983dbd77b16906e5b310a39d3ca21"}, + {file = "coverage-7.6.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:79644f68a6ff23b251cae1c82b01a0b51bc40c8468ca9585c6c4b1aeee570e0b"}, + {file = "coverage-7.6.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:71967c35828c9ff94e8c7d405469a1fb68257f686bca7c1ed85ed34e7c2529c4"}, + {file = "coverage-7.6.3-cp313-cp313-win32.whl", hash = "sha256:e266af4da2c1a4cbc6135a570c64577fd3e6eb204607eaff99d8e9b710003c6f"}, + {file = "coverage-7.6.3-cp313-cp313-win_amd64.whl", hash = "sha256:ea52bd218d4ba260399a8ae4bb6b577d82adfc4518b93566ce1fddd4a49d1dce"}, + {file = "coverage-7.6.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8d4c6ea0f498c7c79111033a290d060c517853a7bcb2f46516f591dab628ddd3"}, + {file = "coverage-7.6.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:331b200ad03dbaa44151d74daeb7da2cf382db424ab923574f6ecca7d3b30de3"}, + {file = "coverage-7.6.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54356a76b67cf8a3085818026bb556545ebb8353951923b88292556dfa9f812d"}, + {file = "coverage-7.6.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ebec65f5068e7df2d49466aab9128510c4867e532e07cb6960075b27658dca38"}, + {file = "coverage-7.6.3-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d33a785ea8354c480515e781554d3be582a86297e41ccbea627a5c632647f2cd"}, + {file = "coverage-7.6.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f7ddb920106bbbbcaf2a274d56f46956bf56ecbde210d88061824a95bdd94e92"}, + {file = "coverage-7.6.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:70d24936ca6c15a3bbc91ee9c7fc661132c6f4c9d42a23b31b6686c05073bde5"}, + {file = "coverage-7.6.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c30e42ea11badb147f0d2e387115b15e2bd8205a5ad70d6ad79cf37f6ac08c91"}, + {file = "coverage-7.6.3-cp313-cp313t-win32.whl", hash = "sha256:365defc257c687ce3e7d275f39738dcd230777424117a6c76043459db131dd43"}, + {file = "coverage-7.6.3-cp313-cp313t-win_amd64.whl", hash = "sha256:23bb63ae3f4c645d2d82fa22697364b0046fbafb6261b258a58587441c5f7bd0"}, + {file = "coverage-7.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:da29ceabe3025a1e5a5aeeb331c5b1af686daab4ff0fb4f83df18b1180ea83e2"}, + {file = "coverage-7.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:df8c05a0f574d480947cba11b947dc41b1265d721c3777881da2fb8d3a1ddfba"}, + {file = "coverage-7.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1e3b40b82236d100d259854840555469fad4db64f669ab817279eb95cd535c"}, + {file = "coverage-7.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4adeb878a374126f1e5cf03b87f66279f479e01af0e9a654cf6d1509af46c40"}, + {file = "coverage-7.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43d6a66e33b1455b98fc7312b124296dad97a2e191c80320587234a77b1b736e"}, + {file = "coverage-7.6.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1990b1f4e2c402beb317840030bb9f1b6a363f86e14e21b4212e618acdfce7f6"}, + {file = "coverage-7.6.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:12f9515d875859faedb4144fd38694a761cd2a61ef9603bf887b13956d0bbfbb"}, + {file = "coverage-7.6.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:99ded130555c021d99729fabd4ddb91a6f4cc0707df4b1daf912c7850c373b13"}, + {file = "coverage-7.6.3-cp39-cp39-win32.whl", hash = "sha256:c3a79f56dee9136084cf84a6c7c4341427ef36e05ae6415bf7d787c96ff5eaa3"}, + {file = "coverage-7.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:aac7501ae73d4a02f4b7ac8fcb9dc55342ca98ffb9ed9f2dfb8a25d53eda0e4d"}, + {file = "coverage-7.6.3-pp39.pp310-none-any.whl", hash = "sha256:b9853509b4bf57ba7b1f99b9d866c422c9c5248799ab20e652bbb8a184a38181"}, + {file = "coverage-7.6.3.tar.gz", hash = "sha256:bb7d5fe92bd0dc235f63ebe9f8c6e0884f7360f88f3411bfed1350c872ef2054"}, ] [package.dependencies] @@ -508,13 +518,13 @@ tests = ["pytest", "pytest-cov", "pytest-xdist"] [[package]] name = "databricks-sdk" -version = "0.31.1" +version = "0.34.0" description = "Databricks SDK for Python (Beta)" optional = false python-versions = ">=3.7" files = [ - {file = "databricks_sdk-0.31.1-py3-none-any.whl", hash = "sha256:9ab286f87ae1cc98a00ef7d207e40661f4d14a464071425ad169d235919b35f6"}, - {file = "databricks_sdk-0.31.1.tar.gz", hash = "sha256:8609e655d0e5ecb15c2a8a6468e737f8dcb4f28c33239388de3ab386b921d790"}, + {file = "databricks_sdk-0.34.0-py3-none-any.whl", hash = "sha256:8c8e023007041fee275764067013ccf9e119509047f0670aee71a7831c8efaec"}, + {file = "databricks_sdk-0.34.0.tar.gz", hash = "sha256:1d4ec47783cf17cb6fc2aec43025625e04519f01dbb1696d621ed3cacdb64eb5"}, ] [package.dependencies] @@ -544,13 +554,13 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "distlib" -version = "0.3.8" +version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] @@ -630,19 +640,19 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] [[package]] name = "filelock" -version = "3.15.4" +version = "3.16.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "flask" @@ -669,53 +679,59 @@ dotenv = ["python-dotenv"] [[package]] name = "fonttools" -version = "4.53.1" +version = "4.54.1" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.53.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0679a30b59d74b6242909945429dbddb08496935b82f91ea9bf6ad240ec23397"}, - {file = "fonttools-4.53.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8bf06b94694251861ba7fdeea15c8ec0967f84c3d4143ae9daf42bbc7717fe3"}, - {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b96cd370a61f4d083c9c0053bf634279b094308d52fdc2dd9a22d8372fdd590d"}, - {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1c7c5aa18dd3b17995898b4a9b5929d69ef6ae2af5b96d585ff4005033d82f0"}, - {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e013aae589c1c12505da64a7d8d023e584987e51e62006e1bb30d72f26522c41"}, - {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9efd176f874cb6402e607e4cc9b4a9cd584d82fc34a4b0c811970b32ba62501f"}, - {file = "fonttools-4.53.1-cp310-cp310-win32.whl", hash = "sha256:c8696544c964500aa9439efb6761947393b70b17ef4e82d73277413f291260a4"}, - {file = "fonttools-4.53.1-cp310-cp310-win_amd64.whl", hash = "sha256:8959a59de5af6d2bec27489e98ef25a397cfa1774b375d5787509c06659b3671"}, - {file = "fonttools-4.53.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:da33440b1413bad53a8674393c5d29ce64d8c1a15ef8a77c642ffd900d07bfe1"}, - {file = "fonttools-4.53.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ff7e5e9bad94e3a70c5cd2fa27f20b9bb9385e10cddab567b85ce5d306ea923"}, - {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6e7170d675d12eac12ad1a981d90f118c06cf680b42a2d74c6c931e54b50719"}, - {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee32ea8765e859670c4447b0817514ca79054463b6b79784b08a8df3a4d78e3"}, - {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6e08f572625a1ee682115223eabebc4c6a2035a6917eac6f60350aba297ccadb"}, - {file = 
"fonttools-4.53.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b21952c092ffd827504de7e66b62aba26fdb5f9d1e435c52477e6486e9d128b2"}, - {file = "fonttools-4.53.1-cp311-cp311-win32.whl", hash = "sha256:9dfdae43b7996af46ff9da520998a32b105c7f098aeea06b2226b30e74fbba88"}, - {file = "fonttools-4.53.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4d0096cb1ac7a77b3b41cd78c9b6bc4a400550e21dc7a92f2b5ab53ed74eb02"}, - {file = "fonttools-4.53.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d92d3c2a1b39631a6131c2fa25b5406855f97969b068e7e08413325bc0afba58"}, - {file = "fonttools-4.53.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3b3c8ebafbee8d9002bd8f1195d09ed2bd9ff134ddec37ee8f6a6375e6a4f0e8"}, - {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32f029c095ad66c425b0ee85553d0dc326d45d7059dbc227330fc29b43e8ba60"}, - {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f5e6c3510b79ea27bb1ebfcc67048cde9ec67afa87c7dd7efa5c700491ac7f"}, - {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f677ce218976496a587ab17140da141557beb91d2a5c1a14212c994093f2eae2"}, - {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9e6ceba2a01b448e36754983d376064730690401da1dd104ddb543519470a15f"}, - {file = "fonttools-4.53.1-cp312-cp312-win32.whl", hash = "sha256:791b31ebbc05197d7aa096bbc7bd76d591f05905d2fd908bf103af4488e60670"}, - {file = "fonttools-4.53.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ed170b5e17da0264b9f6fae86073be3db15fa1bd74061c8331022bca6d09bab"}, - {file = "fonttools-4.53.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c818c058404eb2bba05e728d38049438afd649e3c409796723dfc17cd3f08749"}, - {file = "fonttools-4.53.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:651390c3b26b0c7d1f4407cad281ee7a5a85a31a110cbac5269de72a51551ba2"}, - {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e54f1bba2f655924c1138bbc7fa91abd61f45c68bd65ab5ed985942712864bbb"}, - {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9cd19cf4fe0595ebdd1d4915882b9440c3a6d30b008f3cc7587c1da7b95be5f"}, - {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2af40ae9cdcb204fc1d8f26b190aa16534fcd4f0df756268df674a270eab575d"}, - {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:35250099b0cfb32d799fb5d6c651220a642fe2e3c7d2560490e6f1d3f9ae9169"}, - {file = "fonttools-4.53.1-cp38-cp38-win32.whl", hash = "sha256:f08df60fbd8d289152079a65da4e66a447efc1d5d5a4d3f299cdd39e3b2e4a7d"}, - {file = "fonttools-4.53.1-cp38-cp38-win_amd64.whl", hash = "sha256:7b6b35e52ddc8fb0db562133894e6ef5b4e54e1283dff606fda3eed938c36fc8"}, - {file = "fonttools-4.53.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75a157d8d26c06e64ace9df037ee93a4938a4606a38cb7ffaf6635e60e253b7a"}, - {file = "fonttools-4.53.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4824c198f714ab5559c5be10fd1adf876712aa7989882a4ec887bf1ef3e00e31"}, - {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:becc5d7cb89c7b7afa8321b6bb3dbee0eec2b57855c90b3e9bf5fb816671fa7c"}, - {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ec3fb43befb54be490147b4a922b5314e16372a643004f182babee9f9c3407"}, - {file = 
"fonttools-4.53.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:73379d3ffdeecb376640cd8ed03e9d2d0e568c9d1a4e9b16504a834ebadc2dfb"}, - {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02569e9a810f9d11f4ae82c391ebc6fb5730d95a0657d24d754ed7763fb2d122"}, - {file = "fonttools-4.53.1-cp39-cp39-win32.whl", hash = "sha256:aae7bd54187e8bf7fd69f8ab87b2885253d3575163ad4d669a262fe97f0136cb"}, - {file = "fonttools-4.53.1-cp39-cp39-win_amd64.whl", hash = "sha256:e5b708073ea3d684235648786f5f6153a48dc8762cdfe5563c57e80787c29fbb"}, - {file = "fonttools-4.53.1-py3-none-any.whl", hash = "sha256:f1f8758a2ad110bd6432203a344269f445a2907dc24ef6bccfd0ac4e14e0d71d"}, - {file = "fonttools-4.53.1.tar.gz", hash = "sha256:e128778a8e9bc11159ce5447f76766cefbd876f44bd79aff030287254e4752c4"}, + {file = "fonttools-4.54.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ed7ee041ff7b34cc62f07545e55e1468808691dddfd315d51dd82a6b37ddef2"}, + {file = "fonttools-4.54.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41bb0b250c8132b2fcac148e2e9198e62ff06f3cc472065dff839327945c5882"}, + {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7965af9b67dd546e52afcf2e38641b5be956d68c425bef2158e95af11d229f10"}, + {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278913a168f90d53378c20c23b80f4e599dca62fbffae4cc620c8eed476b723e"}, + {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0e88e3018ac809b9662615072dcd6b84dca4c2d991c6d66e1970a112503bba7e"}, + {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4817f0031206e637d1e685251ac61be64d1adef111060df84fdcbc6ab6c44"}, + {file = "fonttools-4.54.1-cp310-cp310-win32.whl", hash = "sha256:7e3b7d44e18c085fd8c16dcc6f1ad6c61b71ff463636fcb13df7b1b818bd0c02"}, + {file = "fonttools-4.54.1-cp310-cp310-win_amd64.whl", hash = "sha256:dd9cc95b8d6e27d01e1e1f1fae8559ef3c02c76317da650a19047f249acd519d"}, + {file = "fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5419771b64248484299fa77689d4f3aeed643ea6630b2ea750eeab219588ba20"}, + {file = "fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:301540e89cf4ce89d462eb23a89464fef50915255ece765d10eee8b2bf9d75b2"}, + {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ae5091547e74e7efecc3cbf8e75200bc92daaeb88e5433c5e3e95ea8ce5aa7"}, + {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82834962b3d7c5ca98cb56001c33cf20eb110ecf442725dc5fdf36d16ed1ab07"}, + {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d26732ae002cc3d2ecab04897bb02ae3f11f06dd7575d1df46acd2f7c012a8d8"}, + {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58974b4987b2a71ee08ade1e7f47f410c367cdfc5a94fabd599c88165f56213a"}, + {file = "fonttools-4.54.1-cp311-cp311-win32.whl", hash = "sha256:ab774fa225238986218a463f3fe151e04d8c25d7de09df7f0f5fce27b1243dbc"}, + {file = "fonttools-4.54.1-cp311-cp311-win_amd64.whl", hash = "sha256:07e005dc454eee1cc60105d6a29593459a06321c21897f769a281ff2d08939f6"}, + {file = "fonttools-4.54.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:54471032f7cb5fca694b5f1a0aaeba4af6e10ae989df408e0216f7fd6cdc405d"}, + {file = "fonttools-4.54.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fa92cb248e573daab8d032919623cc309c005086d743afb014c836636166f08"}, + {file 
= "fonttools-4.54.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a911591200114969befa7f2cb74ac148bce5a91df5645443371aba6d222e263"}, + {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93d458c8a6a354dc8b48fc78d66d2a8a90b941f7fec30e94c7ad9982b1fa6bab"}, + {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5eb2474a7c5be8a5331146758debb2669bf5635c021aee00fd7c353558fc659d"}, + {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9c563351ddc230725c4bdf7d9e1e92cbe6ae8553942bd1fb2b2ff0884e8b714"}, + {file = "fonttools-4.54.1-cp312-cp312-win32.whl", hash = "sha256:fdb062893fd6d47b527d39346e0c5578b7957dcea6d6a3b6794569370013d9ac"}, + {file = "fonttools-4.54.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4564cf40cebcb53f3dc825e85910bf54835e8a8b6880d59e5159f0f325e637e"}, + {file = "fonttools-4.54.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6e37561751b017cf5c40fce0d90fd9e8274716de327ec4ffb0df957160be3bff"}, + {file = "fonttools-4.54.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357cacb988a18aace66e5e55fe1247f2ee706e01debc4b1a20d77400354cddeb"}, + {file = "fonttools-4.54.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e953cc0bddc2beaf3a3c3b5dd9ab7554677da72dfaf46951e193c9653e515a"}, + {file = "fonttools-4.54.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58d29b9a294573d8319f16f2f79e42428ba9b6480442fa1836e4eb89c4d9d61c"}, + {file = "fonttools-4.54.1-cp313-cp313-win32.whl", hash = "sha256:9ef1b167e22709b46bf8168368b7b5d3efeaaa746c6d39661c1b4405b6352e58"}, + {file = "fonttools-4.54.1-cp313-cp313-win_amd64.whl", hash = "sha256:262705b1663f18c04250bd1242b0515d3bbae177bee7752be67c979b7d47f43d"}, + {file = "fonttools-4.54.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ed2f80ca07025551636c555dec2b755dd005e2ea8fbeb99fc5cdff319b70b23b"}, + {file = "fonttools-4.54.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9dc080e5a1c3b2656caff2ac2633d009b3a9ff7b5e93d0452f40cd76d3da3b3c"}, + {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d152d1be65652fc65e695e5619e0aa0982295a95a9b29b52b85775243c06556"}, + {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8583e563df41fdecef31b793b4dd3af8a9caa03397be648945ad32717a92885b"}, + {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d1d353ef198c422515a3e974a1e8d5b304cd54a4c2eebcae708e37cd9eeffb1"}, + {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fda582236fee135d4daeca056c8c88ec5f6f6d88a004a79b84a02547c8f57386"}, + {file = "fonttools-4.54.1-cp38-cp38-win32.whl", hash = "sha256:e7d82b9e56716ed32574ee106cabca80992e6bbdcf25a88d97d21f73a0aae664"}, + {file = "fonttools-4.54.1-cp38-cp38-win_amd64.whl", hash = "sha256:ada215fd079e23e060157aab12eba0d66704316547f334eee9ff26f8c0d7b8ab"}, + {file = "fonttools-4.54.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f5b8a096e649768c2f4233f947cf9737f8dbf8728b90e2771e2497c6e3d21d13"}, + {file = "fonttools-4.54.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e10d2e0a12e18f4e2dd031e1bf7c3d7017be5c8dbe524d07706179f355c5dac"}, + {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:31c32d7d4b0958600eac75eaf524b7b7cb68d3a8c196635252b7a2c30d80e986"}, + {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c39287f5c8f4a0c5a55daf9eaf9ccd223ea59eed3f6d467133cc727d7b943a55"}, + {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a7a310c6e0471602fe3bf8efaf193d396ea561486aeaa7adc1f132e02d30c4b9"}, + {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d3b659d1029946f4ff9b6183984578041b520ce0f8fb7078bb37ec7445806b33"}, + {file = "fonttools-4.54.1-cp39-cp39-win32.whl", hash = "sha256:e96bc94c8cda58f577277d4a71f51c8e2129b8b36fd05adece6320dd3d57de8a"}, + {file = "fonttools-4.54.1-cp39-cp39-win_amd64.whl", hash = "sha256:e8a4b261c1ef91e7188a30571be6ad98d1c6d9fa2427244c545e2fa0a2494dd7"}, + {file = "fonttools-4.54.1-py3-none-any.whl", hash = "sha256:37cddd62d83dc4f72f7c3f3c2bcf2697e89a30efb152079896544a93907733bd"}, + {file = "fonttools-4.54.1.tar.gz", hash = "sha256:957f669d4922f92c171ba01bef7f29410668db09f6c02111e22b2bce446f3285"}, ] [package.extras] @@ -734,13 +750,13 @@ woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] [[package]] name = "fsspec" -version = "2024.6.1" +version = "2024.9.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, - {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"}, + {file = "fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"}, + {file = "fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8"}, ] [package.extras] @@ -822,13 +838,13 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", [[package]] name = "google-auth" -version = "2.34.0" +version = "2.35.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"}, - {file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"}, + {file = "google_auth-2.35.0-py2.py3-none-any.whl", hash = "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f"}, + {file = "google_auth-2.35.0.tar.gz", hash = "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a"}, ] [package.dependencies] @@ -843,24 +859,6 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] -[[package]] -name = "google-auth-oauthlib" -version = "1.0.0" -description = "Google Authentication Library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "google-auth-oauthlib-1.0.0.tar.gz", hash = "sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5"}, - {file = "google_auth_oauthlib-1.0.0-py2.py3-none-any.whl", hash = "sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb"}, -] - -[package.dependencies] -google-auth = ">=2.15.0" -requests-oauthlib = ">=0.7.0" - -[package.extras] -tool = ["click (>=6.0.0)"] - [[package]] name = "graphene" version = "3.3" @@ -883,15 +881,18 @@ test = ["coveralls (>=3.3,<4)", "iso8601 (>=1,<2)", "mock (>=4,<5)", 
"pytest (>= [[package]] name = "graphql-core" -version = "3.2.3" +version = "3.2.5" description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." optional = false -python-versions = ">=3.6,<4" +python-versions = "<4,>=3.6" files = [ - {file = "graphql-core-3.2.3.tar.gz", hash = "sha256:06d2aad0ac723e35b1cb47885d3e5c45e956a53bc1b209a9fc5369007fe46676"}, - {file = "graphql_core-3.2.3-py3-none-any.whl", hash = "sha256:5766780452bd5ec8ba133f8bf287dc92713e3868ddd83aee4faab9fc3e303dc3"}, + {file = "graphql_core-3.2.5-py3-none-any.whl", hash = "sha256:2f150d5096448aa4f8ab26268567bbfeef823769893b39c1a2e1409590939c8a"}, + {file = "graphql_core-3.2.5.tar.gz", hash = "sha256:e671b90ed653c808715645e3998b7ab67d382d55467b7e2978549111bbabf8d5"}, ] +[package.dependencies] +typing-extensions = {version = ">=4,<5", markers = "python_version < \"3.10\""} + [[package]] name = "graphql-relay" version = "3.2.0" @@ -908,69 +909,84 @@ graphql-core = ">=3.2,<3.3" [[package]] name = "greenlet" -version = "3.0.3" +version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = 
"greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = 
"sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = 
"greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] @@ -979,61 +995,70 @@ test = ["objgraph", "psutil"] [[package]] name = "grpcio" -version = "1.66.1" +version = "1.67.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" files = [ - {file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"}, - {file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a013c5fbb12bfb5f927444b477a26f1080755a931d5d362e6a9a720ca7dbae60"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1b24c23d51a1e8790b25514157d43f0a4dce1ac12b3f0b8e9f66a5e2c4c132f"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffb8ea674d68de4cac6f57d2498fef477cef582f1fa849e9f844863af50083"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:307b1d538140f19ccbd3aed7a93d8f71103c5d525f3c96f8616111614b14bf2a"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c17ebcec157cfb8dd445890a03e20caf6209a5bd4ac5b040ae9dbc59eef091d"}, - {file = "grpcio-1.66.1-cp310-cp310-win32.whl", hash = "sha256:ef82d361ed5849d34cf09105d00b94b6728d289d6b9235513cb2fcc79f7c432c"}, - {file = "grpcio-1.66.1-cp310-cp310-win_amd64.whl", hash = "sha256:292a846b92cdcd40ecca46e694997dd6b9be6c4c01a94a0dfb3fcb75d20da858"}, - {file = "grpcio-1.66.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:c30aeceeaff11cd5ddbc348f37c58bcb96da8d5aa93fed78ab329de5f37a0d7a"}, - {file = "grpcio-1.66.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8a1e224ce6f740dbb6b24c58f885422deebd7eb724aff0671a847f8951857c26"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a66fe4dc35d2330c185cfbb42959f57ad36f257e0cc4557d11d9f0a3f14311df"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ba04659e4fce609de2658fe4dbf7d6ed21987a94460f5f92df7579fd5d0e22"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4573608e23f7e091acfbe3e84ac2045680b69751d8d67685ffa193a4429fedb1"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7e06aa1f764ec8265b19d8f00140b8c4b6ca179a6dc67aa9413867c47e1fb04e"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3885f037eb11f1cacc41f207b705f38a44b69478086f40608959bf5ad85826dd"}, - {file = "grpcio-1.66.1-cp311-cp311-win32.whl", hash = "sha256:97ae7edd3f3f91480e48ede5d3e7d431ad6005bfdbd65c1b56913799ec79e791"}, - {file = "grpcio-1.66.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:cfd349de4158d797db2bd82d2020554a121674e98fbe6b15328456b3bf2495bb"}, - {file = "grpcio-1.66.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:a92c4f58c01c77205df6ff999faa008540475c39b835277fb8883b11cada127a"}, - {file = "grpcio-1.66.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fdb14bad0835914f325349ed34a51940bc2ad965142eb3090081593c6e347be9"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f03a5884c56256e08fd9e262e11b5cfacf1af96e2ce78dc095d2c41ccae2c80d"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ca2559692d8e7e245d456877a85ee41525f3ed425aa97eb7a70fc9a79df91a0"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca1be089fb4446490dd1135828bd42a7c7f8421e74fa581611f7afdf7ab761"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d639c939ad7c440c7b2819a28d559179a4508783f7e5b991166f8d7a34b52815"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b9feb4e5ec8dc2d15709f4d5fc367794d69277f5d680baf1910fc9915c633524"}, - {file = "grpcio-1.66.1-cp312-cp312-win32.whl", hash = "sha256:7101db1bd4cd9b880294dec41a93fcdce465bdbb602cd8dc5bd2d6362b618759"}, - {file = "grpcio-1.66.1-cp312-cp312-win_amd64.whl", hash = "sha256:b0aa03d240b5539648d996cc60438f128c7f46050989e35b25f5c18286c86734"}, - {file = "grpcio-1.66.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:ecfe735e7a59e5a98208447293ff8580e9db1e890e232b8b292dc8bd15afc0d2"}, - {file = "grpcio-1.66.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4825a3aa5648010842e1c9d35a082187746aa0cdbf1b7a2a930595a94fb10fce"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f517fd7259fe823ef3bd21e508b653d5492e706e9f0ef82c16ce3347a8a5620c"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1fe60d0772831d96d263b53d83fb9a3d050a94b0e94b6d004a5ad111faa5b5b"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31a049daa428f928f21090403e5d18ea02670e3d5d172581670be006100db9ef"}, - {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f914386e52cbdeb5d2a7ce3bf1fdfacbe9d818dd81b6099a05b741aaf3848bb"}, - {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bff2096bdba686019fb32d2dde45b95981f0d1490e054400f70fc9a8af34b49d"}, - {file = "grpcio-1.66.1-cp38-cp38-win32.whl", hash = "sha256:aa8ba945c96e73de29d25331b26f3e416e0c0f621e984a3ebdb2d0d0b596a3b3"}, - {file = "grpcio-1.66.1-cp38-cp38-win_amd64.whl", hash = "sha256:161d5c535c2bdf61b95080e7f0f017a1dfcb812bf54093e71e5562b16225b4ce"}, - {file = "grpcio-1.66.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:d0cd7050397b3609ea51727b1811e663ffda8bda39c6a5bb69525ef12414b503"}, - {file = "grpcio-1.66.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e6c9b42ded5d02b6b1fea3a25f036a2236eeb75d0579bfd43c0018c88bf0a3e"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c9f80f9fad93a8cf71c7f161778ba47fd730d13a343a46258065c4deb4b550c0"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dd67ed9da78e5121efc5c510f0122a972216808d6de70953a740560c572eb44"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b0d92d45ce3be2084b92fb5bae2f64c208fea8ceed7fccf6a7b524d3c4942e"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:4d813316d1a752be6f5c4360c49f55b06d4fe212d7df03253dfdae90c8a402bb"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c9bebc6627873ec27a70fc800f6083a13c70b23a5564788754b9ee52c5aef6c"}, - {file = "grpcio-1.66.1-cp39-cp39-win32.whl", hash = "sha256:30a1c2cf9390c894c90bbc70147f2372130ad189cffef161f0432d0157973f45"}, - {file = "grpcio-1.66.1-cp39-cp39-win_amd64.whl", hash = "sha256:17663598aadbedc3cacd7bbde432f541c8e07d2496564e22b214b22c7523dac8"}, - {file = "grpcio-1.66.1.tar.gz", hash = "sha256:35334f9c9745add3e357e3372756fd32d925bd52c41da97f4dfdafbde0bf0ee2"}, + {file = "grpcio-1.67.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:bd79929b3bb96b54df1296cd3bf4d2b770bd1df6c2bdf549b49bab286b925cdc"}, + {file = "grpcio-1.67.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:16724ffc956ea42967f5758c2f043faef43cb7e48a51948ab593570570d1e68b"}, + {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:2b7183c80b602b0ad816315d66f2fb7887614ead950416d60913a9a71c12560d"}, + {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:efe32b45dd6d118f5ea2e5deaed417d8a14976325c93812dd831908522b402c9"}, + {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe89295219b9c9e47780a0f1c75ca44211e706d1c598242249fe717af3385ec8"}, + {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa8d025fae1595a207b4e47c2e087cb88d47008494db258ac561c00877d4c8f8"}, + {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f95e15db43e75a534420e04822df91f645664bf4ad21dfaad7d51773c80e6bb4"}, + {file = "grpcio-1.67.0-cp310-cp310-win32.whl", hash = "sha256:a6b9a5c18863fd4b6624a42e2712103fb0f57799a3b29651c0e5b8119a519d65"}, + {file = "grpcio-1.67.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6eb68493a05d38b426604e1dc93bfc0137c4157f7ab4fac5771fd9a104bbaa6"}, + {file = "grpcio-1.67.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:e91d154689639932305b6ea6f45c6e46bb51ecc8ea77c10ef25aa77f75443ad4"}, + {file = "grpcio-1.67.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cb204a742997277da678611a809a8409657b1398aaeebf73b3d9563b7d154c13"}, + {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:ae6de510f670137e755eb2a74b04d1041e7210af2444103c8c95f193340d17ee"}, + {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74b900566bdf68241118f2918d312d3bf554b2ce0b12b90178091ea7d0a17b3d"}, + {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4e95e43447a02aa603abcc6b5e727d093d161a869c83b073f50b9390ecf0fa8"}, + {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bb94e66cd8f0baf29bd3184b6aa09aeb1a660f9ec3d85da615c5003154bc2bf"}, + {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:82e5bd4b67b17c8c597273663794a6a46a45e44165b960517fe6d8a2f7f16d23"}, + {file = "grpcio-1.67.0-cp311-cp311-win32.whl", hash = "sha256:7fc1d2b9fd549264ae585026b266ac2db53735510a207381be509c315b4af4e8"}, + {file = "grpcio-1.67.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac11ecb34a86b831239cc38245403a8de25037b448464f95c3315819e7519772"}, + {file = "grpcio-1.67.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:227316b5631260e0bef8a3ce04fa7db4cc81756fea1258b007950b6efc90c05d"}, + {file = "grpcio-1.67.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d90cfdafcf4b45a7a076e3e2a58e7bc3d59c698c4f6470b0bb13a4d869cf2273"}, + 
{file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:77196216d5dd6f99af1c51e235af2dd339159f657280e65ce7e12c1a8feffd1d"}, + {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c05a26a0f7047f720da41dc49406b395c1470eef44ff7e2c506a47ac2c0591"}, + {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3840994689cc8cbb73d60485c594424ad8adb56c71a30d8948d6453083624b52"}, + {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5a1e03c3102b6451028d5dc9f8591131d6ab3c8a0e023d94c28cb930ed4b5f81"}, + {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:682968427a63d898759474e3b3178d42546e878fdce034fd7474ef75143b64e3"}, + {file = "grpcio-1.67.0-cp312-cp312-win32.whl", hash = "sha256:d01793653248f49cf47e5695e0a79805b1d9d4eacef85b310118ba1dfcd1b955"}, + {file = "grpcio-1.67.0-cp312-cp312-win_amd64.whl", hash = "sha256:985b2686f786f3e20326c4367eebdaed3e7aa65848260ff0c6644f817042cb15"}, + {file = "grpcio-1.67.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:8c9a35b8bc50db35ab8e3e02a4f2a35cfba46c8705c3911c34ce343bd777813a"}, + {file = "grpcio-1.67.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:42199e704095b62688998c2d84c89e59a26a7d5d32eed86d43dc90e7a3bd04aa"}, + {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c4c425f440fb81f8d0237c07b9322fc0fb6ee2b29fbef5f62a322ff8fcce240d"}, + {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:323741b6699cd2b04a71cb38f502db98f90532e8a40cb675393d248126a268af"}, + {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:662c8e105c5e5cee0317d500eb186ed7a93229586e431c1bf0c9236c2407352c"}, + {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f6bd2ab135c64a4d1e9e44679a616c9bc944547357c830fafea5c3caa3de5153"}, + {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:2f55c1e0e2ae9bdd23b3c63459ee4c06d223b68aeb1961d83c48fb63dc29bc03"}, + {file = "grpcio-1.67.0-cp313-cp313-win32.whl", hash = "sha256:fd6bc27861e460fe28e94226e3673d46e294ca4673d46b224428d197c5935e69"}, + {file = "grpcio-1.67.0-cp313-cp313-win_amd64.whl", hash = "sha256:cf51d28063338608cd8d3cd64677e922134837902b70ce00dad7f116e3998210"}, + {file = "grpcio-1.67.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:7f200aca719c1c5dc72ab68be3479b9dafccdf03df530d137632c534bb6f1ee3"}, + {file = "grpcio-1.67.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0892dd200ece4822d72dd0952f7112c542a487fc48fe77568deaaa399c1e717d"}, + {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f4d613fbf868b2e2444f490d18af472ccb47660ea3df52f068c9c8801e1f3e85"}, + {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c69bf11894cad9da00047f46584d5758d6ebc9b5950c0dc96fec7e0bce5cde9"}, + {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9bca3ca0c5e74dea44bf57d27e15a3a3996ce7e5780d61b7c72386356d231db"}, + {file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:014dfc020e28a0d9be7e93a91f85ff9f4a87158b7df9952fe23cc42d29d31e1e"}, + {file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d4ea4509d42c6797539e9ec7496c15473177ce9abc89bc5c71e7abe50fc25737"}, + {file = "grpcio-1.67.0-cp38-cp38-win32.whl", hash = "sha256:9d75641a2fca9ae1ae86454fd25d4c298ea8cc195dbc962852234d54a07060ad"}, + 
{file = "grpcio-1.67.0-cp38-cp38-win_amd64.whl", hash = "sha256:cff8e54d6a463883cda2fab94d2062aad2f5edd7f06ae3ed030f2a74756db365"}, + {file = "grpcio-1.67.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:62492bd534979e6d7127b8a6b29093161a742dee3875873e01964049d5250a74"}, + {file = "grpcio-1.67.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eef1dce9d1a46119fd09f9a992cf6ab9d9178b696382439446ca5f399d7b96fe"}, + {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f623c57a5321461c84498a99dddf9d13dac0e40ee056d884d6ec4ebcab647a78"}, + {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54d16383044e681f8beb50f905249e4e7261dd169d4aaf6e52eab67b01cbbbe2"}, + {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2a44e572fb762c668e4812156b81835f7aba8a721b027e2d4bb29fb50ff4d33"}, + {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:391df8b0faac84d42f5b8dfc65f5152c48ed914e13c522fd05f2aca211f8bfad"}, + {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfd9306511fdfc623a1ba1dc3bc07fbd24e6cfbe3c28b4d1e05177baa2f99617"}, + {file = "grpcio-1.67.0-cp39-cp39-win32.whl", hash = "sha256:30d47dbacfd20cbd0c8be9bfa52fdb833b395d4ec32fe5cff7220afc05d08571"}, + {file = "grpcio-1.67.0-cp39-cp39-win_amd64.whl", hash = "sha256:f55f077685f61f0fbd06ea355142b71e47e4a26d2d678b3ba27248abfe67163a"}, + {file = "grpcio-1.67.0.tar.gz", hash = "sha256:e090b2553e0da1c875449c8e75073dd4415dd71c9bde6a406240fdf4c0ee467c"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.66.1)"] +protobuf = ["grpcio-tools (>=1.67.0)"] [[package]] name = "gunicorn" @@ -1058,13 +1083,13 @@ tornado = ["tornado (>=0.2)"] [[package]] name = "huggingface-hub" -version = "0.24.6" +version = "0.25.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.24.6-py3-none-any.whl", hash = "sha256:a990f3232aa985fe749bc9474060cbad75e8b2f115f6665a9fda5b9c97818970"}, - {file = "huggingface_hub-0.24.6.tar.gz", hash = "sha256:cc2579e761d070713eaa9c323e3debe39d5b464ae3a7261c39a9195b27bb8000"}, + {file = "huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25"}, + {file = "huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c"}, ] [package.dependencies] @@ -1092,13 +1117,13 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "humanize" -version = "4.10.0" +version = "4.11.0" description = "Python humanize utilities" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "humanize-4.10.0-py3-none-any.whl", hash = "sha256:39e7ccb96923e732b5c2e27aeaa3b10a8dfeeba3eb965ba7b74a3eb0e30040a6"}, - {file = "humanize-4.10.0.tar.gz", hash = "sha256:06b6eb0293e4b85e8d385397c5868926820db32b9b654b932f57fa41c23c9978"}, + {file = "humanize-4.11.0-py3-none-any.whl", hash = "sha256:b53caaec8532bcb2fff70c8826f904c35943f8cecaca29d272d9df38092736c0"}, + {file = "humanize-4.11.0.tar.gz", hash = "sha256:e66f36020a2d5a974c504bd2555cf770621dbdbb6d82f94a6857c0b1ea2608be"}, ] [package.extras] @@ -1106,13 +1131,13 @@ tests = ["freezegun", "pytest", "pytest-cov"] [[package]] name = "identify" -version = "2.6.0" +version = "2.6.1" description = "File identification library for Python" optional 
= false python-versions = ">=3.8" files = [ - {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, - {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, + {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"}, + {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"}, ] [package.extras] @@ -1120,24 +1145,27 @@ license = ["ukkonen"] [[package]] name = "idna" -version = "3.8" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "imageio" -version = "2.35.1" +version = "2.36.0" description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "imageio-2.35.1-py3-none-any.whl", hash = "sha256:6eb2e5244e7a16b85c10b5c2fe0f7bf961b40fcb9f1a9fd1bd1d2c2f8fb3cd65"}, - {file = "imageio-2.35.1.tar.gz", hash = "sha256:4952dfeef3c3947957f6d5dedb1f4ca31c6e509a476891062396834048aeed2a"}, + {file = "imageio-2.36.0-py3-none-any.whl", hash = "sha256:471f1eda55618ee44a3c9960911c35e647d9284c68f077e868df633398f137f0"}, + {file = "imageio-2.36.0.tar.gz", hash = "sha256:1c8f294db862c256e9562354d65aa54725b8dafed7f10f02bb3ec20ec1678850"}, ] [package.dependencies] @@ -1145,8 +1173,8 @@ numpy = "*" pillow = ">=8.3.2" [package.extras] -all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"] -all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"] +all-plugins = ["astropy", "av", "imageio-ffmpeg", "numpy (>2)", "pillow-heif", "psutil", "rawpy", "tifffile"] +all-plugins-pypy = ["av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] build = ["wheel"] dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] @@ -1164,32 +1192,36 @@ tifffile = ["tifffile"] [[package]] name = "importlib-metadata" -version = "8.4.0" +version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, - {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, ] [package.dependencies] -zipp = ">=0.5" +zipp = ">=3.20" [package.extras] +check = 
["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] [[package]] name = "importlib-resources" -version = "6.4.4" +version = "6.4.5" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_resources-6.4.4-py3-none-any.whl", hash = "sha256:dda242603d1c9cd836c3368b1174ed74cb4049ecd209e7a1a0104620c18c5c11"}, - {file = "importlib_resources-6.4.4.tar.gz", hash = "sha256:20600c8b7361938dc0bb2d5ec0297802e575df486f5a544fa414da65e13721f7"}, + {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, + {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, ] [package.dependencies] @@ -1255,115 +1287,125 @@ files = [ [[package]] name = "kiwisolver" -version = "1.4.5" +version = "1.4.7" description = "A fast implementation of the Cassowary constraint solver" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, - {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, - {file = 
"kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = 
"sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, - {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, + {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, + {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, + {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, + {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, + {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, + {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, + {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, + {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, + {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, + {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, 
+ {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, + {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, + {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, + {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, + {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, + {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, + {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, + {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, + {file = 
"kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, + {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, + {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, + {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, + {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, + {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, + {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, + {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, + {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, + {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, + {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, + {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, ] [[package]] @@ -1602,127 +1644,121 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "2.1.5" +version = "3.0.1" description = "Safely add untrusted strings to HTML/XML markup." optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = 
"MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, - {file 
= "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +python-versions = ">=3.9" +files = [ + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = "sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = "sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"}, + {file = 
"MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = "sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"}, + {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"}, ] [[package]] name = "matplotlib" -version = "3.7.5" +version = "3.9.2" description = "Python plotting package" optional = false -python-versions = ">=3.8" -files = [ - {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:4a87b69cb1cb20943010f63feb0b2901c17a3b435f75349fd9865713bfa63925"}, - {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d3ce45010fefb028359accebb852ca0c21bd77ec0f281952831d235228f15810"}, - {file = "matplotlib-3.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbea1e762b28400393d71be1a02144aa16692a3c4c676ba0178ce83fc2928fdd"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec0e1adc0ad70ba8227e957551e25a9d2995e319c29f94a97575bb90fa1d4469"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6738c89a635ced486c8a20e20111d33f6398a9cbebce1ced59c211e12cd61455"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1210b7919b4ed94b5573870f316bca26de3e3b07ffdb563e79327dc0e6bba515"}, - {file = "matplotlib-3.7.5-cp310-cp310-win32.whl", hash = "sha256:068ebcc59c072781d9dcdb82f0d3f1458271c2de7ca9c78f5bd672141091e9e1"}, - {file = "matplotlib-3.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:f098ffbaab9df1e3ef04e5a5586a1e6b1791380698e84938d8640961c79b1fc0"}, - {file = 
"matplotlib-3.7.5-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:f65342c147572673f02a4abec2d5a23ad9c3898167df9b47c149f32ce61ca078"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4ddf7fc0e0dc553891a117aa083039088d8a07686d4c93fb8a810adca68810af"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0ccb830fc29442360d91be48527809f23a5dcaee8da5f4d9b2d5b867c1b087b8"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efc6bb28178e844d1f408dd4d6341ee8a2e906fc9e0fa3dae497da4e0cab775d"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b15c4c2d374f249f324f46e883340d494c01768dd5287f8bc00b65b625ab56c"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d028555421912307845e59e3de328260b26d055c5dac9b182cc9783854e98fb"}, - {file = "matplotlib-3.7.5-cp311-cp311-win32.whl", hash = "sha256:fe184b4625b4052fa88ef350b815559dd90cc6cc8e97b62f966e1ca84074aafa"}, - {file = "matplotlib-3.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:084f1f0f2f1010868c6f1f50b4e1c6f2fb201c58475494f1e5b66fed66093647"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_universal2.whl", hash = "sha256:34bceb9d8ddb142055ff27cd7135f539f2f01be2ce0bafbace4117abe58f8fe4"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c5a2134162273eb8cdfd320ae907bf84d171de948e62180fa372a3ca7cf0f433"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:039ad54683a814002ff37bf7981aa1faa40b91f4ff84149beb53d1eb64617980"}, - {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d742ccd1b09e863b4ca58291728db645b51dab343eebb08d5d4b31b308296ce"}, - {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:743b1c488ca6a2bc7f56079d282e44d236bf375968bfd1b7ba701fd4d0fa32d6"}, - {file = "matplotlib-3.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:fbf730fca3e1f23713bc1fae0a57db386e39dc81ea57dc305c67f628c1d7a342"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:cfff9b838531698ee40e40ea1a8a9dc2c01edb400b27d38de6ba44c1f9a8e3d2"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:1dbcca4508bca7847fe2d64a05b237a3dcaec1f959aedb756d5b1c67b770c5ee"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4cdf4ef46c2a1609a50411b66940b31778db1e4b73d4ecc2eaa40bd588979b13"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:167200ccfefd1674b60e957186dfd9baf58b324562ad1a28e5d0a6b3bea77905"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:53e64522934df6e1818b25fd48cf3b645b11740d78e6ef765fbb5fa5ce080d02"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e3bc79b2d7d615067bd010caff9243ead1fc95cf735c16e4b2583173f717eb"}, - {file = "matplotlib-3.7.5-cp38-cp38-win32.whl", hash = "sha256:6b641b48c6819726ed47c55835cdd330e53747d4efff574109fd79b2d8a13748"}, - {file = "matplotlib-3.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:f0b60993ed3488b4532ec6b697059897891927cbfc2b8d458a891b60ec03d9d7"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:090964d0afaff9c90e4d8de7836757e72ecfb252fb02884016d809239f715651"}, - {file = 
"matplotlib-3.7.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9fc6fcfbc55cd719bc0bfa60bde248eb68cf43876d4c22864603bdd23962ba25"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7cc3078b019bb863752b8b60e8b269423000f1603cb2299608231996bd9d54"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e4e9a868e8163abaaa8259842d85f949a919e1ead17644fb77a60427c90473c"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa7ebc995a7d747dacf0a717d0eb3aa0f0c6a0e9ea88b0194d3a3cd241a1500f"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3785bfd83b05fc0e0c2ae4c4a90034fe693ef96c679634756c50fe6efcc09856"}, - {file = "matplotlib-3.7.5-cp39-cp39-win32.whl", hash = "sha256:29b058738c104d0ca8806395f1c9089dfe4d4f0f78ea765c6c704469f3fffc81"}, - {file = "matplotlib-3.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:fd4028d570fa4b31b7b165d4a685942ae9cdc669f33741e388c01857d9723eab"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2a9a3f4d6a7f88a62a6a18c7e6a84aedcaf4faf0708b4ca46d87b19f1b526f88"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9b3fd853d4a7f008a938df909b96db0b454225f935d3917520305b90680579c"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ad550da9f160737d7890217c5eeed4337d07e83ca1b2ca6535078f354e7675"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:20da7924a08306a861b3f2d1da0d1aa9a6678e480cf8eacffe18b565af2813e7"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b45c9798ea6bb920cb77eb7306409756a7fab9db9b463e462618e0559aecb30e"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a99866267da1e561c7776fe12bf4442174b79aac1a47bd7e627c7e4d077ebd83"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b6aa62adb6c268fc87d80f963aca39c64615c31830b02697743c95590ce3fbb"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e530ab6a0afd082d2e9c17eb1eb064a63c5b09bb607b2b74fa41adbe3e162286"}, - {file = "matplotlib-3.7.5.tar.gz", hash = "sha256:1e5c971558ebc811aa07f54c7b7c677d78aa518ef4c390e14673a09e0860184a"}, +python-versions = ">=3.9" +files = [ + {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"}, + {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"}, + {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"}, + {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"}, + {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"}, + {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"}, + {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"}, + {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"}, + {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"}, + {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"}, + {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"}, + {file = "matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"}, + {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"}, + {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"}, + {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"}, + {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"}, + {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"}, + {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"}, + {file = "matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"}, + {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"}, + {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"}, + {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"}, + {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"}, + {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"}, + {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"}, + {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"}, + {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"}, + {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"}, + {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"}, + {file = 
"matplotlib-3.9.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cef2a73d06601437be399908cf13aee74e86932a5ccc6ccdf173408ebc5f6bb2"}, + {file = "matplotlib-3.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0830e188029c14e891fadd99702fd90d317df294c3298aad682739c5533721a"}, + {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ba9c1299c920964e8d3857ba27173b4dbb51ca4bab47ffc2c2ba0eb5e2cbc5"}, + {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd93b91ab47a3616b4d3c42b52f8363b88ca021e340804c6ab2536344fad9ca"}, + {file = "matplotlib-3.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6d1ce5ed2aefcdce11904fc5bbea7d9c21fff3d5f543841edf3dea84451a09ea"}, + {file = "matplotlib-3.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:b2696efdc08648536efd4e1601b5fd491fd47f4db97a5fbfd175549a7365c1b2"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d52a3b618cb1cbb769ce2ee1dcdb333c3ab6e823944e9a2d36e37253815f9556"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:039082812cacd6c6bec8e17a9c1e6baca230d4116d522e81e1f63a74d01d2e21"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6758baae2ed64f2331d4fd19be38b7b4eae3ecec210049a26b6a4f3ae1c85dcc"}, + {file = "matplotlib-3.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:050598c2b29e0b9832cde72bcf97627bf00262adbc4a54e2b856426bb2ef0697"}, + {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"}, ] [package.dependencies] @@ -1730,13 +1766,16 @@ contourpy = ">=1.0.1" cycler = ">=0.10" fonttools = ">=4.22.0" importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} -kiwisolver = ">=1.0.1" -numpy = ">=1.20,<2" +kiwisolver = ">=1.3.1" +numpy = ">=1.23" packaging = ">=20.0" -pillow = ">=6.2.0" +pillow = ">=8" pyparsing = ">=2.3.1" python-dateutil = ">=2.7" +[package.extras] +dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"] + [[package]] name = "mdurl" version = "0.1.2" @@ -1809,13 +1848,13 @@ pyyaml = ">=5.1" [[package]] name = "mkdocs-material" -version = "9.5.34" +version = "9.5.41" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.34-py3-none-any.whl", hash = "sha256:54caa8be708de2b75167fd4d3b9f3d949579294f49cb242515d4653dbee9227e"}, - {file = "mkdocs_material-9.5.34.tar.gz", hash = "sha256:1e60ddf716cfb5679dfd65900b8a25d277064ed82d9a53cd5190e3f894df7840"}, + {file = "mkdocs_material-9.5.41-py3-none-any.whl", hash = "sha256:990bc138c33342b5b73e7545915ebc0136e501bfbd8e365735144f5120891d83"}, + {file = "mkdocs_material-9.5.41.tar.gz", hash = "sha256:30fa5d459b4b8130848ecd8e1c908878345d9d8268f7ddbc31eebe88d462d97b"}, ] [package.dependencies] @@ -1849,13 +1888,13 @@ files = [ [[package]] name = "mlflow" -version = "2.16.0" +version = "2.17.0" description = "MLflow is an open source platform for the complete machine learning lifecycle" optional = false python-versions = ">=3.8" files = [ - {file = "mlflow-2.16.0-py3-none-any.whl", hash = "sha256:9f27ef6ae7a82d7ecd67b6b4a4d50637a5e8160639115570fbc689758f9c0b54"}, - {file = "mlflow-2.16.0.tar.gz", hash = "sha256:82ea1a2e800f404f1586783b7636091c0a5754cf9ff45afeadf3a5e467f5168f"}, + {file = "mlflow-2.17.0-py3-none-any.whl", hash = 
"sha256:64fbc0dfcb7322ed4cbccadc2f533bdd2944001b983ea8c10db45c7c59b46b7c"}, + {file = "mlflow-2.17.0.tar.gz", hash = "sha256:5bb2089b833da48e4a92a9b4cb1cb5fa509a571eb3c603be39f5238b4721e076"}, ] [package.dependencies] @@ -1870,7 +1909,7 @@ Jinja2 = [ ] markdown = ">=3.3,<4" matplotlib = "<4" -mlflow-skinny = "2.16.0" +mlflow-skinny = "2.17.0" numpy = "<3" pandas = "<3" pyarrow = ">=4.0.0,<18" @@ -1882,23 +1921,24 @@ waitress = {version = "<4", markers = "platform_system == \"Windows\""} [package.extras] aliyun-oss = ["aliyunstoreplugin"] databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] -extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] jfrog = ["mlflow-jfrog-plugin"] -langchain = ["langchain (>=0.1.0,<=0.2.15)"] +langchain = ["langchain (>=0.1.0,<=0.3.1)"] +mlserver = ["mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)"] sqlserver = ["mlflow-dbstore"] xethub = ["mlflow-xethub"] [[package]] name = "mlflow-skinny" -version = "2.16.0" +version = "2.17.0" description = "MLflow is an open source platform for the complete machine learning lifecycle" optional = false python-versions = ">=3.8" files = [ - {file = "mlflow_skinny-2.16.0-py3-none-any.whl", hash = "sha256:c55541f50efd0f6637377b10e8a654847a3fcd815b8680a95f02e0ca6bd7700c"}, - {file = "mlflow_skinny-2.16.0.tar.gz", hash = "sha256:9b823173063743783b4e7b6c52bdadcc7d9dab48eb883ac454c0d56609df6b2d"}, + {file = "mlflow_skinny-2.17.0-py3-none-any.whl", hash = "sha256:9eff7160f7459e09c01cc5bc2a68fdba7b64adbce069ef6d1013569830569048"}, + {file = "mlflow_skinny-2.17.0.tar.gz", hash = "sha256:bbb770368e68ffe783a76fa38854618c1411b44bda21eb8b770ca4cc28801299"}, ] [package.dependencies] @@ -1919,11 +1959,12 @@ sqlparse = ">=0.4.0,<1" [package.extras] aliyun-oss = ["aliyunstoreplugin"] databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] -extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] jfrog = 
["mlflow-jfrog-plugin"] -langchain = ["langchain (>=0.1.0,<=0.2.15)"] +langchain = ["langchain (>=0.1.0,<=0.3.1)"] +mlserver = ["mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)"] sqlserver = ["mlflow-dbstore"] xethub = ["mlflow-xethub"] @@ -2002,49 +2043,50 @@ tests = ["pytest (>=4.6)"] [[package]] name = "networkx" -version = "3.1" +version = "3.2.1" description = "Python package for creating and manipulating graphs and networks" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, - {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, + {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, + {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, ] [package.extras] -default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "nibabel" -version = "5.2.1" +version = "5.3.1" description = "Access a multitude of neuroimaging data formats" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "nibabel-5.2.1-py3-none-any.whl", hash = "sha256:2cbbc22985f7f9d39d050df47249771dfb8d48447f5e7a993177e4cabfe047f0"}, - {file = "nibabel-5.2.1.tar.gz", hash = "sha256:b6c80b2e728e4bc2b65f1142d9b8d2287a9102a8bf8477e115ef0d8334559975"}, + {file = "nibabel-5.3.1-py3-none-any.whl", hash = "sha256:5c04c7139d41a59ef92839f1cabbe73061edd5787340bf2c9a34ed71f0db9d07"}, + {file = "nibabel-5.3.1.tar.gz", hash = "sha256:aec1b75dcf6bd9595a9196ff341b87957c69fb21bc5e38719463478dad83000a"}, ] [package.dependencies] -importlib-resources = {version = ">=1.3", markers = "python_version < \"3.9\""} -numpy = ">=1.20" -packaging = ">=17" +importlib-resources = {version = ">=5.12", markers = "python_version < \"3.12\""} +numpy = ">=1.22" +packaging = ">=20" +typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""} [package.extras] -all = ["nibabel[dicomfs,minc2,spm,zstd]"] +all = ["h5py", "pillow", "pydicom (>=2.3)", "pyzstd (>=0.14.3)", "scipy"] dev = ["tox"] -dicom = ["pydicom (>=1.0.0)"] -dicomfs = ["nibabel[dicom]", "pillow"] -doc = ["matplotlib (>=1.5.3)", "numpydoc", "sphinx", "texext", "tomli"] +dicom = ["pydicom (>=2.3)"] +dicomfs = ["pillow", "pydicom (>=2.3)"] +doc = ["matplotlib (>=3.5)", "numpydoc", "sphinx", "texext", "tomli"] doctest = ["tox"] minc2 = ["h5py"] spm = ["scipy"] style = ["tox"] -test = 
["pytest", "pytest-cov", "pytest-doctestplus", "pytest-httpserver", "pytest-xdist"] +test = ["coverage (>=7.2)", "pytest", "pytest-cov", "pytest-doctestplus", "pytest-httpserver", "pytest-xdist"] typing = ["tox"] zstd = ["pyzstd (>=0.14.3)"] @@ -2090,39 +2132,56 @@ files = [ [[package]] name = "numpy" -version = "1.24.4" +version = "2.0.2" description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.8" -files = [ - {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, - {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, - {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, - {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, - {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, - {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, - {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, - {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, - {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, - {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, - {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, - {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, - {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, - {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, - {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, - {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, - {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, - {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, - {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, - {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, - {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, - {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, - {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, - {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, - {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, - {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, +python-versions = ">=3.9" +files = [ + {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"}, + {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"}, + {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"}, + {file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"}, + {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"}, + {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"}, + {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash 
= "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"}, + {file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"}, + {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"}, + {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"}, + {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"}, + {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"}, + {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"}, + {file = 
"numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"}, + {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"}, ] [[package]] @@ -2248,14 +2307,14 @@ files = [ [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.6.68" +version = "12.6.77" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_aarch64.whl", hash = "sha256:b3fd0779845f68b92063ab1393abab1ed0a23412fc520df79a8190d098b5cd6b"}, - {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_x86_64.whl", hash = "sha256:125a6c2a44e96386dda634e13d944e60b07a0402d391a070e8fb4104b34ea1ab"}, - {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-win_amd64.whl", hash = "sha256:a55744c98d70317c5e23db14866a8cc2b733f7324509e941fc96276f9f37801d"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:3bf10d85bb1801e9c894c6e197e44dd137d2a0a9e43f8450e9ad13f2df0dd52d"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9ae346d16203ae4ea513be416495167a0101d33d2d14935aa9c1829a3fb45142"}, + {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:410718cd44962bed862a31dd0318620f6f9a8b28a6291967bcfcb446a6516771"}, ] [[package]] @@ -2269,68 +2328,49 @@ files = [ {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, ] -[[package]] -name = "oauthlib" -version = "3.2.2" -description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -optional = false -python-versions = ">=3.6" -files = [ - {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, - {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, -] - -[package.extras] -rsa = ["cryptography (>=3.0.0)"] -signals = ["blinker (>=1.4.0)"] -signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] - [[package]] name = "opentelemetry-api" -version = "1.27.0" +version = "1.16.0" description = "OpenTelemetry Python API" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, - {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, + {file = "opentelemetry_api-1.16.0-py3-none-any.whl", hash = "sha256:79e8f0cf88dbdd36b6abf175d2092af1efcaa2e71552d0d2b3b181a9707bf4bc"}, + {file = "opentelemetry_api-1.16.0.tar.gz", hash = "sha256:4b0e895a3b1f5e1908043ebe492d33e33f9ccdbe6d02d3994c2f8721a63ddddb"}, ] [package.dependencies] deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<=8.4.0" +setuptools = ">=16.0" [[package]] name = "opentelemetry-sdk" -version = "1.27.0" +version = "1.16.0" description = "OpenTelemetry Python SDK" optional = false -python-versions = ">=3.8" +python-versions 
= ">=3.7" files = [ - {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, - {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, + {file = "opentelemetry_sdk-1.16.0-py3-none-any.whl", hash = "sha256:15f03915eec4839f885a5e6ed959cde59b8690c8c012d07c95b4b138c98dc43f"}, + {file = "opentelemetry_sdk-1.16.0.tar.gz", hash = "sha256:4d3bb91e9e209dbeea773b5565d901da4f76a29bf9dbc1c9500be3cabb239a4e"}, ] [package.dependencies] -opentelemetry-api = "1.27.0" -opentelemetry-semantic-conventions = "0.48b0" +opentelemetry-api = "1.16.0" +opentelemetry-semantic-conventions = "0.37b0" +setuptools = ">=16.0" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.48b0" +version = "0.37b0" description = "OpenTelemetry Semantic Conventions" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, - {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, + {file = "opentelemetry_semantic_conventions-0.37b0-py3-none-any.whl", hash = "sha256:462982278a42dab01f68641cd89f8460fe1f93e87c68a012a76fb426dcdba5ee"}, + {file = "opentelemetry_semantic_conventions-0.37b0.tar.gz", hash = "sha256:087ce2e248e42f3ffe4d9fa2303111de72bb93baa06a0f4655980bc1557c4228"}, ] -[package.dependencies] -deprecated = ">=1.2.6" -opentelemetry-api = "1.27.0" - [[package]] name = "packaging" version = "24.1" @@ -2359,51 +2399,89 @@ lint = ["black"] [[package]] name = "pandas" -version = "1.5.3" +version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false -python-versions = ">=3.8" -files = [ - {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, - {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, - {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, - {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, - {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, - {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, - {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, - {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, - {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, - {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, - {file = 
"pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, - {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, - {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, - {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, - {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, - {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, - {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, - {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, - {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, - {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, - {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, - {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, - {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, - {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, - {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, - {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, - {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + 
{file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, ] [package.dependencies] numpy = [ - {version = ">=1.20.3", markers = "python_version < \"3.10\""}, - {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] -python-dateutil = ">=2.8.1" +python-dateutil = ">=2.8.2" pytz = ">=2020.1" +tzdata = ">=2022.7" [package.extras] -test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb 
(>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] [[package]] name = "pathspec" @@ -2428,95 +2506,90 @@ files = [ [[package]] name = "pillow" -version = "10.4.0" +version = "11.0.0" description = "Python Imaging Library (Fork)" optional = false -python-versions = ">=3.8" -files = [ - {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, - {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, - {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, - {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, - {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = 
"sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, - {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, - {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, - {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, - {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, - {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, - {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, - {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, - {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, - {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, - {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, - {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, - {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, - {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, - {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, - {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +python-versions = ">=3.9" +files = [ + {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, + {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, + {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, + {file = 
"pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, + {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, + {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, + {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, + {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, + {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, + {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, + {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, + {file = 
"pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, + {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, + {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, + {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, + {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, + {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, + {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, + {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, + {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, + {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, + {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, + {file = 
"pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, + {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, + {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, + {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, + {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, + {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, + {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, + {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, + {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", 
"pytest-cov", "pytest-timeout"] @@ -2525,19 +2598,19 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "pluggy" @@ -2556,13 +2629,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pre-commit" -version = "3.5.0" +version = "4.0.1" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"}, - {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"}, + {file = "pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878"}, + {file = "pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2"}, ] [package.dependencies] @@ -2574,13 +2647,13 @@ virtualenv = ">=20.10.0" [[package]] name = "prometheus-client" -version = "0.20.0" +version = "0.21.0" description = "Python client for the Prometheus monitoring system." 
optional = false python-versions = ">=3.8" files = [ - {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"}, - {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"}, + {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, + {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, ] [package.extras] @@ -2602,22 +2675,22 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "4.25.4" +version = "4.25.5" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, - {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, - {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"}, - {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"}, - {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"}, - {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"}, - {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = "sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"}, - {file = "protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"}, - {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, + {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, + {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, + {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, + {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = "sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, + {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, + {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, + {file = "protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, + {file = 
"protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, + {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, ] [[package]] @@ -2713,24 +2786,24 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] [[package]] name = "pyasn1" -version = "0.6.0" +version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] [[package]] name = "pyasn1-modules" -version = "0.4.0" +version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] [package.dependencies] @@ -2738,119 +2811,120 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.8.2" +version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.4" typing-extensions = {version = ">=4.6.1", markers = "python_version < \"3.13\""} [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = 
"pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file 
= "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = 
"sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = 
"pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = 
"sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [package.dependencies] @@ -2872,13 +2946,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymdown-extensions" -version = "10.9" +version = "10.11.2" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.9-py3-none-any.whl", hash = "sha256:d323f7e90d83c86113ee78f3fe62fc9dee5f56b54d912660703ea1816fed5626"}, - {file = "pymdown_extensions-10.9.tar.gz", hash = "sha256:6ff740bcd99ec4172a938970d42b96128bdc9d4b9bcad72494f29921dc69b753"}, + {file = "pymdown_extensions-10.11.2-py3-none-any.whl", hash = "sha256:41cdde0a77290e480cf53892f5c5e50921a7ee3e5cd60ba91bf19837b33badcf"}, + {file = "pymdown_extensions-10.11.2.tar.gz", hash = "sha256:bc8847ecc9e784a098efd35e20cba772bc5a1b529dfcef9dc1972db9021a1049"}, ] [package.dependencies] @@ -2901,13 +2975,13 @@ files = [ [[package]] name = "pyparsing" -version = "3.1.4" +version = "3.2.0" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false -python-versions = ">=3.6.8" +python-versions = ">=3.9" files = [ - {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, - {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, + {file = "pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84"}, + {file = "pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c"}, ] [package.extras] @@ -2915,13 +2989,13 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pytest" -version = "8.3.2" +version = "8.3.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, ] [package.dependencies] @@ -3003,73 +3077,40 @@ six = ">=1.5" [[package]] name = "pytz" -version = "2024.1" +version = "2024.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, -] - -[[package]] -name = "pywavelets" -version = "1.4.1" -description = "PyWavelets, wavelet transform module" -optional = false -python-versions = ">=3.8" -files = [ - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:d854411eb5ee9cb4bc5d0e66e3634aeb8f594210f6a1bed96dbed57ec70f181c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:231b0e0b1cdc1112f4af3c24eea7bf181c418d37922a67670e9bf6cfa2d544d4"}, - {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:754fa5085768227c4f4a26c1e0c78bc509a266d9ebd0eb69a278be7e3ece943c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da7b9c006171be1f9ddb12cc6e0d3d703b95f7f43cb5e2c6f5f15d3233fcf202"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win32.whl", hash = "sha256:67a0d28a08909f21400cb09ff62ba94c064882ffd9e3a6b27880a111211d59bd"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91d3d393cffa634f0e550d88c0e3f217c96cfb9e32781f2960876f1808d9b45b"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:64c6bac6204327321db30b775060fbe8e8642316e6bff17f06b9f34936f88875"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f19327f2129fb7977bc59b966b4974dfd72879c093e44a7287500a7032695de"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad987748f60418d5f4138db89d82ba0cb49b086e0cbb8fd5c3ed4a814cfb705e"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875d4d620eee655346e3589a16a73790cf9f8917abba062234439b594e706784"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win32.whl", hash = "sha256:7231461d7a8eb3bdc7aa2d97d9f67ea5a9f8902522818e7e2ead9c2b3408eeb1"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:daf0aa79842b571308d7c31a9c43bc99a30b6328e6aea3f50388cd8f69ba7dbc"}, - {file = "PyWavelets-1.4.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ab7da0a17822cd2f6545626946d3b82d1a8e106afc4b50e3387719ba01c7b966"}, - {file = "PyWavelets-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:578af438a02a86b70f1975b546f68aaaf38f28fb082a61ceb799816049ed18aa"}, - {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb5ca8d11d3f98e89e65796a2125be98424d22e5ada360a0dbabff659fca0fc"}, - {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:058b46434eac4c04dd89aeef6fa39e4b6496a951d78c500b6641fd5b2cc2f9f4"}, - {file = "PyWavelets-1.4.1-cp38-cp38-win32.whl", hash = "sha256:de7cd61a88a982edfec01ea755b0740e94766e00a1ceceeafef3ed4c85c605cd"}, - {file = "PyWavelets-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:7ab8d9db0fe549ab2ee0bea61f614e658dd2df419d5b75fba47baa761e95f8f2"}, - {file = "PyWavelets-1.4.1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:23bafd60350b2b868076d976bdd92f950b3944f119b4754b1d7ff22b7acbf6c6"}, - {file = "PyWavelets-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0e56cd7a53aed3cceca91a04d62feb3a0aca6725b1912d29546c26f6ea90426"}, - {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:030670a213ee8fefa56f6387b0c8e7d970c7f7ad6850dc048bd7c89364771b9b"}, - {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71ab30f51ee4470741bb55fc6b197b4a2b612232e30f6ac069106f0156342356"}, - {file = "PyWavelets-1.4.1-cp39-cp39-win32.whl", hash = "sha256:47cac4fa25bed76a45bc781a293c26ac63e8eaae9eb8f9be961758d22b58649c"}, - {file = "PyWavelets-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:88aa5449e109d8f5e7f0adef85f7f73b1ab086102865be64421a3a3d02d277f4"}, - {file = "PyWavelets-1.4.1.tar.gz", hash = "sha256:6437af3ddf083118c26d8f97ab43b0724b956c9f958e9ea788659f6a2834ba93"}, + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = 
"sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, ] -[package.dependencies] -numpy = ">=1.17.3" - [[package]] name = "pywin32" -version = "306" +version = "308" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = 
"sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, ] [[package]] @@ -3164,210 +3205,205 @@ prompt_toolkit = ">=2.0,<=3.0.36" [[package]] name = "rapidfuzz" -version = "3.9.7" +version = "3.10.0" description = "rapid fuzzy string matching" optional = false -python-versions = ">=3.8" -files = [ - {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ccf68e30b80e903f2309f90a438dbd640dd98e878eeb5ad361a288051ee5b75c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:696a79018ef989bf1c9abd9005841cee18005ccad4748bad8a4c274c47b6241a"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4eebf6c93af0ae866c22b403a84747580bb5c10f0d7b51c82a87f25405d4dcb"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e9125377fa3d21a8abd4fbdbcf1c27be73e8b1850f0b61b5b711364bf3b59db"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c12d180b17a22d107c8747de9c68d0b9c1d15dcda5445ff9bf9f4ccfb67c3e16"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1318d42610c26dcd68bd3279a1bf9e3605377260867c9a8ed22eafc1bd93a7c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5fa6e3c6e0333051c1f3a49f0807b3366f4131c8d6ac8c3e05fd0d0ce3755c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fcf79b686962d7bec458a0babc904cb4fa319808805e036b9d5a531ee6b9b835"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8b01153c7466d0bad48fba77a303d5a768e66f24b763853469f47220b3de4661"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:94baaeea0b4f8632a6da69348b1e741043eba18d4e3088d674d3f76586b6223d"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6c5b32875646cb7f60c193ade99b2e4b124f19583492115293cd00f6fb198b17"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:110b6294396bc0a447648627479c9320f095c2034c0537f687592e0f58622638"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win32.whl", hash = "sha256:3445a35c4c8d288f2b2011eb61bce1227c633ce85a3154e727170f37c0266bb2"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:0d1415a732ee75e74a90af12020b77a0b396b36c60afae1bde3208a78cd2c9fc"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win_arm64.whl", hash = "sha256:836f4d88b8bd0fff2ebe815dcaab8aa6c8d07d1d566a7e21dd137cf6fe11ed5b"}, - {file = 
"rapidfuzz-3.9.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d098ce6162eb5e48fceb0745455bc950af059df6113eec83e916c129fca11408"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:048d55d36c02c6685a2b2741688503c3d15149694506655b6169dcfd3b6c2585"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c33211cfff9aec425bb1bfedaf94afcf337063aa273754f22779d6dadebef4c2"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6d9db2fa4e9be171e9bb31cf2d2575574774966b43f5b951062bb2e67885852"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4e049d5ad61448c9a020d1061eba20944c4887d720c4069724beb6ea1692507"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cfa74aac64c85898b93d9c80bb935a96bf64985e28d4ee0f1a3d1f3bf11a5106"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965693c2e9efd425b0f059f5be50ef830129f82892fa1858e220e424d9d0160f"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8501000a5eb8037c4b56857724797fe5a8b01853c363de91c8d0d0ad56bef319"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d92c552c6b7577402afdd547dcf5d31ea6c8ae31ad03f78226e055cfa37f3c6"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1ee2086f490cb501d86b7e386c1eb4e3a0ccbb0c99067089efaa8c79012c8952"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1de91e7fd7f525e10ea79a6e62c559d1b0278ec097ad83d9da378b6fab65a265"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4da514d13f4433e16960a17f05b67e0af30ac771719c9a9fb877e5004f74477"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win32.whl", hash = "sha256:a40184c67db8252593ec518e17fb8a6e86d7259dc9f2d6c0bf4ff4db8cf1ad4b"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:c4f28f1930b09a2c300357d8465b388cecb7e8b2f454a5d5425561710b7fd07f"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win_arm64.whl", hash = "sha256:675b75412a943bb83f1f53e2e54fd18c80ef15ed642dc6eb0382d1949419d904"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1ef6a1a8f0b12f8722f595f15c62950c9a02d5abc64742561299ffd49f6c6944"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32532af1d70c6ec02ea5ac7ee2766dfff7c8ae8c761abfe8da9e527314e634e8"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1a38bade755aa9dd95a81cda949e1bf9cd92b79341ccc5e2189c9e7bdfc5ec"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d73ee2df41224c87336448d279b5b6a3a75f36e41dd3dcf538c0c9cce36360d8"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be3a1fc3e2ab3bdf93dc0c83c00acca8afd2a80602297d96cf4a0ba028333cdf"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:603f48f621272a448ff58bb556feb4371252a02156593303391f5c3281dfaeac"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:268f8e1ca50fc61c0736f3fe9d47891424adf62d96ed30196f30f4bd8216b41f"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:5f8bf3f0d02935751d8660abda6044821a861f6229f7d359f98bcdcc7e66c39b"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b997ff3b39d4cee9fb025d6c46b0a24bd67595ce5a5b652a97fb3a9d60beb651"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca66676c8ef6557f9b81c5b2b519097817a7c776a6599b8d6fcc3e16edd216fe"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:35d3044cb635ca6b1b2b7b67b3597bd19f34f1753b129eb6d2ae04cf98cd3945"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5a93c9e60904cb76e7aefef67afffb8b37c4894f81415ed513db090f29d01101"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win32.whl", hash = "sha256:579d107102c0725f7c79b4e79f16d3cf4d7c9208f29c66b064fa1fd4641d5155"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:953b3780765c8846866faf891ee4290f6a41a6dacf4fbcd3926f78c9de412ca6"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win_arm64.whl", hash = "sha256:7c20c1474b068c4bd45bf2fd0ad548df284f74e9a14a68b06746c56e3aa8eb70"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fde81b1da9a947f931711febe2e2bee694e891f6d3e6aa6bc02c1884702aea19"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:47e92c155a14f44511ea8ebcc6bc1535a1fe8d0a7d67ad3cc47ba61606df7bcf"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8772b745668260c5c4d069c678bbaa68812e6c69830f3771eaad521af7bc17f8"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578302828dd97ee2ba507d2f71d62164e28d2fc7bc73aad0d2d1d2afc021a5d5"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc3e6081069eea61593f1d6839029da53d00c8c9b205c5534853eaa3f031085c"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b1c2d504eddf97bc0f2eba422c8915576dbf025062ceaca2d68aecd66324ad9"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb76e5a21034f0307c51c5a2fc08856f698c53a4c593b17d291f7d6e9d09ca3"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d4ba2318ef670ce505f42881a5d2af70f948124646947341a3c6ccb33cd70369"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:057bb03f39e285047d7e9412e01ecf31bb2d42b9466a5409d715d587460dd59b"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a8feac9006d5c9758438906f093befffc4290de75663dbb2098461df7c7d28dd"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:95b8292383e717e10455f2c917df45032b611141e43d1adf70f71b1566136b11"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e9fbf659537d246086d0297628b3795dc3e4a384101ecc01e5791c827b8d7345"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win32.whl", hash = "sha256:1dc516ac6d32027be2b0196bedf6d977ac26debd09ca182376322ad620460feb"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win_amd64.whl", hash = "sha256:b4f86e09d3064dca0b014cd48688964036a904a2d28048f00c8f4640796d06a8"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win_arm64.whl", hash = "sha256:19c64d8ddb2940b42a4567b23f1681af77f50a5ff6c9b8e85daba079c210716e"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fbda3dd68d8b28ccb20ffb6f756fefd9b5ba570a772bedd7643ed441f5793308"}, - {file = 
"rapidfuzz-3.9.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2379e0b2578ad3ac7004f223251550f08bca873ff76c169b09410ec562ad78d8"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d1eff95362f993b0276fd3839aee48625b09aac8938bb0c23b40d219cba5dc5"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd9360e30041690912525a210e48a897b49b230768cc8af1c702e5395690464f"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a93cd834b3c315ab437f0565ee3a2f42dd33768dc885ccbabf9710b131cf70d2"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff196996240db7075f62c7bc4506f40a3c80cd4ae3ab0e79ac6892283a90859"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948dcee7aaa1cd14358b2a7ef08bf0be42bf89049c3a906669874a715fc2c937"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95751f505a301af1aaf086c19f34536056d6c8efa91b2240de532a3db57b543"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:90db86fa196eecf96cb6db09f1083912ea945c50c57188039392d810d0b784e1"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:3171653212218a162540a3c8eb8ae7d3dcc8548540b69eaecaf3b47c14d89c90"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:36dd6e820379c37a1ffefc8a52b648758e867cd9d78ee5b5dc0c9a6a10145378"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7b702de95666a1f7d5c6b47eacadfe2d2794af3742d63d2134767d13e5d1c713"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-win32.whl", hash = "sha256:9030e7238c0df51aed5c9c5ed8eee2bdd47a2ae788e562c1454af2851c3d1906"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:f847fb0fbfb72482b1c05c59cbb275c58a55b73708a7f77a83f8035ee3c86497"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:97f2ce529d2a70a60c290f6ab269a2bbf1d3b47b9724dccc84339b85f7afb044"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e2957fdad10bb83b1982b02deb3604a3f6911a5e545f518b59c741086f92d152"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d5262383634626eb45c536017204b8163a03bc43bda880cf1bdd7885db9a163"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:364587827d7cbd41afa0782adc2d2d19e3f07d355b0750a02a8e33ad27a9c368"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecc24af7f905f3d6efb371a01680116ffea8d64e266618fb9ad1602a9b4f7934"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dc86aa6b29d174713c5f4caac35ffb7f232e3e649113e8d13812b35ab078228"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3dcfbe7266e74a707173a12a7b355a531f2dcfbdb32f09468e664330da14874"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b23806fbdd6b510ba9ac93bb72d503066263b0fba44b71b835be9f063a84025f"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5551d68264c1bb6943f542da83a4dc8940ede52c5847ef158698799cc28d14f5"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:13d8675a1fa7e2b19650ca7ef9a6ec01391d4bb12ab9e0793e8eb024538b4a34"}, - 
{file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9b6a5de507b9be6de688dae40143b656f7a93b10995fb8bd90deb555e7875c60"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:111a20a3c090cf244d9406e60500b6c34b2375ba3a5009e2b38fd806fe38e337"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win32.whl", hash = "sha256:22589c0b8ccc6c391ce7f776c93a8c92c96ab8d34e1a19f1bd2b12a235332632"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:6f83221db5755b8f34222e40607d87f1176a8d5d4dbda4a55a0f0b67d588a69c"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win_arm64.whl", hash = "sha256:3665b92e788578c3bb334bd5b5fa7ee1a84bafd68be438e3110861d1578c63a0"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d7df9c2194c7ec930b33c991c55dbd0c10951bd25800c0b7a7b571994ebbced5"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68bd888eafd07b09585dcc8bc2716c5ecdb7eed62827470664d25588982b2873"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1230e0f9026851a6a432beaa0ce575dda7b39fe689b576f99a0704fbb81fc9c"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b36e1c61b796ae1777f3e9e11fd39898b09d351c9384baf6e3b7e6191d8ced"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dba13d86806fcf3fe9c9919f58575e0090eadfb89c058bde02bcc7ab24e4548"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1f1a33e84056b7892c721d84475d3bde49a145126bc4c6efe0d6d0d59cb31c29"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3492c7a42b7fa9f0051d7fcce9893e95ed91c97c9ec7fb64346f3e070dd318ed"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:ece45eb2af8b00f90d10f7419322e8804bd42fb1129026f9bfe712c37508b514"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcd14cf4876f04b488f6e54a7abd3e9b31db5f5a6aba0ce90659917aaa8c088"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:521c58c72ed8a612b25cda378ff10dee17e6deb4ee99a070b723519a345527b9"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18669bb6cdf7d40738526d37e550df09ba065b5a7560f3d802287988b6cb63cf"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7abe2dbae81120a64bb4f8d3fcafe9122f328c9f86d7f327f174187a5af4ed86"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a3c0783910911f4f24655826d007c9f4360f08107410952c01ee3df98c713eb2"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:03126f9a040ff21d2a110610bfd6b93b79377ce8b4121edcb791d61b7df6eec5"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:591908240f4085e2ade5b685c6e8346e2ed44932cffeaac2fb32ddac95b55c7f"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9012d86c6397edbc9da4ac0132de7f8ee9d6ce857f4194d5684c4ddbcdd1c5c"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df596ddd3db38aa513d4c0995611267b3946e7cbe5a8761b50e9306dfec720ee"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:3ed5adb752f4308fcc8f4fb6f8eb7aa4082f9d12676fda0a74fa5564242a8107"}, - {file = "rapidfuzz-3.9.7.tar.gz", hash = "sha256:f1c7296534c1afb6f495aa95871f14ccdc197c6db42965854e483100df313030"}, +python-versions = ">=3.9" +files = [ + {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:884453860de029380dded8f3c1918af2d8eb5adf8010261645c7e5c88c2b5428"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718c9bd369288aca5fa929df6dbf66fdbe9768d90940a940c0b5cdc96ade4309"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e3724b7dab761c01816aaa64b0903734d999d5589daf97c14ef5cc0629a8e"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1af60988d47534246d9525f77288fdd9de652608a4842815d9018570b959acc6"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3084161fc3e963056232ef8d937449a2943852e07101f5a136c8f3cfa4119217"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cd67d3d017296d98ff505529104299f78433e4b8af31b55003d901a62bbebe9"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11a127ac590fc991e8a02c2d7e1ac86e8141c92f78546f18b5c904064a0552c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aadce42147fc09dcef1afa892485311e824c050352e1aa6e47f56b9b27af4cf0"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b54853c2371bf0e38d67da379519deb6fbe70055efb32f6607081641af3dc752"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ce19887268e90ee81a3957eef5e46a70ecc000713796639f83828b950343f49e"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f39a2a5ded23b9b9194ec45740dce57177b80f86c6d8eba953d3ff1a25c97766"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0ec338d5f4ad8d9339a88a08db5c23e7f7a52c2b2a10510c48a0cef1fb3f0ddc"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win32.whl", hash = "sha256:56fd15ea8f4c948864fa5ebd9261c67cf7b89a1c517a0caef4df75446a7af18c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:43dfc5e733808962a822ff6d9c29f3039a3cfb3620706f5953e17cfe4496724c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win_arm64.whl", hash = "sha256:ae7966f205b5a7fde93b44ca8fed37c1c8539328d7f179b1197de34eceaceb5f"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb0013795b40db5cf361e6f21ee7cda09627cf294977149b50e217d7fe9a2f03"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69ef5b363afff7150a1fbe788007e307b9802a2eb6ad92ed51ab94e6ad2674c6"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c582c46b1bb0b19f1a5f4c1312f1b640c21d78c371a6615c34025b16ee56369b"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:288f6f6e7410cacb115fb851f3f18bf0e4231eb3f6cb5bd1cec0e7b25c4d039d"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e29a13d2fd9be3e7d8c26c7ef4ba60b5bc7efbc9dbdf24454c7e9ebba31768"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea2da0459b951ee461bd4e02b8904890bd1c4263999d291c5cd01e6620177ad4"}, + {file = 
"rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:457827ba82261aa2ae6ac06a46d0043ab12ba7216b82d87ae1434ec0f29736d6"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5d350864269d56f51ab81ab750c9259ae5cad3152c0680baef143dcec92206a1"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a9b8f51e08c3f983d857c3889930af9ddecc768453822076683664772d87e374"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7f3a6aa6e70fc27e4ff5c479f13cc9fc26a56347610f5f8b50396a0d344c5f55"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:803f255f10d63420979b1909ef976e7d30dec42025c9b067fc1d2040cc365a7e"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2026651761bf83a0f31495cc0f70840d5c0d54388f41316e3f9cb51bd85e49a5"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win32.whl", hash = "sha256:4df75b3ebbb8cfdb9bf8b213b168620b88fd92d0c16a8bc9f9234630b282db59"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f9f0bbfb6787b97c51516f3ccf97737d504db5d239ad44527673b81f598b84ab"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win_arm64.whl", hash = "sha256:10fdad800441b9c97d471a937ba7d42625f1b530db05e572f1cb7d401d95c893"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dc87073ba3a40dd65591a2100aa71602107443bf10770579ff9c8a3242edb94"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a425a0a868cf8e9c6e93e1cda4b758cdfd314bb9a4fc916c5742c934e3613480"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d5d1d75e61df060c1e56596b6b0a4422a929dff19cc3dbfd5eee762c86b61"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34f213d59219a9c3ca14e94a825f585811a68ac56b4118b4dc388b5b14afc108"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96ad46f5f56f70fab2be9e5f3165a21be58d633b90bf6e67fc52a856695e4bcf"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9178277f72d144a6c7704d7ae7fa15b7b86f0f0796f0e1049c7b4ef748a662ef"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76a35e9e19a7c883c422ffa378e9a04bc98cb3b29648c5831596401298ee51e6"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a6405d34c394c65e4f73a1d300c001f304f08e529d2ed6413b46ee3037956eb"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bd393683129f446a75d8634306aed7e377627098a1286ff3af2a4f1736742820"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b0445fa9880ead81f5a7d0efc0b9c977a947d8052c43519aceeaf56eabaf6843"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c50bc308fa29767ed8f53a8d33b7633a9e14718ced038ed89d41b886e301da32"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e89605afebbd2d4b045bccfdc12a14b16fe8ccbae05f64b4b4c64a97dad1c891"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win32.whl", hash = "sha256:2db9187f3acf3cd33424ecdbaad75414c298ecd1513470df7bda885dcb68cc15"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:50e3d0c72ea15391ba9531ead7f2068a67c5b18a6a365fef3127583aaadd1725"}, + {file = 
"rapidfuzz-3.10.0-cp312-cp312-win_arm64.whl", hash = "sha256:9eac95b4278bd53115903d89118a2c908398ee8bdfd977ae844f1bd2b02b917c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fe5231e8afd069c742ac5b4f96344a0fe4aff52df8e53ef87faebf77f827822c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:886882367dbc985f5736356105798f2ae6e794e671fc605476cbe2e73838a9bb"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33e13e537e3afd1627d421a142a12bbbe601543558a391a6fae593356842f6e"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:094c26116d55bf9c53abd840d08422f20da78ec4c4723e5024322321caedca48"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:545fc04f2d592e4350f59deb0818886c1b444ffba3bec535b4fbb97191aaf769"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:916a6abf3632e592b937c3d04c00a6efadd8fd30539cdcd4e6e4d92be7ca5d90"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6ec40cef63b1922083d33bfef2f91fc0b0bc07b5b09bfee0b0f1717d558292"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c77a7330dd15c7eb5fd3631dc646fc96327f98db8181138766bd14d3e905f0ba"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:949b5e9eeaa4ecb4c7e9c2a4689dddce60929dd1ff9c76a889cdbabe8bbf2171"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b5363932a5aab67010ae1a6205c567d1ef256fb333bc23c27582481606be480c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5dd6eec15b13329abe66cc241b484002ecb0e17d694491c944a22410a6a9e5e2"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79e7f98525b60b3c14524e0a4e1fedf7654657b6e02eb25f1be897ab097706f3"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win32.whl", hash = "sha256:d29d1b9857c65f8cb3a29270732e1591b9bacf89de9d13fa764f79f07d8f1fd2"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:fa9720e56663cc3649d62b4b5f3145e94b8f5611e8a8e1b46507777249d46aad"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win_arm64.whl", hash = "sha256:eda4c661e68dddd56c8fbfe1ca35e40dd2afd973f7ebb1605f4d151edc63dff8"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cffbc50e0767396ed483900900dd58ce4351bc0d40e64bced8694bd41864cc71"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c038b9939da3035afb6cb2f465f18163e8f070aba0482923ecff9443def67178"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca366c2e2a54e2f663f4529b189fdeb6e14d419b1c78b754ec1744f3c01070d4"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c4c82b1689b23b1b5e6a603164ed2be41b6f6de292a698b98ba2381e889eb9d"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98f6ebe28831a482981ecfeedc8237047878424ad0c1add2c7f366ba44a20452"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd1a7676ee2a4c8e2f7f2550bece994f9f89e58afb96088964145a83af7408b"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ec9139baa3f85b65adc700eafa03ed04995ca8533dd56c924f0e458ffec044ab"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:26de93e6495078b6af4c4d93a42ca067b16cc0e95699526c82ab7d1025b4d3bf"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f3a0bda83c18195c361b5500377d0767749f128564ca95b42c8849fd475bb327"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:63e4c175cbce8c3adc22dca5e6154588ae673f6c55374d156f3dac732c88d7de"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4dd3d8443970eaa02ab5ae45ce584b061f2799cd9f7e875190e2617440c1f9d4"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5ddb2388610799fc46abe389600625058f2a73867e63e20107c5ad5ffa57c47"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win32.whl", hash = "sha256:2e9be5d05cd960914024412b5406fb75a82f8562f45912ff86255acbfdbfb78e"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:47aca565a39c9a6067927871973ca827023e8b65ba6c5747f4c228c8d7ddc04f"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win_arm64.whl", hash = "sha256:b0732343cdc4273b5921268026dd7266f75466eb21873cb7635a200d9d9c3fac"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f744b5eb1469bf92dd143d36570d2bdbbdc88fe5cb0b5405e53dd34f479cbd8a"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b67cc21a14327a0eb0f47bc3d7e59ec08031c7c55220ece672f9476e7a8068d3"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe5783676f0afba4a522c80b15e99dbf4e393c149ab610308a8ef1f04c6bcc8"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4688862f957c8629d557d084f20b2d803f8738b6c4066802a0b1cc472e088d9"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20bd153aacc244e4c907d772c703fea82754c4db14f8aa64d75ff81b7b8ab92d"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:50484d563f8bfa723c74c944b0bb15b9e054db9c889348c8c307abcbee75ab92"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5897242d455461f2c5b82d7397b29341fd11e85bf3608a522177071044784ee8"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:116c71a81e046ba56551d8ab68067ca7034d94b617545316d460a452c5c3c289"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0a547e4350d1fa32624d3eab51eff8cf329f4cae110b4ea0402486b1da8be40"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:399b9b79ccfcf50ca3bad7692bc098bb8eade88d7d5e15773b7f866c91156d0c"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7947a425d1be3e744707ee58c6cb318b93a56e08f080722dcc0347e0b7a1bb9a"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:94c48b4a2a4b1d22246f48e2b11cae01ec7d23f0c9123f8bb822839ad79d0a88"}, + {file = "rapidfuzz-3.10.0.tar.gz", hash = "sha256:6b62af27e65bb39276a66533655a2fa3c60a487b03935721c45b7809527979be"}, ] [package.extras] -full = ["numpy"] +all = ["numpy"] [[package]] name = "regex" -version = "2024.7.24" +version = "2024.9.11" description = "Alternative regular expression module, to replace re." 
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"},
-    {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"},
-    {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"},
-    {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"},
-    {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"},
-    {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"},
-    {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"},
-    {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"},
-    {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"},
-    {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"},
-    {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"},
-    {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"},
-    {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"},
-    {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"},
-    {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"},
-    {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"},
-    {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"},
-    {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"},
-    {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"},
-    {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"},
-    {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"},
-    {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"},
-    {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"},
-    {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"},
-    {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"},
-    {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"},
-    {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"},
-    {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"},
-    {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"},
-    {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"},
-    {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"},
-    {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"},
-    {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"},
-    {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"},
-    {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"},
-    {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"},
-    {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"},
-    {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"},
-    {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"},
-    {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"},
-    {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"},
-    {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"},
-    {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"},
-    {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"},
-    {file = "regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"},
-    {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"},
-    {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"},
-    {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"},
-    {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"},
-    {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"},
-    {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"},
-    {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"},
-    {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"},
-    {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"},
-    {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"},
-    {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"},
-    {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"},
-    {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"},
-    {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"},
-    {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"},
-    {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"},
-    {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"},
-    {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"},
-    {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"},
-    {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"},
-    {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"},
-    {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"},
-    {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"},
-    {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"},
-    {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"},
-    {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"},
-    {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"},
-    {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"},
-    {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"},
-    {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"},
-    {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"},
-    {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"},
-    {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"},
-    {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"},
+    {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"},
+    {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"},
+    {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"},
+    {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"},
+    {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"},
+    {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"},
+    {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"},
+    {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"},
+    {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"},
+    {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"},
+    {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"},
+    {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"},
+    {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"},
+    {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"},
+    {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"},
+    {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"},
+    {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"},
+    {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"},
+    {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"},
+    {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"},
+    {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"},
+    {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"},
+    {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"},
+    {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"},
+    {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"},
+    {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"},
+    {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"},
+    {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"},
+    {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"},
+    {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"},
+    {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"},
+    {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"},
+    {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"},
+    {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"},
+    {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"},
+    {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"},
+    {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"},
+    {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"},
+    {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"},
+    {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"},
+    {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"},
+    {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"},
+    {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"},
+    {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"},
+    {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"},
+    {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"},
+    {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"},
+    {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"},
+    {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"},
+    {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"},
+    {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"},
+    {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"},
+    {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"},
+    {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"},
+    {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"},
+    {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"},
+    {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"},
+    {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"},
+    {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"},
+    {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"},
+    {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"},
+    {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"},
+    {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"},
+    {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"},
+    {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"},
+    {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"},
+    {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"},
+    {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"},
+    {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"},
+    {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"},
+    {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"},
+    {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"},
+    {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"},
+    {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"},
+    {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"},
+    {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"},
+    {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"},
+    {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"},
+    {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"},
+    {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"},
+    {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"},
+    {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"},
+    {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"},
+    {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"},
+    {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"},
+    {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"},
+    {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"},
+    {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"},
+    {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"},
+    {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"},
+    {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"},
+    {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"},
+    {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"},
+    {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"},
 ]

 [[package]]
@@ -3391,39 +3427,21 @@ urllib3 = ">=1.21.1,<3"
 socks = ["PySocks (>=1.5.6,!=1.5.7)"]
 use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]

-[[package]]
-name = "requests-oauthlib"
-version = "2.0.0"
-description = "OAuthlib authentication support for Requests."
-optional = false
-python-versions = ">=3.4"
-files = [
-    {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"},
-    {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"},
-]
-
-[package.dependencies]
-oauthlib = ">=3.0.0"
-requests = ">=2.0.0"
-
-[package.extras]
-rsa = ["oauthlib[signedtoken] (>=3.0.0)"]
-
 [[package]]
 name = "rich"
-version = "13.8.0"
+version = "13.9.2"
 description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
 optional = false
-python-versions = ">=3.7.0"
+python-versions = ">=3.8.0"
 files = [
-    {file = "rich-13.8.0-py3-none-any.whl", hash = "sha256:2e85306a063b9492dffc86278197a60cbece75bcb766022f3436f567cae11bdc"},
-    {file = "rich-13.8.0.tar.gz", hash = "sha256:a5ac1f1cd448ade0d59cc3356f7db7a7ccda2c8cbae9c7a90c28ff463d3e91f4"},
+    {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"},
+    {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"},
 ]

 [package.dependencies]
 markdown-it-py = ">=2.2.0"
 pygments = ">=2.13.0,<3.0.0"
-typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""}

 [package.extras]
 jupyter = ["ipywidgets (>=7.5.1,<9)"]
@@ -3444,148 +3462,153 @@ pyasn1 = ">=0.1.3"

 [[package]]
 name = "scikit-image"
-version = "0.21.0"
+version = "0.24.0"
 description = "Image processing in Python"
 optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "scikit_image-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:978ac3302252155a8556cdfe067bad2d18d5ccef4e91c2f727bc564ed75566bc"},
-    {file = "scikit_image-0.21.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:82c22e008527e5ee26ab08e3ce919998ef164d538ff30b9e5764b223cfda06b1"},
-    {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd29d2631d3e975c377066acfc1f4cb2cc95e2257cf70e7fedfcb96441096e88"},
-    {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6c12925ceb9f3aede555921e26642d601b2d37d1617002a2636f2cb5178ae2f"},
-    {file = "scikit_image-0.21.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f538d4de77e4f3225d068d9ea2965bed3f7dda7f457a8f89634fa22ffb9ad8c"},
-    {file = "scikit_image-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ec9bab6920ac43037d7434058b67b5778d42c60f67b8679239f48a471e7ed6f8"},
-    {file = "scikit_image-0.21.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:a54720430dba833ffbb6dedd93d9f0938c5dd47d20ab9ba3e4e61c19d95f6f19"},
-    {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e40dd102da14cdadc09210f930b4556c90ff8f99cd9d8bcccf9f73a86c44245"},
-    {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff5719c7eb99596a39c3e1d9b564025bae78ecf1da3ee6842d34f6965b5f1474"},
-    {file = "scikit_image-0.21.0-cp311-cp311-win_amd64.whl", hash = "sha256:146c3824253eee9ff346c4ad10cb58376f91aefaf4a4bb2fe11aa21691f7de76"},
-    {file = "scikit_image-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e1b09f81a99c9c390215929194847b3cd358550b4b65bb6e42c5393d69cb74a"},
-    {file = "scikit_image-0.21.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9f7b5fb4a22f0d5ae0fa13beeb887c925280590145cd6d8b2630794d120ff7c7"},
-    {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4814033717f0b6491fee252facb9df92058d6a72ab78dd6408a50f3915a88b8"},
-    {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b0d6ed6502cca0c9719c444caafa0b8cda0f9e29e01ca42f621a240073284be"},
-    {file = "scikit_image-0.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:9194cb7bc21215fde6c1b1e9685d312d2aa8f65ea9736bc6311126a91c860032"},
-    {file = "scikit_image-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54df1ddc854f37a912d42bc724e456e86858107e94048a81a27720bc588f9937"},
-    {file = "scikit_image-0.21.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c01e3ab0a1fabfd8ce30686d4401b7ed36e6126c9d4d05cb94abf6bdc46f7ac9"},
-    {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ef5d8d1099317b7b315b530348cbfa68ab8ce32459de3c074d204166951025c"},
-    {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b1e96c59cab640ca5c5b22c501524cfaf34cbe0cb51ba73bd9a9ede3fb6e1d"},
-    {file = "scikit_image-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:9cffcddd2a5594c0a06de2ae3e1e25d662745a26f94fda31520593669677c010"},
-    {file = "scikit_image-0.21.0.tar.gz", hash = "sha256:b33e823c54e6f11873ea390ee49ef832b82b9f70752c8759efd09d5a4e3d87f0"},
+python-versions = ">=3.9"
+files = [
+    {file = "scikit_image-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb3bc0264b6ab30b43c4179ee6156bc18b4861e78bb329dd8d16537b7bbf827a"},
+    {file = "scikit_image-0.24.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9c7a52e20cdd760738da38564ba1fed7942b623c0317489af1a598a8dedf088b"},
+    {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93f46e6ce42e5409f4d09ce1b0c7f80dd7e4373bcec635b6348b63e3c886eac8"},
+    {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39ee0af13435c57351a3397eb379e72164ff85161923eec0c38849fecf1b4764"},
+    {file = "scikit_image-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:7ac7913b028b8aa780ffae85922894a69e33d1c0bf270ea1774f382fe8bf95e7"},
+    {file = "scikit_image-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:272909e02a59cea3ed4aa03739bb88df2625daa809f633f40b5053cf09241831"},
+    {file = "scikit_image-0.24.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:190ebde80b4470fe8838764b9b15f232a964f1a20391663e31008d76f0c696f7"},
+    {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c98cc695005faf2b79904e4663796c977af22586ddf1b12d6af2fa22842dc2"},
+    {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa27b3a0dbad807b966b8db2d78da734cb812ca4787f7fbb143764800ce2fa9c"},
+    {file = "scikit_image-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:dacf591ac0c272a111181afad4b788a27fe70d213cfddd631d151cbc34f8ca2c"},
+    {file = "scikit_image-0.24.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6fccceb54c9574590abcddc8caf6cefa57c13b5b8b4260ab3ff88ad8f3c252b3"},
+    {file = "scikit_image-0.24.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ccc01e4760d655aab7601c1ba7aa4ddd8b46f494ac46ec9c268df6f33ccddf4c"},
+    {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18836a18d3a7b6aca5376a2d805f0045826bc6c9fc85331659c33b4813e0b563"},
+    {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8579bda9c3f78cb3b3ed8b9425213c53a25fa7e994b7ac01f2440b395babf660"},
+    {file = "scikit_image-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:82ab903afa60b2da1da2e6f0c8c65e7c8868c60a869464c41971da929b3e82bc"},
+    {file = "scikit_image-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef04360eda372ee5cd60aebe9be91258639c86ae2ea24093fb9182118008d009"},
+    {file = "scikit_image-0.24.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e9aadb442360a7e76f0c5c9d105f79a83d6df0e01e431bd1d5757e2c5871a1f3"},
+    {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e37de6f4c1abcf794e13c258dc9b7d385d5be868441de11c180363824192ff7"},
+    {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4688c18bd7ec33c08d7bf0fd19549be246d90d5f2c1d795a89986629af0a1e83"},
+    {file = "scikit_image-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:56dab751d20b25d5d3985e95c9b4e975f55573554bd76b0aedf5875217c93e69"},
+    {file = "scikit_image-0.24.0.tar.gz", hash = "sha256:5d16efe95da8edbeb363e0c4157b99becbd650a60b77f6e3af5768b66cf007ab"},
 ]

 [package.dependencies]
-imageio = ">=2.27"
-lazy_loader = ">=0.2"
+imageio = ">=2.33"
+lazy-loader = ">=0.4"
 networkx = ">=2.8"
-numpy = ">=1.21.1"
+numpy = ">=1.23"
 packaging = ">=21"
-pillow = ">=9.0.1"
-PyWavelets = ">=1.1.1"
-scipy = ">=1.8"
+pillow = ">=9.1"
+scipy = ">=1.9"
 tifffile = ">=2022.8.12"

 [package.extras]
-build = ["Cython (>=0.29.32)", "build", "meson-python (>=0.13)", "ninja", "numpy (>=1.21.1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.3)", "wheel"]
+build = ["Cython (>=3.0.4)", "build", "meson-python (>=0.15)", "ninja", "numpy (>=2.0.0rc1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.8)", "wheel"]
 data = ["pooch (>=1.6.0)"]
-default = ["PyWavelets (>=1.1.1)", "imageio (>=2.27)", "lazy_loader (>=0.2)", "networkx (>=2.8)", "numpy (>=1.21.1)", "packaging (>=21)", "pillow (>=9.0.1)", "scipy (>=1.8)", "tifffile (>=2022.8.12)"]
-developer = ["pre-commit", "rtoml"]
-docs = ["dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.5)", "myst-parser", "numpydoc (>=1.5)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.13)", "pytest-runner", "scikit-learn (>=0.24.0)", "seaborn (>=0.11)", "sphinx (>=5.0)", "sphinx-copybutton", "sphinx-gallery (>=0.11)", "sphinx_design (>=0.3)", "tifffile (>=2022.8.12)"]
-optional = ["SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=0.24.0)"]
-test = ["asv", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-faulthandler", "pytest-localserver"]
+developer = ["ipython", "pre-commit", "tomli"]
+docs = ["PyWavelets (>=1.1.1)", "dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.6)", "myst-parser", "numpydoc (>=1.7)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.15.2)", "pytest-doctestplus", "pytest-runner", "scikit-learn (>=1.1)", "seaborn (>=0.11)", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-gallery (>=0.14)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"]
+optional = ["PyWavelets (>=1.1.1)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.6)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=1.1)"]
+test = ["asv", "numpydoc (>=1.7)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-doctestplus", "pytest-faulthandler", "pytest-localserver"]

 [[package]]
 name = "scikit-learn"
-version = "1.3.2"
+version = "1.5.2"
 description = "A set of python modules for machine learning and data mining"
 optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"},
-    {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"},
-    {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"},
-    {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"},
-    {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"},
-    {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"},
-    {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"},
-    {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"},
-    {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"},
-    {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"},
-    {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"},
-    {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"},
-    {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"},
-    {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"},
-    {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"},
-    {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"},
-    {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"},
-    {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"},
-    {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"},
-    {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"},
-    {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"},
-    {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"},
-    {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"},
-    {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"},
-    {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"},
-    {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"},
+python-versions = ">=3.9"
+files = [
+    {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"},
+    {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"},
+    {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"},
+    {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"},
+    {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"},
+    {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"},
+    {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"},
+    {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"},
+    {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"},
+    {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"},
+    {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"},
+    {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"},
+    {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"},
+    {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"},
+    {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"},
+    {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"},
+ {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, + {file = "scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, + {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, + {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, + {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, + {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, ] [package.dependencies] -joblib = ">=1.1.1" -numpy = ">=1.17.3,<2.0" -scipy = ">=1.5.0" -threadpoolctl = ">=2.0.0" +joblib = ">=1.2.0" +numpy = ">=1.19.5" +scipy = ">=1.6.0" +threadpoolctl = ">=3.1.0" [package.extras] -benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] -tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] +benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] +examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] +install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] 
+maintenance = ["conda-lock (==2.5.6)"] +tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] [[package]] name = "scipy" -version = "1.10.1" +version = "1.13.1" description = "Fundamental algorithms for scientific computing in Python" optional = false -python-versions = "<3.12,>=3.8" -files = [ - {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, - {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, - {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, - {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, - {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, - {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, - {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, - {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, - {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, - {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, - {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, - {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, - {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, - {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, - {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, - {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, - {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, - {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, - {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, - {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, - {file = 
"scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, +python-versions = ">=3.9" +files = [ + {file = "scipy-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20335853b85e9a49ff7572ab453794298bcf0354d8068c5f6775a0eabf350aca"}, + {file = "scipy-1.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d605e9c23906d1994f55ace80e0125c587f96c020037ea6aa98d01b4bd2e222f"}, + {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfa31f1def5c819b19ecc3a8b52d28ffdcc7ed52bb20c9a7589669dd3c250989"}, + {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26264b282b9da0952a024ae34710c2aff7d27480ee91a2e82b7b7073c24722f"}, + {file = "scipy-1.13.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eccfa1906eacc02de42d70ef4aecea45415f5be17e72b61bafcfd329bdc52e94"}, + {file = "scipy-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:2831f0dc9c5ea9edd6e51e6e769b655f08ec6db6e2e10f86ef39bd32eb11da54"}, + {file = "scipy-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27e52b09c0d3a1d5b63e1105f24177e544a222b43611aaf5bc44d4a0979e32f9"}, + {file = "scipy-1.13.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:54f430b00f0133e2224c3ba42b805bfd0086fe488835effa33fa291561932326"}, + {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e89369d27f9e7b0884ae559a3a956e77c02114cc60a6058b4e5011572eea9299"}, + {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a78b4b3345f1b6f68a763c6e25c0c9a23a9fd0f39f5f3d200efe8feda560a5fa"}, + {file = "scipy-1.13.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45484bee6d65633752c490404513b9ef02475b4284c4cfab0ef946def50b3f59"}, + {file = "scipy-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:5713f62f781eebd8d597eb3f88b8bf9274e79eeabf63afb4a737abc6c84ad37b"}, + {file = "scipy-1.13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5d72782f39716b2b3509cd7c33cdc08c96f2f4d2b06d51e52fb45a19ca0c86a1"}, + {file = "scipy-1.13.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:017367484ce5498445aade74b1d5ab377acdc65e27095155e448c88497755a5d"}, + {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:949ae67db5fa78a86e8fa644b9a6b07252f449dcf74247108c50e1d20d2b4627"}, + {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ade0e53bc1f21358aa74ff4830235d716211d7d077e340c7349bc3542e884"}, + {file = "scipy-1.13.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2ac65fb503dad64218c228e2dc2d0a0193f7904747db43014645ae139c8fad16"}, + {file = "scipy-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:cdd7dacfb95fea358916410ec61bbc20440f7860333aee6d882bb8046264e949"}, + {file = "scipy-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:436bbb42a94a8aeef855d755ce5a465479c721e9d684de76bf61a62e7c2b81d5"}, + {file = "scipy-1.13.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8335549ebbca860c52bf3d02f80784e91a004b71b059e3eea9678ba994796a24"}, + {file = "scipy-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d533654b7d221a6a97304ab63c41c96473ff04459e404b83275b60aa8f4b7004"}, + {file = "scipy-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637e98dcf185ba7f8e663e122ebf908c4702420477ae52a04f9908707456ba4d"}, + {file = "scipy-1.13.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:a014c2b3697bde71724244f63de2476925596c24285c7a637364761f8710891c"}, + {file = "scipy-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:392e4ec766654852c25ebad4f64e4e584cf19820b980bc04960bca0b0cd6eaa2"}, + {file = "scipy-1.13.1.tar.gz", hash = "sha256:095a87a0312b08dfd6a6155cbbd310a8c51800fc931b8c0b84003014b874ed3c"}, ] [package.dependencies] -numpy = ">=1.19.5,<1.27.0" +numpy = ">=1.22.4,<2.3" [package.extras] -dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] -doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] +test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "sentry-sdk" -version = "2.13.0" +version = "2.16.0" description = "Python client for Sentry (https://sentry.io)" optional = false python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.13.0-py2.py3-none-any.whl", hash = "sha256:6beede8fc2ab4043da7f69d95534e320944690680dd9a963178a49de71d726c6"}, - {file = "sentry_sdk-2.13.0.tar.gz", hash = "sha256:8d4a576f7a98eb2fdb40e13106e41f330e5c79d72a68be1316e7852cf4995260"}, + {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"}, + {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"}, ] [package.dependencies] @@ -3608,6 +3631,7 @@ falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"] +http2 = ["httpcore[http2] (==1.*)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] huggingface-hub = ["huggingface-hub (>=0.22)"] @@ -3730,18 +3754,18 @@ test = ["pytest"] [[package]] name = "setuptools" -version = "74.1.0" +version = "75.2.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-74.1.0-py3-none-any.whl", hash = "sha256:cee604bd76cc092355a4e43ec17aee5369095974f41f088676724dc6bc2c9ef8"}, - {file = "setuptools-74.1.0.tar.gz", hash = "sha256:bea195a800f510ba3a2bc65645c88b7e016fe36709fefc58a880c4ae8a0138d7"}, + {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"}, + {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", 
"jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] @@ -3812,60 +3836,68 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.32" +version = "2.0.36" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-win32.whl", hash = "sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-win_amd64.whl", hash = "sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9"}, - {file = 
"SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-win32.whl", hash = "sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-win_amd64.whl", hash = "sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-win32.whl", hash = "sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-win_amd64.whl", hash = "sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632"}, - {file = 
"SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-win32.whl", hash = "sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-win_amd64.whl", hash = "sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84"}, - {file = "SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202"}, - {file = "SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, + {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = 
"sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, + {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = 
"sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, + {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, + {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, + {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, + {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, ] [package.dependencies] @@ -3878,7 +3910,7 @@ aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] @@ -3914,13 +3946,13 @@ doc = ["sphinx"] [[package]] name = "sympy" -version = "1.13.2" +version = "1.13.3" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.13.2-py3-none-any.whl", hash = 
"sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, - {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, ] [package.dependencies] @@ -3931,27 +3963,25 @@ dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] [[package]] name = "tensorboard" -version = "2.14.0" +version = "2.18.0" description = "TensorBoard lets you watch Tensors Flow" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "tensorboard-2.14.0-py3-none-any.whl", hash = "sha256:3667f9745d99280836ad673022362c840f60ed8fefd5a3e30bf071f5a8fd0017"}, + {file = "tensorboard-2.18.0-py3-none-any.whl", hash = "sha256:107ca4821745f73e2aefa02c50ff70a9b694f39f790b11e6f682f7d326745eab"}, ] [package.dependencies] absl-py = ">=0.4" -google-auth = ">=1.6.3,<3" -google-auth-oauthlib = ">=0.5,<1.1" grpcio = ">=1.48.2" markdown = ">=2.6.8" numpy = ">=1.12.0" -protobuf = ">=3.19.6" -requests = ">=2.21.0,<3" +packaging = "*" +protobuf = ">=3.19.6,<4.24.0 || >4.24.0" setuptools = ">=41.0.0" +six = ">1.9" tensorboard-data-server = ">=0.7.0,<0.8.0" werkzeug = ">=1.0.1" -wheel = ">=0.26" [[package]] name = "tensorboard-data-server" @@ -3978,20 +4008,25 @@ files = [ [[package]] name = "tifffile" -version = "2023.7.10" +version = "2024.8.30" description = "Read and write TIFF files" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "tifffile-2023.7.10-py3-none-any.whl", hash = "sha256:94dfdec321ace96abbfe872a66cfd824800c099a2db558443453eebc2c11b304"}, - {file = "tifffile-2023.7.10.tar.gz", hash = "sha256:c06ec460926d16796eeee249a560bcdddf243daae36ac62af3c84a953cd60b4a"}, + {file = "tifffile-2024.8.30-py3-none-any.whl", hash = "sha256:8bc59a8f02a2665cd50a910ec64961c5373bee0b8850ec89d3b7b485bf7be7ad"}, + {file = "tifffile-2024.8.30.tar.gz", hash = "sha256:2c9508fe768962e30f87def61819183fb07692c258cb175b3c114828368485a4"}, ] [package.dependencies] numpy = "*" [package.extras] -all = ["defusedxml", "fsspec", "imagecodecs (>=2023.1.23)", "lxml", "matplotlib", "zarr"] +all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"] +codecs = ["imagecodecs (>=2023.8.12)"] +plot = ["matplotlib"] +test = ["cmapfile", "czifile", "dask", "defusedxml", "fsspec", "imagecodecs", "lfdfiles", "lxml", "ndtiff", "oiffile", "psdtags", "pytest", "roifile", "xarray", "zarr"] +xml = ["defusedxml", "lxml"] +zarr = ["fsspec", "zarr"] [[package]] name = "toml" @@ -4006,42 +4041,42 @@ files = [ [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] name = "torch" -version = "2.4.0" +version = "2.4.1" description = "Tensors and Dynamic neural 
networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.8.0" files = [ - {file = "torch-2.4.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:4ed94583e244af51d6a8d28701ca5a9e02d1219e782f5a01dd401f90af17d8ac"}, - {file = "torch-2.4.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c4ca297b7bd58b506bfd6e78ffd14eb97c0e7797dcd7965df62f50bb575d8954"}, - {file = "torch-2.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:2497cbc7b3c951d69b276ca51fe01c2865db67040ac67f5fc20b03e41d16ea4a"}, - {file = "torch-2.4.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:685418ab93730efbee71528821ff54005596970dd497bf03c89204fb7e3f71de"}, - {file = "torch-2.4.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e743adadd8c8152bb8373543964551a7cb7cc20ba898dc8f9c0cdbe47c283de0"}, - {file = "torch-2.4.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:7334325c0292cbd5c2eac085f449bf57d3690932eac37027e193ba775703c9e6"}, - {file = "torch-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:97730014da4c57ffacb3c09298c6ce05400606e890bd7a05008d13dd086e46b1"}, - {file = "torch-2.4.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f169b4ea6dc93b3a33319611fcc47dc1406e4dd539844dcbd2dec4c1b96e166d"}, - {file = "torch-2.4.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:997084a0f9784d2a89095a6dc67c7925e21bf25dea0b3d069b41195016ccfcbb"}, - {file = "torch-2.4.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:bc3988e8b36d1e8b998d143255d9408d8c75da4ab6dd0dcfd23b623dfb0f0f57"}, - {file = "torch-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:3374128bbf7e62cdaed6c237bfd39809fbcfaa576bee91e904706840c3f2195c"}, - {file = "torch-2.4.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:91aaf00bfe1ffa44dc5b52809d9a95129fca10212eca3ac26420eb11727c6288"}, - {file = "torch-2.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cc30457ea5489c62747d3306438af00c606b509d78822a88f804202ba63111ed"}, - {file = "torch-2.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a046491aaf96d1215e65e1fa85911ef2ded6d49ea34c8df4d0638879f2402eef"}, - {file = "torch-2.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:688eec9240f3ce775f22e1e1a5ab9894f3d5fe60f3f586deb7dbd23a46a83916"}, - {file = "torch-2.4.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:3af4de2a618fb065e78404c4ba27a818a7b7957eaeff28c6c66ce7fb504b68b8"}, - {file = "torch-2.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:618808d3f610d5f180e47a697d4ec90b810953bb1e020f424b2ac7fb0884b545"}, - {file = "torch-2.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ed765d232d23566052ba83632ec73a4fccde00b4c94ad45d63b471b09d63b7a7"}, - {file = "torch-2.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2feb98ac470109472fb10dfef38622a7ee08482a16c357863ebc7bc7db7c8f7"}, - {file = "torch-2.4.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:8940fc8b97a4c61fdb5d46a368f21f4a3a562a17879e932eb51a5ec62310cb31"}, + {file = "torch-2.4.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:362f82e23a4cd46341daabb76fba08f04cd646df9bfaf5da50af97cb60ca4971"}, + {file = "torch-2.4.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:e8ac1985c3ff0f60d85b991954cfc2cc25f79c84545aead422763148ed2759e3"}, + {file = "torch-2.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91e326e2ccfb1496e3bee58f70ef605aeb27bd26be07ba64f37dcaac3d070ada"}, + {file = "torch-2.4.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d36a8ef100f5bff3e9c3cea934b9e0d7ea277cb8210c7152d34a9a6c5830eadd"}, + {file = "torch-2.4.1-cp311-cp311-manylinux1_x86_64.whl", hash = 
"sha256:0b5f88afdfa05a335d80351e3cea57d38e578c8689f751d35e0ff36bce872113"}, + {file = "torch-2.4.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:ef503165f2341942bfdf2bd520152f19540d0c0e34961232f134dc59ad435be8"}, + {file = "torch-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:092e7c2280c860eff762ac08c4bdcd53d701677851670695e0c22d6d345b269c"}, + {file = "torch-2.4.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:ddddbd8b066e743934a4200b3d54267a46db02106876d21cf31f7da7a96f98ea"}, + {file = "torch-2.4.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:fdc4fe11db3eb93c1115d3e973a27ac7c1a8318af8934ffa36b0370efe28e042"}, + {file = "torch-2.4.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:18835374f599207a9e82c262153c20ddf42ea49bc76b6eadad8e5f49729f6e4d"}, + {file = "torch-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:ebea70ff30544fc021d441ce6b219a88b67524f01170b1c538d7d3ebb5e7f56c"}, + {file = "torch-2.4.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:72b484d5b6cec1a735bf3fa5a1c4883d01748698c5e9cfdbeb4ffab7c7987e0d"}, + {file = "torch-2.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c99e1db4bf0c5347107845d715b4aa1097e601bdc36343d758963055e9599d93"}, + {file = "torch-2.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b57f07e92858db78c5b72857b4f0b33a65b00dc5d68e7948a8494b0314efb880"}, + {file = "torch-2.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:f18197f3f7c15cde2115892b64f17c80dbf01ed72b008020e7da339902742cf6"}, + {file = "torch-2.4.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:5fc1d4d7ed265ef853579caf272686d1ed87cebdcd04f2a498f800ffc53dab71"}, + {file = "torch-2.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:40f6d3fe3bae74efcf08cb7f8295eaddd8a838ce89e9d26929d4edd6d5e4329d"}, + {file = "torch-2.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:c9299c16c9743001ecef515536ac45900247f4338ecdf70746f2461f9e4831db"}, + {file = "torch-2.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:6bce130f2cd2d52ba4e2c6ada461808de7e5eccbac692525337cfb4c19421846"}, + {file = "torch-2.4.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:a38de2803ee6050309aac032676536c3d3b6a9804248537e38e098d0e14817ec"}, ] [package.dependencies] @@ -4060,6 +4095,7 @@ nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \" nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +setuptools = "*" sympy = "*" triton = {version = "3.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.13\""} typing-extensions = ">=4.8.0" @@ -4099,42 +4135,37 @@ plot = ["matplotlib"] [[package]] name = "torchvision" -version = "0.19.0" +version = "0.19.1" description = "image and video datasets and models for torch deep learning" optional = false python-versions = ">=3.8" files = [ - {file = "torchvision-0.19.0-1-cp310-cp310-win_amd64.whl", hash = "sha256:6ed066aae5c50465d7c4761357aefe5dbd2eb7075a33ab8c14b352fc2353ad4c"}, - {file = "torchvision-0.19.0-1-cp311-cp311-win_amd64.whl", hash = "sha256:6b1bce2e4c003d890a18f14ff289528707d918e38539ff890ef02aa31dae1b56"}, - {file = "torchvision-0.19.0-1-cp312-cp312-win_amd64.whl", hash = "sha256:13aee7a46e049c8c1e7d35a0394b0587a7e62ff3d1a822cd2bbbacb675ac4a09"}, - {file = 
"torchvision-0.19.0-1-cp38-cp38-win_amd64.whl", hash = "sha256:2acc436d043d4f81b3bc6929cbfa4ef1cdae4d8a0b04ec72ec30a497e9a38179"}, - {file = "torchvision-0.19.0-1-cp39-cp39-win_amd64.whl", hash = "sha256:b5f70f5a8bd9c8b00a076bf466b39b5cd679ef62587c47cc048adb04d9c5f155"}, - {file = "torchvision-0.19.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec874ef85dcb24c69e600f6e276af892c80cde3ffdaeb7275efda463242bc2a8"}, - {file = "torchvision-0.19.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:106842b1e475b14d9a04ee0d6f5477d43100e3bb78e9d31e37422384d0d84179"}, - {file = "torchvision-0.19.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d467d434005fd05a227a2ba7af4c591bb67e6d4a97bbd06eda8da83f43e9fd07"}, - {file = "torchvision-0.19.0-cp310-cp310-win_amd64.whl", hash = "sha256:f77ac31f7337d0f6f4b58e65582c6c93b9d9eeec7dfd7478896b5cdc19a2d60d"}, - {file = "torchvision-0.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbf3aa71a3899244fc884303ed3c4604a160824fefac77e82317a5463efc1d9b"}, - {file = "torchvision-0.19.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:ec4162dc71d9db7f0b51d0f92491929c1419605ff436e1305e50de13504a1c30"}, - {file = "torchvision-0.19.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:4e6aa4fa3f0bc3599fa071c149e651a3e6bdd67c9161794478f9f91471c406a2"}, - {file = "torchvision-0.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac5525d5cc09e425b5cf5752ecf66eefbbbd8c8cd945198ce35eb01a694e6069"}, - {file = "torchvision-0.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c09ef8ed184fa877f6251b620226e74f682b8f1d6b341456428d4955b8d9c670"}, - {file = "torchvision-0.19.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:02f1dd5cfc897957535b41b0258ec452d30de044e20c2de2c75869f7708e7656"}, - {file = "torchvision-0.19.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:be0f27a28b8e9f2ae98a31af34a4bdd2a5bf154d92bd73a5797c8d2156fb3ab6"}, - {file = "torchvision-0.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6ba7756f75c80212e51d3576f85ea204589e0c16efdb9b835dd677bc8929a67"}, - {file = "torchvision-0.19.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:854e967a16a9409e941b5bbe5aa357b23f7158bccb9de35ae20fd4945f05ecd1"}, - {file = "torchvision-0.19.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:d9afb8a3c3ce99a161a64c2a3b91cb545632a72118053cbfb84e87a02a8dcd02"}, - {file = "torchvision-0.19.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:079a696e0b2cb52e4be30afa8e9b3d7d280f02a2b5ffedd7e821fa1efd1a5a8d"}, - {file = "torchvision-0.19.0-cp38-cp38-win_amd64.whl", hash = "sha256:aaa338ff3a55a8c0f94e0e64eff6fe2af1fc933a95fd43812760e72ea66e986b"}, - {file = "torchvision-0.19.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd1279571d4b68d5a53d9b7a35aedf91c4cb1e0b08099f6a1effa7b25b8c95e7"}, - {file = "torchvision-0.19.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:4d54b5e19b7ebebca7d0b08497b4c6335264cad04c94c05fa35988d9e9eed0c4"}, - {file = "torchvision-0.19.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:5f9a598dcf82bdfc8e4436ce74763b3877dabec3b33f94613b94ede13e3e4dee"}, - {file = "torchvision-0.19.0-cp39-cp39-win_amd64.whl", hash = "sha256:ec1281c10402234d470bfd4d53663d81f4364f293b2f8fe24d4a7a1adc78c90c"}, + {file = "torchvision-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:54e8513099e6f586356c70f809d34f391af71ad182fe071cc328a28af2c40608"}, + {file = "torchvision-0.19.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:20a1f5e02bfdad7714e55fa3fa698347c11d829fa65e11e5a84df07d93350eed"}, + {file = 
"torchvision-0.19.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:7b063116164be52fc6deb4762de7f8c90bfa3a65f8d5caf17f8e2d5aadc75a04"}, + {file = "torchvision-0.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:f40b6acabfa886da1bc3768f47679c61feee6bde90deb979d9f300df8c8a0145"}, + {file = "torchvision-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:40514282b4896d62765b8e26d7091c32e17c35817d00ec4be2362ea3ba3d1787"}, + {file = "torchvision-0.19.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:5a91be061ae5d6d5b95e833b93e57ca4d3c56c5a57444dd15da2e3e7fba96050"}, + {file = "torchvision-0.19.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d71a6a6fe3a5281ca3487d4c56ad4aad20ff70f82f1d7c79bcb6e7b0c2af00c8"}, + {file = "torchvision-0.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:70dea324174f5e9981b68e4b7cd524512c106ba64aedef560a86a0bbf2fbf62c"}, + {file = "torchvision-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27ece277ff0f6cdc7fed0627279c632dcb2e58187da771eca24b0fbcf3f8590d"}, + {file = "torchvision-0.19.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:c659ff92a61f188a1a7baef2850f3c0b6c85685447453c03d0e645ba8f1dcc1c"}, + {file = "torchvision-0.19.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:c07bf43c2a145d792ecd9d0503d6c73577147ece508d45600d8aac77e4cdfcf9"}, + {file = "torchvision-0.19.1-cp312-cp312-win_amd64.whl", hash = "sha256:b4283d283675556bb0eae31d29996f53861b17cbdcdf3509e6bc050414ac9289"}, + {file = "torchvision-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4e4f5b24ea6b087b02ed492ab1e21bba3352c4577e2def14248cfc60732338"}, + {file = "torchvision-0.19.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:9281d63ead929bb19143731154cd1d8bf0b5e9873dff8578a40e90a6bec3c6fa"}, + {file = "torchvision-0.19.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:4d10bc9083c4d5fadd7edd7b729700a7be48dab4f62278df3bc73fa48e48a155"}, + {file = "torchvision-0.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ccf085ef1824fb9e16f1901285bf89c298c62dfd93267a39e8ee42c71255242f"}, + {file = "torchvision-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:731f434d91586769e255b5d70ed1a4457e0a1394a95f4aacf0e1e7e21f80c098"}, + {file = "torchvision-0.19.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:febe4f14d4afcb47cc861d8be7760ab6a123cd0817f97faf5771488cb6aa90f4"}, + {file = "torchvision-0.19.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e328309b8670a2e889b2fe76a1c2744a099c11c984da9a822357bd9debd699a5"}, + {file = "torchvision-0.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:6616f12e00a22e7f3fedbd0fccb0804c05e8fe22871668f10eae65cf3f283614"}, ] [package.dependencies] numpy = "*" pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" -torch = "2.4.0" +torch = "2.4.1" [package.extras] gdown = ["gdown (>=4.7.3)"] @@ -4201,13 +4232,13 @@ typing-extensions = ">=3.7.4.3" [[package]] name = "types-python-dateutil" -version = "2.9.0.20240821" +version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240821.tar.gz", hash = "sha256:9649d1dcb6fef1046fb18bebe9ea2aa0028b160918518c34589a46045f6ebd98"}, - {file = "types_python_dateutil-2.9.0.20240821-py3-none-any.whl", hash = "sha256:f5889fcb4e63ed4aaa379b44f93c32593d50b9a94c9a60a0c854d8cc3511cd57"}, + {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash 
= "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] [[package]] @@ -4221,6 +4252,17 @@ files = [ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +[[package]] +name = "tzdata" +version = "2024.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, +] + [[package]] name = "urllib3" version = "1.26.20" @@ -4239,13 +4281,13 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.26.3" +version = "20.26.6" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, - {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, + {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, + {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, ] [package.dependencies] @@ -4290,7 +4332,6 @@ docker-pycreds = ">=0.4.0" GitPython = ">=1.0.0,<3.1.29 || >3.1.29" pathtools = "*" protobuf = [ - {version = ">=3.12.0,<4.21.0 || >4.21.0,<5", markers = "python_version < \"3.9\" and sys_platform == \"linux\""}, {version = ">=3.15.0,<4.21.0 || >4.21.0,<5", markers = "python_version == \"3.9\" and sys_platform == \"linux\""}, {version = ">=3.19.0,<4.21.0 || >4.21.0,<5", markers = "python_version > \"3.9\" or sys_platform != \"linux\""}, ] @@ -4317,46 +4358,41 @@ sweeps = ["sweeps (>=0.2.0)"] [[package]] name = "watchdog" -version = "4.0.2" +version = "5.0.3" description = "Filesystem events monitoring" optional = false -python-versions = ">=3.8" -files = [ - {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, - {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, - {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, - {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, - {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, - {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, - {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, - {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, - {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, - {file = 
"watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, - {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, - {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, - {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, - {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, - {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, - {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, - {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, - {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, - {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, - {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, - {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, - {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, - {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, - {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, - {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, - {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, - {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, - {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = 
"sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, - {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, +python-versions = ">=3.9" +files = [ + {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:85527b882f3facda0579bce9d743ff7f10c3e1e0db0a0d0e28170a7d0e5ce2ea"}, + {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53adf73dcdc0ef04f7735066b4a57a4cd3e49ef135daae41d77395f0b5b692cb"}, + {file = "watchdog-5.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e25adddab85f674acac303cf1f5835951345a56c5f7f582987d266679979c75b"}, + {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f01f4a3565a387080dc49bdd1fefe4ecc77f894991b88ef927edbfa45eb10818"}, + {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:91b522adc25614cdeaf91f7897800b82c13b4b8ac68a42ca959f992f6990c490"}, + {file = "watchdog-5.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d52db5beb5e476e6853da2e2d24dbbbed6797b449c8bf7ea118a4ee0d2c9040e"}, + {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94d11b07c64f63f49876e0ab8042ae034674c8653bfcdaa8c4b32e71cfff87e8"}, + {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:349c9488e1d85d0a58e8cb14222d2c51cbc801ce11ac3936ab4c3af986536926"}, + {file = "watchdog-5.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:53a3f10b62c2d569e260f96e8d966463dec1a50fa4f1b22aec69e3f91025060e"}, + {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:950f531ec6e03696a2414b6308f5c6ff9dab7821a768c9d5788b1314e9a46ca7"}, + {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae6deb336cba5d71476caa029ceb6e88047fc1dc74b62b7c4012639c0b563906"}, + {file = "watchdog-5.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1021223c08ba8d2d38d71ec1704496471ffd7be42cfb26b87cd5059323a389a1"}, + {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:752fb40efc7cc8d88ebc332b8f4bcbe2b5cc7e881bccfeb8e25054c00c994ee3"}, + {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2e8f3f955d68471fa37b0e3add18500790d129cc7efe89971b8a4cc6fdeb0b2"}, + {file = "watchdog-5.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b8ca4d854adcf480bdfd80f46fdd6fb49f91dd020ae11c89b3a79e19454ec627"}, + {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:90a67d7857adb1d985aca232cc9905dd5bc4803ed85cfcdcfcf707e52049eda7"}, + {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:720ef9d3a4f9ca575a780af283c8fd3a0674b307651c1976714745090da5a9e8"}, + {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:223160bb359281bb8e31c8f1068bf71a6b16a8ad3d9524ca6f523ac666bb6a1e"}, + {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:560135542c91eaa74247a2e8430cf83c4342b29e8ad4f520ae14f0c8a19cfb5b"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dd021efa85970bd4824acacbb922066159d0f9e546389a4743d56919b6758b91"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:78864cc8f23dbee55be34cc1494632a7ba30263951b5b2e8fc8286b95845f82c"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_i686.whl", hash = "sha256:1e9679245e3ea6498494b3028b90c7b25dbb2abe65c7d07423ecfc2d6218ff7c"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64.whl", hash = 
"sha256:9413384f26b5d050b6978e6fcd0c1e7f0539be7a4f1a885061473c5deaa57221"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:294b7a598974b8e2c6123d19ef15de9abcd282b0fbbdbc4d23dfa812959a9e05"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:26dd201857d702bdf9d78c273cafcab5871dd29343748524695cecffa44a8d97"}, + {file = "watchdog-5.0.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:0f9332243355643d567697c3e3fa07330a1d1abf981611654a1f2bf2175612b7"}, + {file = "watchdog-5.0.3-py3-none-win32.whl", hash = "sha256:c66f80ee5b602a9c7ab66e3c9f36026590a0902db3aea414d59a2f55188c1f49"}, + {file = "watchdog-5.0.3-py3-none-win_amd64.whl", hash = "sha256:f00b4cf737f568be9665563347a910f8bdc76f88c2970121c86243c8cfdf90e9"}, + {file = "watchdog-5.0.3-py3-none-win_ia64.whl", hash = "sha256:49f4d36cb315c25ea0d946e018c01bb028048023b9e103d3d3943f58e109dd45"}, + {file = "watchdog-5.0.3.tar.gz", hash = "sha256:108f42a7f0345042a854d4d0ad0834b741d421330d5f575b81cb27b883500176"}, ] [package.extras] @@ -4390,20 +4426,6 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] -[[package]] -name = "wheel" -version = "0.44.0" -description = "A built-package format for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "wheel-0.44.0-py3-none-any.whl", hash = "sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f"}, - {file = "wheel-0.44.0.tar.gz", hash = "sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49"}, -] - -[package.extras] -test = ["pytest (>=6.0.0)", "setuptools (>=65)"] - [[package]] name = "wrapt" version = "1.16.0" @@ -4485,13 +4507,13 @@ files = [ [[package]] name = "zipp" -version = "3.20.1" +version = "3.20.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"}, - {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"}, + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, ] [package.extras] @@ -4504,5 +4526,5 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" -python-versions = ">=3.8,<3.12" -content-hash = "b5097c7c1112ac5503db1fd169eb8b6e3fc67f01f85dbf1fb163aa97e0dbb8d0" +python-versions = ">=3.9,<3.13" +content-hash = "dcae373e463fd4168abbcdea643d9e1efd8cc185f58c8de0d5c645ce88a8c28b" diff --git a/pyproject.toml b/pyproject.toml index ae7e9e033..273fdb370 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,15 +27,15 @@ classifiers = [ ] [tool.poetry.dependencies] -python = ">=3.8,<3.12" -torch = "^2.1.0" +python = ">=3.9,<3.13" +torch = "^2.3.0" torchvision = "*" tensorboard = "*" toml = "*" -pandas = "^1.2" -numpy = "^1.17" -scikit-learn = "^1.0" -scikit-image = "^0.21" +pandas = "^2" +numpy = "^2" +scikit-learn = "^1" +scikit-image = "^0.24" joblib = "^1.2.0" click = "^8" click-option-group = "^0.5" From 8182466b49316eb45ea7812b9e022e72b5d49315 Mon Sep 17 00:00:00 2001 From: thibaultdvx Date: Wed, 16 Oct 2024 15:07:45 +0200 Subject: [PATCH 04/16] fix issue with enum --- clinicadl/caps_dataset/data.py | 4 ++-- clinicadl/trainer/tasks_utils.py | 23 ++++++----------------- 2 files changed, 8 insertions(+), 19 deletions(-) 
diff --git a/clinicadl/caps_dataset/data.py b/clinicadl/caps_dataset/data.py index 638f49e9d..78553bcf9 100644 --- a/clinicadl/caps_dataset/data.py +++ b/clinicadl/caps_dataset/data.py @@ -580,11 +580,11 @@ def _get_mask_paths_and_tensors( else: for template_ in Template: if preprocessing_.name == template_.name: - template_name = template_ + template_name = template_.value for pattern_ in Pattern: if preprocessing_.name == pattern_.name: - pattern = pattern_ + pattern = pattern_.value mask_location = caps_directory / "masks" / f"tpl-{template_name}" diff --git a/clinicadl/trainer/tasks_utils.py b/clinicadl/trainer/tasks_utils.py index dc28d0acd..b7e65234f 100644 --- a/clinicadl/trainer/tasks_utils.py +++ b/clinicadl/trainer/tasks_utils.py @@ -1,31 +1,20 @@ -from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union import numpy as np import pandas as pd import torch -import torch.distributed as dist -from pydantic import ( - BaseModel, - ConfigDict, - computed_field, - model_validator, -) from torch import Tensor, nn -from torch.amp import autocast from torch.nn.functional import softmax -from torch.nn.modules.loss import _Loss -from torch.utils.data import DataLoader, Sampler, sampler +from torch.utils.data import Sampler, sampler from torch.utils.data.distributed import DistributedSampler from clinicadl.caps_dataset.data import CapsDataset from clinicadl.metrics.metric_module import MetricModule -from clinicadl.network.network import Network from clinicadl.trainer.config.train import TrainConfig -from clinicadl.utils import cluster from clinicadl.utils.enum import ( ClassificationLoss, ClassificationMetric, + Mode, ReconstructionLoss, ReconstructionMetric, RegressionLoss, @@ -249,7 +238,7 @@ def save_outputs(network_task: Union[str, Task]): def generate_test_row( network_task: Union[str, Task], - mode: str, + mode: Mode, metrics_module, n_classes: int, idx: int, @@ -274,7 +263,7 @@ def generate_test_row( [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode}_id"][idx].item(), + data[f"{mode.value}_id"][idx].item(), data["label"][idx].item(), prediction, ] @@ -286,7 +275,7 @@ def generate_test_row( [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode}_id"][idx].item(), + data[f"{mode.value}_id"][idx].item(), data["label"][idx].item(), outputs[idx].item(), ] @@ -298,7 +287,7 @@ def generate_test_row( row = [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode}_id"][idx].item(), + data[f"{mode.value}_id"][idx].item(), ] for metric in evaluation_metrics(Task.RECONSTRUCTION): From 06cf5e802172c4115316a4d7b8da1104c9b2becd Mon Sep 17 00:00:00 2001 From: thibaultdvx Date: Wed, 16 Oct 2024 15:11:27 +0200 Subject: [PATCH 05/16] fix numpy issue --- clinicadl/network/cnn/random.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clinicadl/network/cnn/random.py b/clinicadl/network/cnn/random.py index 897a014d1..a75727af0 100644 --- a/clinicadl/network/cnn/random.py +++ b/clinicadl/network/cnn/random.py @@ -208,7 +208,7 @@ def fc_dict_design(n_fcblocks, convolutions, initial_shape, n_classes=2): out_channels = last_conv["out_channels"] flattened_shape = np.ceil(np.array(initial_shape) / 2**n_conv) flattened_shape[0] = out_channels - in_features = np.product(flattened_shape) + in_features = np.prod(flattened_shape) # Sample number of FC layers ratio = (in_features / n_classes) ** (1 / n_fcblocks) From 663e4897d40fb8ba5acd8dbbc492bf8780474cb7 Mon Sep 17 00:00:00 2001 From: Thibault de 
Varax <154365476+thibaultdvx@users.noreply.github.com> Date: Wed, 16 Oct 2024 18:09:00 +0200 Subject: [PATCH 06/16] Revert unwanted merge (#672) --- .github/workflows/test.yml | 2 +- clinicadl/caps_dataset/data.py | 4 +- clinicadl/network/cnn/random.py | 2 +- clinicadl/trainer/tasks_utils.py | 23 +- docs/Installation.md | 2 +- environment.yml | 2 +- poetry.lock | 3224 +++++++++++++++--------------- pyproject.toml | 12 +- 8 files changed, 1630 insertions(+), 1641 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 05af7550c..219e86c2b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] - python-version: ['3.9', '3.10', '3.11', '3.12'] + python-version: ['3.8', '3.9', '3.10', '3.11'] steps: - uses: actions/checkout@v4 - uses: snok/install-poetry@v1 diff --git a/clinicadl/caps_dataset/data.py b/clinicadl/caps_dataset/data.py index 78553bcf9..638f49e9d 100644 --- a/clinicadl/caps_dataset/data.py +++ b/clinicadl/caps_dataset/data.py @@ -580,11 +580,11 @@ def _get_mask_paths_and_tensors( else: for template_ in Template: if preprocessing_.name == template_.name: - template_name = template_.value + template_name = template_ for pattern_ in Pattern: if preprocessing_.name == pattern_.name: - pattern = pattern_.value + pattern = pattern_ mask_location = caps_directory / "masks" / f"tpl-{template_name}" diff --git a/clinicadl/network/cnn/random.py b/clinicadl/network/cnn/random.py index a75727af0..897a014d1 100644 --- a/clinicadl/network/cnn/random.py +++ b/clinicadl/network/cnn/random.py @@ -208,7 +208,7 @@ def fc_dict_design(n_fcblocks, convolutions, initial_shape, n_classes=2): out_channels = last_conv["out_channels"] flattened_shape = np.ceil(np.array(initial_shape) / 2**n_conv) flattened_shape[0] = out_channels - in_features = np.prod(flattened_shape) + in_features = np.product(flattened_shape) # Sample number of FC layers ratio = (in_features / n_classes) ** (1 / n_fcblocks) diff --git a/clinicadl/trainer/tasks_utils.py b/clinicadl/trainer/tasks_utils.py index b7e65234f..dc28d0acd 100644 --- a/clinicadl/trainer/tasks_utils.py +++ b/clinicadl/trainer/tasks_utils.py @@ -1,20 +1,31 @@ +from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union import numpy as np import pandas as pd import torch +import torch.distributed as dist +from pydantic import ( + BaseModel, + ConfigDict, + computed_field, + model_validator, +) from torch import Tensor, nn +from torch.amp import autocast from torch.nn.functional import softmax -from torch.utils.data import Sampler, sampler +from torch.nn.modules.loss import _Loss +from torch.utils.data import DataLoader, Sampler, sampler from torch.utils.data.distributed import DistributedSampler from clinicadl.caps_dataset.data import CapsDataset from clinicadl.metrics.metric_module import MetricModule +from clinicadl.network.network import Network from clinicadl.trainer.config.train import TrainConfig +from clinicadl.utils import cluster from clinicadl.utils.enum import ( ClassificationLoss, ClassificationMetric, - Mode, ReconstructionLoss, ReconstructionMetric, RegressionLoss, @@ -238,7 +249,7 @@ def save_outputs(network_task: Union[str, Task]): def generate_test_row( network_task: Union[str, Task], - mode: Mode, + mode: str, metrics_module, n_classes: int, idx: int, @@ -263,7 +274,7 @@ def generate_test_row( [ data["participant_id"][idx], data["session_id"][idx], - 
data[f"{mode.value}_id"][idx].item(), + data[f"{mode}_id"][idx].item(), data["label"][idx].item(), prediction, ] @@ -275,7 +286,7 @@ def generate_test_row( [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode.value}_id"][idx].item(), + data[f"{mode}_id"][idx].item(), data["label"][idx].item(), outputs[idx].item(), ] @@ -287,7 +298,7 @@ def generate_test_row( row = [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode.value}_id"][idx].item(), + data[f"{mode}_id"][idx].item(), ] for metric in evaluation_metrics(Task.RECONSTRUCTION): diff --git a/docs/Installation.md b/docs/Installation.md index 4ccf75640..3f8c7dc22 100644 --- a/docs/Installation.md +++ b/docs/Installation.md @@ -30,7 +30,7 @@ bash /tmp/miniconda-installer.sh The latest release of ClinicaDL can be installed using `pip` as follows: ```{.sourceCode .bash} -conda create --name clinicadlEnv python=3.11 +conda create --name clinicadlEnv python=3.8 conda activate clinicadlEnv pip install clinicadl ``` diff --git a/environment.yml b/environment.yml index cc36c6c17..34f633da4 100644 --- a/environment.yml +++ b/environment.yml @@ -3,4 +3,4 @@ channels: - defaults - conda-forge dependencies: - - python=3.11 + - python=3.9 diff --git a/poetry.lock b/poetry.lock index 71072daf2..34b282b1c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -13,16 +13,18 @@ files = [ [[package]] name = "alembic" -version = "1.13.3" +version = "1.13.2" description = "A database migration tool for SQLAlchemy." optional = false python-versions = ">=3.8" files = [ - {file = "alembic-1.13.3-py3-none-any.whl", hash = "sha256:908e905976d15235fae59c9ac42c4c5b75cfcefe3d27c0fbf7ae15a37715d80e"}, - {file = "alembic-1.13.3.tar.gz", hash = "sha256:203503117415561e203aa14541740643a611f641517f0209fcae63e9fa09f1a2"}, + {file = "alembic-1.13.2-py3-none-any.whl", hash = "sha256:6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953"}, + {file = "alembic-1.13.2.tar.gz", hash = "sha256:1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef"}, ] [package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} +importlib-resources = {version = "*", markers = "python_version < \"3.9\""} Mako = "*" SQLAlchemy = ">=1.3.0" typing-extensions = ">=4" @@ -55,6 +57,9 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "appdirs" version = "1.4.4" @@ -96,6 +101,9 @@ files = [ {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, ] +[package.dependencies] +pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} + [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] @@ -145,116 +153,101 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.4.0" +version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - 
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = 
"charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - 
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = 
"sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = 
"charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] [[package]] @@ -292,24 +285,24 @@ tests-cov = ["coverage", "coveralls", "pytest", "pytest-cov"] [[package]] name = "cloudpickle" -version = "3.1.0" +version = "3.0.0" description = "Pickler class to extend the standard pickle.Pickler functionality" optional = false python-versions = ">=3.8" files = [ - {file = "cloudpickle-3.1.0-py3-none-any.whl", hash = "sha256:fe11acda67f61aaaec473e3afe030feb131d78a43461b718185363384f1ba12e"}, - {file = "cloudpickle-3.1.0.tar.gz", hash = "sha256:81a929b6e3c7335c863c771d673d105f02efdb89dfaba0c90495d1c64796601b"}, + {file = "cloudpickle-3.0.0-py3-none-any.whl", hash = "sha256:246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7"}, + {file = "cloudpickle-3.0.0.tar.gz", hash = "sha256:996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882"}, ] [[package]] name = "codecarbon" -version = "2.7.1" +version = "2.6.0" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "codecarbon-2.7.1-py3-none-any.whl", hash = "sha256:d056e3a422b956f7902bb1b7c910487134d6079cdd42f0f6b99abaa9b7302d09"}, - {file = "codecarbon-2.7.1.tar.gz", hash = "sha256:b1e15cb6746a1b3760719f3534c458f59f22ee0f644a6d0070011093b1c84ee1"}, + {file = "codecarbon-2.6.0-py3-none-any.whl", hash = "sha256:4e11467f7f844894512dd4cd623da27598c5e803941b45eed8438b06e9410c55"}, + {file = "codecarbon-2.6.0.tar.gz", hash = "sha256:05b0d39c60650ffa2e4b3eb72fdcbfe29c7853aa2e4c8ab37abea264a749057b"}, ] [package.dependencies] @@ -342,157 +335,154 @@ files = [ [[package]] name = "contourpy" -version = "1.3.0" +version = "1.1.1" description = "Python library for calculating contours of 2D quadrilateral grids" optional = false -python-versions = ">=3.9" -files = [ - {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, - {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, - {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, - {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, - {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, - {file = 
"contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, - {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, - {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, - {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, - {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, - {file = 
"contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, - {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, - {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, - {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"}, - {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"}, - {file = 
"contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"}, - {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"}, - {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"}, - {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"}, - {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, +python-versions = ">=3.8" +files = [ + {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"}, + {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"}, + {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"}, + {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"}, + {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"}, + {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"}, + {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"}, + {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"}, + {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"}, + {file = "contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"}, + {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"}, + {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"}, + {file = 
"contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"}, + {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"}, + {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"}, + {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"}, + {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"}, + {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"}, + {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"}, + {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"}, + {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"}, + {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"}, + {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"}, + {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"}, + {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"}, + {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"}, + {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"}, + {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"}, + {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"}, + {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"}, + {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"}, + {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"}, + {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"}, + {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"}, + {file = "contourpy-1.1.1-cp38-cp38-win32.whl", hash = 
"sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"}, + {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"}, + {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"}, + {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"}, + {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"}, + {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"}, + {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"}, + {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"}, + {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"}, + {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"}, + {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"}, + {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"}, + {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"}, + {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"}, + {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"}, + {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"}, + {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"}, + {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"}, ] [package.dependencies] -numpy = ">=1.23" +numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""} [package.extras] bokeh = ["bokeh", "selenium"] docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] -mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"] test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] -test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] +test-no-images = ["pytest", "pytest-cov", "wurlitzer"] [[package]] name = "coverage" -version = "7.6.3" +version = "7.6.1" description = "Code coverage measurement for Python" optional = false -python-versions = ">=3.9" -files = [ - {file = "coverage-7.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:6da42bbcec130b188169107ecb6ee7bd7b4c849d24c9370a0c884cf728d8e976"}, - {file = "coverage-7.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c222958f59b0ae091f4535851cbb24eb57fc0baea07ba675af718fb5302dddb2"}, - {file = "coverage-7.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab84a8b698ad5a6c365b08061920138e7a7dd9a04b6feb09ba1bfae68346ce6d"}, - {file = "coverage-7.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70a6756ce66cd6fe8486c775b30889f0dc4cb20c157aa8c35b45fd7868255c5c"}, - {file = "coverage-7.6.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c2e6fa98032fec8282f6b27e3f3986c6e05702828380618776ad794e938f53a"}, - {file = "coverage-7.6.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:921fbe13492caf6a69528f09d5d7c7d518c8d0e7b9f6701b7719715f29a71e6e"}, - {file = "coverage-7.6.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6d99198203f0b9cb0b5d1c0393859555bc26b548223a769baf7e321a627ed4fc"}, - {file = "coverage-7.6.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:87cd2e29067ea397a47e352efb13f976eb1b03e18c999270bb50589323294c6e"}, - {file = "coverage-7.6.3-cp310-cp310-win32.whl", hash = "sha256:a3328c3e64ea4ab12b85999eb0779e6139295bbf5485f69d42cf794309e3d007"}, - {file = "coverage-7.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:bca4c8abc50d38f9773c1ec80d43f3768df2e8576807d1656016b9d3eeaa96fd"}, - {file = "coverage-7.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c51ef82302386d686feea1c44dbeef744585da16fcf97deea2a8d6c1556f519b"}, - {file = "coverage-7.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0ca37993206402c6c35dc717f90d4c8f53568a8b80f0bf1a1b2b334f4d488fba"}, - {file = "coverage-7.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c77326300b839c44c3e5a8fe26c15b7e87b2f32dfd2fc9fee1d13604347c9b38"}, - {file = "coverage-7.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e484e479860e00da1f005cd19d1c5d4a813324e5951319ac3f3eefb497cc549"}, - {file = "coverage-7.6.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c6c0f4d53ef603397fc894a895b960ecd7d44c727df42a8d500031716d4e8d2"}, - {file = "coverage-7.6.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:37be7b5ea3ff5b7c4a9db16074dc94523b5f10dd1f3b362a827af66a55198175"}, - {file = "coverage-7.6.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:43b32a06c47539fe275106b376658638b418c7cfdfff0e0259fbf877e845f14b"}, - {file = "coverage-7.6.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ee77c7bef0724165e795b6b7bf9c4c22a9b8468a6bdb9c6b4281293c6b22a90f"}, - {file = "coverage-7.6.3-cp311-cp311-win32.whl", hash = "sha256:43517e1f6b19f610a93d8227e47790722c8bf7422e46b365e0469fc3d3563d97"}, - {file = "coverage-7.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:04f2189716e85ec9192df307f7c255f90e78b6e9863a03223c3b998d24a3c6c6"}, - {file = "coverage-7.6.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27bd5f18d8f2879e45724b0ce74f61811639a846ff0e5c0395b7818fae87aec6"}, - {file = "coverage-7.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d546cfa78844b8b9c1c0533de1851569a13f87449897bbc95d698d1d3cb2a30f"}, - {file = "coverage-7.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9975442f2e7a5cfcf87299c26b5a45266ab0696348420049b9b94b2ad3d40234"}, - {file = "coverage-7.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:583049c63106c0555e3ae3931edab5669668bbef84c15861421b94e121878d3f"}, - {file = "coverage-7.6.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2341a78ae3a5ed454d524206a3fcb3cec408c2a0c7c2752cd78b606a2ff15af4"}, - {file = "coverage-7.6.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a4fb91d5f72b7e06a14ff4ae5be625a81cd7e5f869d7a54578fc271d08d58ae3"}, - {file = "coverage-7.6.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e279f3db904e3b55f520f11f983cc8dc8a4ce9b65f11692d4718ed021ec58b83"}, - {file = "coverage-7.6.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aa23ce39661a3e90eea5f99ec59b763b7d655c2cada10729ed920a38bfc2b167"}, - {file = "coverage-7.6.3-cp312-cp312-win32.whl", hash = "sha256:52ac29cc72ee7e25ace7807249638f94c9b6a862c56b1df015d2b2e388e51dbd"}, - {file = "coverage-7.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:40e8b1983080439d4802d80b951f4a93d991ef3261f69e81095a66f86cf3c3c6"}, - {file = "coverage-7.6.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9134032f5aa445ae591c2ba6991d10136a1f533b1d2fa8f8c21126468c5025c6"}, - {file = "coverage-7.6.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:99670790f21a96665a35849990b1df447993880bb6463a0a1d757897f30da929"}, - {file = "coverage-7.6.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc7d6b380ca76f5e817ac9eef0c3686e7834c8346bef30b041a4ad286449990"}, - {file = "coverage-7.6.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7b26757b22faf88fcf232f5f0e62f6e0fd9e22a8a5d0d5016888cdfe1f6c1c4"}, - {file = "coverage-7.6.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c59d6a4a4633fad297f943c03d0d2569867bd5372eb5684befdff8df8522e39"}, - {file = "coverage-7.6.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f263b18692f8ed52c8de7f40a0751e79015983dbd77b16906e5b310a39d3ca21"}, - {file = "coverage-7.6.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:79644f68a6ff23b251cae1c82b01a0b51bc40c8468ca9585c6c4b1aeee570e0b"}, - {file = "coverage-7.6.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:71967c35828c9ff94e8c7d405469a1fb68257f686bca7c1ed85ed34e7c2529c4"}, - {file = "coverage-7.6.3-cp313-cp313-win32.whl", hash = "sha256:e266af4da2c1a4cbc6135a570c64577fd3e6eb204607eaff99d8e9b710003c6f"}, - {file = "coverage-7.6.3-cp313-cp313-win_amd64.whl", hash = "sha256:ea52bd218d4ba260399a8ae4bb6b577d82adfc4518b93566ce1fddd4a49d1dce"}, - {file = "coverage-7.6.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8d4c6ea0f498c7c79111033a290d060c517853a7bcb2f46516f591dab628ddd3"}, - {file = "coverage-7.6.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:331b200ad03dbaa44151d74daeb7da2cf382db424ab923574f6ecca7d3b30de3"}, - {file = "coverage-7.6.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54356a76b67cf8a3085818026bb556545ebb8353951923b88292556dfa9f812d"}, - {file = "coverage-7.6.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ebec65f5068e7df2d49466aab9128510c4867e532e07cb6960075b27658dca38"}, - {file = 
"coverage-7.6.3-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d33a785ea8354c480515e781554d3be582a86297e41ccbea627a5c632647f2cd"}, - {file = "coverage-7.6.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f7ddb920106bbbbcaf2a274d56f46956bf56ecbde210d88061824a95bdd94e92"}, - {file = "coverage-7.6.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:70d24936ca6c15a3bbc91ee9c7fc661132c6f4c9d42a23b31b6686c05073bde5"}, - {file = "coverage-7.6.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c30e42ea11badb147f0d2e387115b15e2bd8205a5ad70d6ad79cf37f6ac08c91"}, - {file = "coverage-7.6.3-cp313-cp313t-win32.whl", hash = "sha256:365defc257c687ce3e7d275f39738dcd230777424117a6c76043459db131dd43"}, - {file = "coverage-7.6.3-cp313-cp313t-win_amd64.whl", hash = "sha256:23bb63ae3f4c645d2d82fa22697364b0046fbafb6261b258a58587441c5f7bd0"}, - {file = "coverage-7.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:da29ceabe3025a1e5a5aeeb331c5b1af686daab4ff0fb4f83df18b1180ea83e2"}, - {file = "coverage-7.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:df8c05a0f574d480947cba11b947dc41b1265d721c3777881da2fb8d3a1ddfba"}, - {file = "coverage-7.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1e3b40b82236d100d259854840555469fad4db64f669ab817279eb95cd535c"}, - {file = "coverage-7.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4adeb878a374126f1e5cf03b87f66279f479e01af0e9a654cf6d1509af46c40"}, - {file = "coverage-7.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43d6a66e33b1455b98fc7312b124296dad97a2e191c80320587234a77b1b736e"}, - {file = "coverage-7.6.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1990b1f4e2c402beb317840030bb9f1b6a363f86e14e21b4212e618acdfce7f6"}, - {file = "coverage-7.6.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:12f9515d875859faedb4144fd38694a761cd2a61ef9603bf887b13956d0bbfbb"}, - {file = "coverage-7.6.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:99ded130555c021d99729fabd4ddb91a6f4cc0707df4b1daf912c7850c373b13"}, - {file = "coverage-7.6.3-cp39-cp39-win32.whl", hash = "sha256:c3a79f56dee9136084cf84a6c7c4341427ef36e05ae6415bf7d787c96ff5eaa3"}, - {file = "coverage-7.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:aac7501ae73d4a02f4b7ac8fcb9dc55342ca98ffb9ed9f2dfb8a25d53eda0e4d"}, - {file = "coverage-7.6.3-pp39.pp310-none-any.whl", hash = "sha256:b9853509b4bf57ba7b1f99b9d866c422c9c5248799ab20e652bbb8a184a38181"}, - {file = "coverage-7.6.3.tar.gz", hash = "sha256:bb7d5fe92bd0dc235f63ebe9f8c6e0884f7360f88f3411bfed1350c872ef2054"}, +python-versions = ">=3.8" +files = [ + {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"}, + {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"}, + {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"}, + {file = 
"coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"}, + {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"}, + {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"}, + {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"}, + {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"}, + {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"}, + {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"}, + {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"}, + {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, + {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, + {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, + {file = 
"coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, + {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, + {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, + {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"}, + {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"}, + {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"}, + {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"}, + {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"}, + {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"}, + {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"}, + {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"}, + {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"}, + {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = 
"sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"}, + {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"}, + {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"}, + {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = 
"sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -518,13 +508,13 @@ tests = ["pytest", "pytest-cov", "pytest-xdist"] [[package]] name = "databricks-sdk" -version = "0.34.0" +version = "0.31.1" description = "Databricks SDK for Python (Beta)" optional = false python-versions = ">=3.7" files = [ - {file = "databricks_sdk-0.34.0-py3-none-any.whl", hash = "sha256:8c8e023007041fee275764067013ccf9e119509047f0670aee71a7831c8efaec"}, - {file = "databricks_sdk-0.34.0.tar.gz", hash = "sha256:1d4ec47783cf17cb6fc2aec43025625e04519f01dbb1696d621ed3cacdb64eb5"}, + {file = "databricks_sdk-0.31.1-py3-none-any.whl", hash = "sha256:9ab286f87ae1cc98a00ef7d207e40661f4d14a464071425ad169d235919b35f6"}, + {file = "databricks_sdk-0.31.1.tar.gz", hash = "sha256:8609e655d0e5ecb15c2a8a6468e737f8dcb4f28c33239388de3ab386b921d790"}, ] [package.dependencies] @@ -554,13 +544,13 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "distlib" -version = "0.3.9" +version = "0.3.8" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, - {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, + {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, + {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, ] [[package]] @@ -640,19 +630,19 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] [[package]] name = "filelock" -version = "3.16.1" +version = "3.15.4" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, - {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, + {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, + {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] -typing = ["typing-extensions (>=4.12.2)"] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] +typing = ["typing-extensions (>=4.8)"] [[package]] name = "flask" @@ -679,59 +669,53 @@ dotenv = ["python-dotenv"] [[package]] name = "fonttools" -version = "4.54.1" +version = "4.53.1" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.54.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ed7ee041ff7b34cc62f07545e55e1468808691dddfd315d51dd82a6b37ddef2"}, - {file = "fonttools-4.54.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41bb0b250c8132b2fcac148e2e9198e62ff06f3cc472065dff839327945c5882"}, - {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7965af9b67dd546e52afcf2e38641b5be956d68c425bef2158e95af11d229f10"}, - {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278913a168f90d53378c20c23b80f4e599dca62fbffae4cc620c8eed476b723e"}, - {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0e88e3018ac809b9662615072dcd6b84dca4c2d991c6d66e1970a112503bba7e"}, - {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4817f0031206e637d1e685251ac61be64d1adef111060df84fdcbc6ab6c44"}, - {file = "fonttools-4.54.1-cp310-cp310-win32.whl", hash = "sha256:7e3b7d44e18c085fd8c16dcc6f1ad6c61b71ff463636fcb13df7b1b818bd0c02"}, - {file = "fonttools-4.54.1-cp310-cp310-win_amd64.whl", hash = "sha256:dd9cc95b8d6e27d01e1e1f1fae8559ef3c02c76317da650a19047f249acd519d"}, - {file = "fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5419771b64248484299fa77689d4f3aeed643ea6630b2ea750eeab219588ba20"}, - {file = "fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:301540e89cf4ce89d462eb23a89464fef50915255ece765d10eee8b2bf9d75b2"}, - {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ae5091547e74e7efecc3cbf8e75200bc92daaeb88e5433c5e3e95ea8ce5aa7"}, - {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82834962b3d7c5ca98cb56001c33cf20eb110ecf442725dc5fdf36d16ed1ab07"}, - {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d26732ae002cc3d2ecab04897bb02ae3f11f06dd7575d1df46acd2f7c012a8d8"}, - {file = 
"fonttools-4.54.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58974b4987b2a71ee08ade1e7f47f410c367cdfc5a94fabd599c88165f56213a"}, - {file = "fonttools-4.54.1-cp311-cp311-win32.whl", hash = "sha256:ab774fa225238986218a463f3fe151e04d8c25d7de09df7f0f5fce27b1243dbc"}, - {file = "fonttools-4.54.1-cp311-cp311-win_amd64.whl", hash = "sha256:07e005dc454eee1cc60105d6a29593459a06321c21897f769a281ff2d08939f6"}, - {file = "fonttools-4.54.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:54471032f7cb5fca694b5f1a0aaeba4af6e10ae989df408e0216f7fd6cdc405d"}, - {file = "fonttools-4.54.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fa92cb248e573daab8d032919623cc309c005086d743afb014c836636166f08"}, - {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a911591200114969befa7f2cb74ac148bce5a91df5645443371aba6d222e263"}, - {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93d458c8a6a354dc8b48fc78d66d2a8a90b941f7fec30e94c7ad9982b1fa6bab"}, - {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5eb2474a7c5be8a5331146758debb2669bf5635c021aee00fd7c353558fc659d"}, - {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9c563351ddc230725c4bdf7d9e1e92cbe6ae8553942bd1fb2b2ff0884e8b714"}, - {file = "fonttools-4.54.1-cp312-cp312-win32.whl", hash = "sha256:fdb062893fd6d47b527d39346e0c5578b7957dcea6d6a3b6794569370013d9ac"}, - {file = "fonttools-4.54.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4564cf40cebcb53f3dc825e85910bf54835e8a8b6880d59e5159f0f325e637e"}, - {file = "fonttools-4.54.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6e37561751b017cf5c40fce0d90fd9e8274716de327ec4ffb0df957160be3bff"}, - {file = "fonttools-4.54.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357cacb988a18aace66e5e55fe1247f2ee706e01debc4b1a20d77400354cddeb"}, - {file = "fonttools-4.54.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e953cc0bddc2beaf3a3c3b5dd9ab7554677da72dfaf46951e193c9653e515a"}, - {file = "fonttools-4.54.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58d29b9a294573d8319f16f2f79e42428ba9b6480442fa1836e4eb89c4d9d61c"}, - {file = "fonttools-4.54.1-cp313-cp313-win32.whl", hash = "sha256:9ef1b167e22709b46bf8168368b7b5d3efeaaa746c6d39661c1b4405b6352e58"}, - {file = "fonttools-4.54.1-cp313-cp313-win_amd64.whl", hash = "sha256:262705b1663f18c04250bd1242b0515d3bbae177bee7752be67c979b7d47f43d"}, - {file = "fonttools-4.54.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ed2f80ca07025551636c555dec2b755dd005e2ea8fbeb99fc5cdff319b70b23b"}, - {file = "fonttools-4.54.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9dc080e5a1c3b2656caff2ac2633d009b3a9ff7b5e93d0452f40cd76d3da3b3c"}, - {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d152d1be65652fc65e695e5619e0aa0982295a95a9b29b52b85775243c06556"}, - {file = "fonttools-4.54.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8583e563df41fdecef31b793b4dd3af8a9caa03397be648945ad32717a92885b"}, - {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d1d353ef198c422515a3e974a1e8d5b304cd54a4c2eebcae708e37cd9eeffb1"}, - {file = "fonttools-4.54.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:fda582236fee135d4daeca056c8c88ec5f6f6d88a004a79b84a02547c8f57386"}, - {file = 
"fonttools-4.54.1-cp38-cp38-win32.whl", hash = "sha256:e7d82b9e56716ed32574ee106cabca80992e6bbdcf25a88d97d21f73a0aae664"}, - {file = "fonttools-4.54.1-cp38-cp38-win_amd64.whl", hash = "sha256:ada215fd079e23e060157aab12eba0d66704316547f334eee9ff26f8c0d7b8ab"}, - {file = "fonttools-4.54.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f5b8a096e649768c2f4233f947cf9737f8dbf8728b90e2771e2497c6e3d21d13"}, - {file = "fonttools-4.54.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4e10d2e0a12e18f4e2dd031e1bf7c3d7017be5c8dbe524d07706179f355c5dac"}, - {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31c32d7d4b0958600eac75eaf524b7b7cb68d3a8c196635252b7a2c30d80e986"}, - {file = "fonttools-4.54.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c39287f5c8f4a0c5a55daf9eaf9ccd223ea59eed3f6d467133cc727d7b943a55"}, - {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a7a310c6e0471602fe3bf8efaf193d396ea561486aeaa7adc1f132e02d30c4b9"}, - {file = "fonttools-4.54.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d3b659d1029946f4ff9b6183984578041b520ce0f8fb7078bb37ec7445806b33"}, - {file = "fonttools-4.54.1-cp39-cp39-win32.whl", hash = "sha256:e96bc94c8cda58f577277d4a71f51c8e2129b8b36fd05adece6320dd3d57de8a"}, - {file = "fonttools-4.54.1-cp39-cp39-win_amd64.whl", hash = "sha256:e8a4b261c1ef91e7188a30571be6ad98d1c6d9fa2427244c545e2fa0a2494dd7"}, - {file = "fonttools-4.54.1-py3-none-any.whl", hash = "sha256:37cddd62d83dc4f72f7c3f3c2bcf2697e89a30efb152079896544a93907733bd"}, - {file = "fonttools-4.54.1.tar.gz", hash = "sha256:957f669d4922f92c171ba01bef7f29410668db09f6c02111e22b2bce446f3285"}, + {file = "fonttools-4.53.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0679a30b59d74b6242909945429dbddb08496935b82f91ea9bf6ad240ec23397"}, + {file = "fonttools-4.53.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8bf06b94694251861ba7fdeea15c8ec0967f84c3d4143ae9daf42bbc7717fe3"}, + {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b96cd370a61f4d083c9c0053bf634279b094308d52fdc2dd9a22d8372fdd590d"}, + {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1c7c5aa18dd3b17995898b4a9b5929d69ef6ae2af5b96d585ff4005033d82f0"}, + {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e013aae589c1c12505da64a7d8d023e584987e51e62006e1bb30d72f26522c41"}, + {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9efd176f874cb6402e607e4cc9b4a9cd584d82fc34a4b0c811970b32ba62501f"}, + {file = "fonttools-4.53.1-cp310-cp310-win32.whl", hash = "sha256:c8696544c964500aa9439efb6761947393b70b17ef4e82d73277413f291260a4"}, + {file = "fonttools-4.53.1-cp310-cp310-win_amd64.whl", hash = "sha256:8959a59de5af6d2bec27489e98ef25a397cfa1774b375d5787509c06659b3671"}, + {file = "fonttools-4.53.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:da33440b1413bad53a8674393c5d29ce64d8c1a15ef8a77c642ffd900d07bfe1"}, + {file = "fonttools-4.53.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ff7e5e9bad94e3a70c5cd2fa27f20b9bb9385e10cddab567b85ce5d306ea923"}, + {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6e7170d675d12eac12ad1a981d90f118c06cf680b42a2d74c6c931e54b50719"}, + {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bee32ea8765e859670c4447b0817514ca79054463b6b79784b08a8df3a4d78e3"}, + {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6e08f572625a1ee682115223eabebc4c6a2035a6917eac6f60350aba297ccadb"}, + {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b21952c092ffd827504de7e66b62aba26fdb5f9d1e435c52477e6486e9d128b2"}, + {file = "fonttools-4.53.1-cp311-cp311-win32.whl", hash = "sha256:9dfdae43b7996af46ff9da520998a32b105c7f098aeea06b2226b30e74fbba88"}, + {file = "fonttools-4.53.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4d0096cb1ac7a77b3b41cd78c9b6bc4a400550e21dc7a92f2b5ab53ed74eb02"}, + {file = "fonttools-4.53.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d92d3c2a1b39631a6131c2fa25b5406855f97969b068e7e08413325bc0afba58"}, + {file = "fonttools-4.53.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3b3c8ebafbee8d9002bd8f1195d09ed2bd9ff134ddec37ee8f6a6375e6a4f0e8"}, + {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32f029c095ad66c425b0ee85553d0dc326d45d7059dbc227330fc29b43e8ba60"}, + {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f5e6c3510b79ea27bb1ebfcc67048cde9ec67afa87c7dd7efa5c700491ac7f"}, + {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f677ce218976496a587ab17140da141557beb91d2a5c1a14212c994093f2eae2"}, + {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9e6ceba2a01b448e36754983d376064730690401da1dd104ddb543519470a15f"}, + {file = "fonttools-4.53.1-cp312-cp312-win32.whl", hash = "sha256:791b31ebbc05197d7aa096bbc7bd76d591f05905d2fd908bf103af4488e60670"}, + {file = "fonttools-4.53.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ed170b5e17da0264b9f6fae86073be3db15fa1bd74061c8331022bca6d09bab"}, + {file = "fonttools-4.53.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c818c058404eb2bba05e728d38049438afd649e3c409796723dfc17cd3f08749"}, + {file = "fonttools-4.53.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:651390c3b26b0c7d1f4407cad281ee7a5a85a31a110cbac5269de72a51551ba2"}, + {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e54f1bba2f655924c1138bbc7fa91abd61f45c68bd65ab5ed985942712864bbb"}, + {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9cd19cf4fe0595ebdd1d4915882b9440c3a6d30b008f3cc7587c1da7b95be5f"}, + {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2af40ae9cdcb204fc1d8f26b190aa16534fcd4f0df756268df674a270eab575d"}, + {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:35250099b0cfb32d799fb5d6c651220a642fe2e3c7d2560490e6f1d3f9ae9169"}, + {file = "fonttools-4.53.1-cp38-cp38-win32.whl", hash = "sha256:f08df60fbd8d289152079a65da4e66a447efc1d5d5a4d3f299cdd39e3b2e4a7d"}, + {file = "fonttools-4.53.1-cp38-cp38-win_amd64.whl", hash = "sha256:7b6b35e52ddc8fb0db562133894e6ef5b4e54e1283dff606fda3eed938c36fc8"}, + {file = "fonttools-4.53.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75a157d8d26c06e64ace9df037ee93a4938a4606a38cb7ffaf6635e60e253b7a"}, + {file = "fonttools-4.53.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4824c198f714ab5559c5be10fd1adf876712aa7989882a4ec887bf1ef3e00e31"}, + {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:becc5d7cb89c7b7afa8321b6bb3dbee0eec2b57855c90b3e9bf5fb816671fa7c"}, + {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ec3fb43befb54be490147b4a922b5314e16372a643004f182babee9f9c3407"}, + {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:73379d3ffdeecb376640cd8ed03e9d2d0e568c9d1a4e9b16504a834ebadc2dfb"}, + {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02569e9a810f9d11f4ae82c391ebc6fb5730d95a0657d24d754ed7763fb2d122"}, + {file = "fonttools-4.53.1-cp39-cp39-win32.whl", hash = "sha256:aae7bd54187e8bf7fd69f8ab87b2885253d3575163ad4d669a262fe97f0136cb"}, + {file = "fonttools-4.53.1-cp39-cp39-win_amd64.whl", hash = "sha256:e5b708073ea3d684235648786f5f6153a48dc8762cdfe5563c57e80787c29fbb"}, + {file = "fonttools-4.53.1-py3-none-any.whl", hash = "sha256:f1f8758a2ad110bd6432203a344269f445a2907dc24ef6bccfd0ac4e14e0d71d"}, + {file = "fonttools-4.53.1.tar.gz", hash = "sha256:e128778a8e9bc11159ce5447f76766cefbd876f44bd79aff030287254e4752c4"}, ] [package.extras] @@ -750,13 +734,13 @@ woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] [[package]] name = "fsspec" -version = "2024.9.0" +version = "2024.6.1" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"}, - {file = "fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8"}, + {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"}, + {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"}, ] [package.extras] @@ -838,13 +822,13 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", [[package]] name = "google-auth" -version = "2.35.0" +version = "2.34.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google_auth-2.35.0-py2.py3-none-any.whl", hash = "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f"}, - {file = "google_auth-2.35.0.tar.gz", hash = "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a"}, + {file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"}, + {file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"}, ] [package.dependencies] @@ -859,6 +843,24 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] +[[package]] +name = "google-auth-oauthlib" +version = "1.0.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "google-auth-oauthlib-1.0.0.tar.gz", hash = "sha256:e375064964820b47221a7e1b7ee1fd77051b6323c3f9e3e19785f78ab67ecfc5"}, + {file = "google_auth_oauthlib-1.0.0-py2.py3-none-any.whl", hash = "sha256:95880ca704928c300f48194d1770cf5b1462835b6e49db61445a520f793fd5fb"}, +] + +[package.dependencies] +google-auth = ">=2.15.0" +requests-oauthlib = ">=0.7.0" + +[package.extras] +tool = ["click (>=6.0.0)"] + [[package]] name = "graphene" version = "3.3" @@ -881,18 +883,15 @@ test = ["coveralls (>=3.3,<4)", "iso8601 (>=1,<2)", "mock (>=4,<5)", 
"pytest (>= [[package]] name = "graphql-core" -version = "3.2.5" +version = "3.2.3" description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." optional = false -python-versions = "<4,>=3.6" +python-versions = ">=3.6,<4" files = [ - {file = "graphql_core-3.2.5-py3-none-any.whl", hash = "sha256:2f150d5096448aa4f8ab26268567bbfeef823769893b39c1a2e1409590939c8a"}, - {file = "graphql_core-3.2.5.tar.gz", hash = "sha256:e671b90ed653c808715645e3998b7ab67d382d55467b7e2978549111bbabf8d5"}, + {file = "graphql-core-3.2.3.tar.gz", hash = "sha256:06d2aad0ac723e35b1cb47885d3e5c45e956a53bc1b209a9fc5369007fe46676"}, + {file = "graphql_core-3.2.3-py3-none-any.whl", hash = "sha256:5766780452bd5ec8ba133f8bf287dc92713e3868ddd83aee4faab9fc3e303dc3"}, ] -[package.dependencies] -typing-extensions = {version = ">=4,<5", markers = "python_version < \"3.10\""} - [[package]] name = "graphql-relay" version = "3.2.0" @@ -909,84 +908,69 @@ graphql-core = ">=3.2,<3.3" [[package]] name = "greenlet" -version = "3.1.1" +version = "3.0.3" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, - {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, - {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, - {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, - {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, - {file = 
"greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, - {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, - {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, - {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, - {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, - {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, - {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, - {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, - {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, - {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, - {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, - {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, - {file = 
"greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, - {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, - {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, - {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, - {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, - {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, - {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, - {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, - 
{file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, - {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, - {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, - {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, - {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, - {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, - {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, - {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, - {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, - {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, + {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, + {file = 
"greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, + {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, + {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, + {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, + {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, + {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, + {file = 
"greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, + {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, + {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, + {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, + {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, + {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, + {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, + {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, + {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, + {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, ] [package.extras] @@ -995,70 +979,61 @@ test = ["objgraph", "psutil"] [[package]] name = "grpcio" -version = "1.67.0" +version = "1.66.1" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" files = [ - {file = "grpcio-1.67.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:bd79929b3bb96b54df1296cd3bf4d2b770bd1df6c2bdf549b49bab286b925cdc"}, - {file = "grpcio-1.67.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:16724ffc956ea42967f5758c2f043faef43cb7e48a51948ab593570570d1e68b"}, - {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:2b7183c80b602b0ad816315d66f2fb7887614ead950416d60913a9a71c12560d"}, - {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:efe32b45dd6d118f5ea2e5deaed417d8a14976325c93812dd831908522b402c9"}, - {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe89295219b9c9e47780a0f1c75ca44211e706d1c598242249fe717af3385ec8"}, - {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa8d025fae1595a207b4e47c2e087cb88d47008494db258ac561c00877d4c8f8"}, - {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f95e15db43e75a534420e04822df91f645664bf4ad21dfaad7d51773c80e6bb4"}, - {file = "grpcio-1.67.0-cp310-cp310-win32.whl", hash = "sha256:a6b9a5c18863fd4b6624a42e2712103fb0f57799a3b29651c0e5b8119a519d65"}, - {file = "grpcio-1.67.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6eb68493a05d38b426604e1dc93bfc0137c4157f7ab4fac5771fd9a104bbaa6"}, - {file = "grpcio-1.67.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:e91d154689639932305b6ea6f45c6e46bb51ecc8ea77c10ef25aa77f75443ad4"}, - {file = "grpcio-1.67.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cb204a742997277da678611a809a8409657b1398aaeebf73b3d9563b7d154c13"}, - {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:ae6de510f670137e755eb2a74b04d1041e7210af2444103c8c95f193340d17ee"}, - {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74b900566bdf68241118f2918d312d3bf554b2ce0b12b90178091ea7d0a17b3d"}, - {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4e95e43447a02aa603abcc6b5e727d093d161a869c83b073f50b9390ecf0fa8"}, - {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bb94e66cd8f0baf29bd3184b6aa09aeb1a660f9ec3d85da615c5003154bc2bf"}, - {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:82e5bd4b67b17c8c597273663794a6a46a45e44165b960517fe6d8a2f7f16d23"}, - {file = "grpcio-1.67.0-cp311-cp311-win32.whl", hash = "sha256:7fc1d2b9fd549264ae585026b266ac2db53735510a207381be509c315b4af4e8"}, - {file = 
"grpcio-1.67.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac11ecb34a86b831239cc38245403a8de25037b448464f95c3315819e7519772"}, - {file = "grpcio-1.67.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:227316b5631260e0bef8a3ce04fa7db4cc81756fea1258b007950b6efc90c05d"}, - {file = "grpcio-1.67.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d90cfdafcf4b45a7a076e3e2a58e7bc3d59c698c4f6470b0bb13a4d869cf2273"}, - {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:77196216d5dd6f99af1c51e235af2dd339159f657280e65ce7e12c1a8feffd1d"}, - {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c05a26a0f7047f720da41dc49406b395c1470eef44ff7e2c506a47ac2c0591"}, - {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3840994689cc8cbb73d60485c594424ad8adb56c71a30d8948d6453083624b52"}, - {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5a1e03c3102b6451028d5dc9f8591131d6ab3c8a0e023d94c28cb930ed4b5f81"}, - {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:682968427a63d898759474e3b3178d42546e878fdce034fd7474ef75143b64e3"}, - {file = "grpcio-1.67.0-cp312-cp312-win32.whl", hash = "sha256:d01793653248f49cf47e5695e0a79805b1d9d4eacef85b310118ba1dfcd1b955"}, - {file = "grpcio-1.67.0-cp312-cp312-win_amd64.whl", hash = "sha256:985b2686f786f3e20326c4367eebdaed3e7aa65848260ff0c6644f817042cb15"}, - {file = "grpcio-1.67.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:8c9a35b8bc50db35ab8e3e02a4f2a35cfba46c8705c3911c34ce343bd777813a"}, - {file = "grpcio-1.67.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:42199e704095b62688998c2d84c89e59a26a7d5d32eed86d43dc90e7a3bd04aa"}, - {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c4c425f440fb81f8d0237c07b9322fc0fb6ee2b29fbef5f62a322ff8fcce240d"}, - {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:323741b6699cd2b04a71cb38f502db98f90532e8a40cb675393d248126a268af"}, - {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:662c8e105c5e5cee0317d500eb186ed7a93229586e431c1bf0c9236c2407352c"}, - {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f6bd2ab135c64a4d1e9e44679a616c9bc944547357c830fafea5c3caa3de5153"}, - {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:2f55c1e0e2ae9bdd23b3c63459ee4c06d223b68aeb1961d83c48fb63dc29bc03"}, - {file = "grpcio-1.67.0-cp313-cp313-win32.whl", hash = "sha256:fd6bc27861e460fe28e94226e3673d46e294ca4673d46b224428d197c5935e69"}, - {file = "grpcio-1.67.0-cp313-cp313-win_amd64.whl", hash = "sha256:cf51d28063338608cd8d3cd64677e922134837902b70ce00dad7f116e3998210"}, - {file = "grpcio-1.67.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:7f200aca719c1c5dc72ab68be3479b9dafccdf03df530d137632c534bb6f1ee3"}, - {file = "grpcio-1.67.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0892dd200ece4822d72dd0952f7112c542a487fc48fe77568deaaa399c1e717d"}, - {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f4d613fbf868b2e2444f490d18af472ccb47660ea3df52f068c9c8801e1f3e85"}, - {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c69bf11894cad9da00047f46584d5758d6ebc9b5950c0dc96fec7e0bce5cde9"}, - {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9bca3ca0c5e74dea44bf57d27e15a3a3996ce7e5780d61b7c72386356d231db"}, - 
{file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:014dfc020e28a0d9be7e93a91f85ff9f4a87158b7df9952fe23cc42d29d31e1e"}, - {file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d4ea4509d42c6797539e9ec7496c15473177ce9abc89bc5c71e7abe50fc25737"}, - {file = "grpcio-1.67.0-cp38-cp38-win32.whl", hash = "sha256:9d75641a2fca9ae1ae86454fd25d4c298ea8cc195dbc962852234d54a07060ad"}, - {file = "grpcio-1.67.0-cp38-cp38-win_amd64.whl", hash = "sha256:cff8e54d6a463883cda2fab94d2062aad2f5edd7f06ae3ed030f2a74756db365"}, - {file = "grpcio-1.67.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:62492bd534979e6d7127b8a6b29093161a742dee3875873e01964049d5250a74"}, - {file = "grpcio-1.67.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eef1dce9d1a46119fd09f9a992cf6ab9d9178b696382439446ca5f399d7b96fe"}, - {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f623c57a5321461c84498a99dddf9d13dac0e40ee056d884d6ec4ebcab647a78"}, - {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54d16383044e681f8beb50f905249e4e7261dd169d4aaf6e52eab67b01cbbbe2"}, - {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2a44e572fb762c668e4812156b81835f7aba8a721b027e2d4bb29fb50ff4d33"}, - {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:391df8b0faac84d42f5b8dfc65f5152c48ed914e13c522fd05f2aca211f8bfad"}, - {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfd9306511fdfc623a1ba1dc3bc07fbd24e6cfbe3c28b4d1e05177baa2f99617"}, - {file = "grpcio-1.67.0-cp39-cp39-win32.whl", hash = "sha256:30d47dbacfd20cbd0c8be9bfa52fdb833b395d4ec32fe5cff7220afc05d08571"}, - {file = "grpcio-1.67.0-cp39-cp39-win_amd64.whl", hash = "sha256:f55f077685f61f0fbd06ea355142b71e47e4a26d2d678b3ba27248abfe67163a"}, - {file = "grpcio-1.67.0.tar.gz", hash = "sha256:e090b2553e0da1c875449c8e75073dd4415dd71c9bde6a406240fdf4c0ee467c"}, + {file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"}, + {file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"}, + {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a013c5fbb12bfb5f927444b477a26f1080755a931d5d362e6a9a720ca7dbae60"}, + {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1b24c23d51a1e8790b25514157d43f0a4dce1ac12b3f0b8e9f66a5e2c4c132f"}, + {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffb8ea674d68de4cac6f57d2498fef477cef582f1fa849e9f844863af50083"}, + {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:307b1d538140f19ccbd3aed7a93d8f71103c5d525f3c96f8616111614b14bf2a"}, + {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c17ebcec157cfb8dd445890a03e20caf6209a5bd4ac5b040ae9dbc59eef091d"}, + {file = "grpcio-1.66.1-cp310-cp310-win32.whl", hash = "sha256:ef82d361ed5849d34cf09105d00b94b6728d289d6b9235513cb2fcc79f7c432c"}, + {file = "grpcio-1.66.1-cp310-cp310-win_amd64.whl", hash = "sha256:292a846b92cdcd40ecca46e694997dd6b9be6c4c01a94a0dfb3fcb75d20da858"}, + {file = "grpcio-1.66.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:c30aeceeaff11cd5ddbc348f37c58bcb96da8d5aa93fed78ab329de5f37a0d7a"}, + {file = "grpcio-1.66.1-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:8a1e224ce6f740dbb6b24c58f885422deebd7eb724aff0671a847f8951857c26"}, + {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a66fe4dc35d2330c185cfbb42959f57ad36f257e0cc4557d11d9f0a3f14311df"}, + {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ba04659e4fce609de2658fe4dbf7d6ed21987a94460f5f92df7579fd5d0e22"}, + {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4573608e23f7e091acfbe3e84ac2045680b69751d8d67685ffa193a4429fedb1"}, + {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7e06aa1f764ec8265b19d8f00140b8c4b6ca179a6dc67aa9413867c47e1fb04e"}, + {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3885f037eb11f1cacc41f207b705f38a44b69478086f40608959bf5ad85826dd"}, + {file = "grpcio-1.66.1-cp311-cp311-win32.whl", hash = "sha256:97ae7edd3f3f91480e48ede5d3e7d431ad6005bfdbd65c1b56913799ec79e791"}, + {file = "grpcio-1.66.1-cp311-cp311-win_amd64.whl", hash = "sha256:cfd349de4158d797db2bd82d2020554a121674e98fbe6b15328456b3bf2495bb"}, + {file = "grpcio-1.66.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:a92c4f58c01c77205df6ff999faa008540475c39b835277fb8883b11cada127a"}, + {file = "grpcio-1.66.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fdb14bad0835914f325349ed34a51940bc2ad965142eb3090081593c6e347be9"}, + {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f03a5884c56256e08fd9e262e11b5cfacf1af96e2ce78dc095d2c41ccae2c80d"}, + {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ca2559692d8e7e245d456877a85ee41525f3ed425aa97eb7a70fc9a79df91a0"}, + {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca1be089fb4446490dd1135828bd42a7c7f8421e74fa581611f7afdf7ab761"}, + {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d639c939ad7c440c7b2819a28d559179a4508783f7e5b991166f8d7a34b52815"}, + {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b9feb4e5ec8dc2d15709f4d5fc367794d69277f5d680baf1910fc9915c633524"}, + {file = "grpcio-1.66.1-cp312-cp312-win32.whl", hash = "sha256:7101db1bd4cd9b880294dec41a93fcdce465bdbb602cd8dc5bd2d6362b618759"}, + {file = "grpcio-1.66.1-cp312-cp312-win_amd64.whl", hash = "sha256:b0aa03d240b5539648d996cc60438f128c7f46050989e35b25f5c18286c86734"}, + {file = "grpcio-1.66.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:ecfe735e7a59e5a98208447293ff8580e9db1e890e232b8b292dc8bd15afc0d2"}, + {file = "grpcio-1.66.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4825a3aa5648010842e1c9d35a082187746aa0cdbf1b7a2a930595a94fb10fce"}, + {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f517fd7259fe823ef3bd21e508b653d5492e706e9f0ef82c16ce3347a8a5620c"}, + {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1fe60d0772831d96d263b53d83fb9a3d050a94b0e94b6d004a5ad111faa5b5b"}, + {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31a049daa428f928f21090403e5d18ea02670e3d5d172581670be006100db9ef"}, + {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f914386e52cbdeb5d2a7ce3bf1fdfacbe9d818dd81b6099a05b741aaf3848bb"}, + {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bff2096bdba686019fb32d2dde45b95981f0d1490e054400f70fc9a8af34b49d"}, + {file = "grpcio-1.66.1-cp38-cp38-win32.whl", hash = 
"sha256:aa8ba945c96e73de29d25331b26f3e416e0c0f621e984a3ebdb2d0d0b596a3b3"}, + {file = "grpcio-1.66.1-cp38-cp38-win_amd64.whl", hash = "sha256:161d5c535c2bdf61b95080e7f0f017a1dfcb812bf54093e71e5562b16225b4ce"}, + {file = "grpcio-1.66.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:d0cd7050397b3609ea51727b1811e663ffda8bda39c6a5bb69525ef12414b503"}, + {file = "grpcio-1.66.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e6c9b42ded5d02b6b1fea3a25f036a2236eeb75d0579bfd43c0018c88bf0a3e"}, + {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c9f80f9fad93a8cf71c7f161778ba47fd730d13a343a46258065c4deb4b550c0"}, + {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dd67ed9da78e5121efc5c510f0122a972216808d6de70953a740560c572eb44"}, + {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b0d92d45ce3be2084b92fb5bae2f64c208fea8ceed7fccf6a7b524d3c4942e"}, + {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4d813316d1a752be6f5c4360c49f55b06d4fe212d7df03253dfdae90c8a402bb"}, + {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c9bebc6627873ec27a70fc800f6083a13c70b23a5564788754b9ee52c5aef6c"}, + {file = "grpcio-1.66.1-cp39-cp39-win32.whl", hash = "sha256:30a1c2cf9390c894c90bbc70147f2372130ad189cffef161f0432d0157973f45"}, + {file = "grpcio-1.66.1-cp39-cp39-win_amd64.whl", hash = "sha256:17663598aadbedc3cacd7bbde432f541c8e07d2496564e22b214b22c7523dac8"}, + {file = "grpcio-1.66.1.tar.gz", hash = "sha256:35334f9c9745add3e357e3372756fd32d925bd52c41da97f4dfdafbde0bf0ee2"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.67.0)"] +protobuf = ["grpcio-tools (>=1.66.1)"] [[package]] name = "gunicorn" @@ -1083,13 +1058,13 @@ tornado = ["tornado (>=0.2)"] [[package]] name = "huggingface-hub" -version = "0.25.2" +version = "0.24.6" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25"}, - {file = "huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c"}, + {file = "huggingface_hub-0.24.6-py3-none-any.whl", hash = "sha256:a990f3232aa985fe749bc9474060cbad75e8b2f115f6665a9fda5b9c97818970"}, + {file = "huggingface_hub-0.24.6.tar.gz", hash = "sha256:cc2579e761d070713eaa9c323e3debe39d5b464ae3a7261c39a9195b27bb8000"}, ] [package.dependencies] @@ -1117,13 +1092,13 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "humanize" -version = "4.11.0" +version = "4.10.0" description = "Python humanize utilities" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "humanize-4.11.0-py3-none-any.whl", hash = "sha256:b53caaec8532bcb2fff70c8826f904c35943f8cecaca29d272d9df38092736c0"}, - {file = "humanize-4.11.0.tar.gz", hash = "sha256:e66f36020a2d5a974c504bd2555cf770621dbdbb6d82f94a6857c0b1ea2608be"}, + {file = "humanize-4.10.0-py3-none-any.whl", hash = "sha256:39e7ccb96923e732b5c2e27aeaa3b10a8dfeeba3eb965ba7b74a3eb0e30040a6"}, + {file = "humanize-4.10.0.tar.gz", hash = "sha256:06b6eb0293e4b85e8d385397c5868926820db32b9b654b932f57fa41c23c9978"}, ] [package.extras] @@ -1131,13 +1106,13 @@ tests = ["freezegun", "pytest", "pytest-cov"] [[package]] name = "identify" -version = "2.6.1" 
+version = "2.6.0" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"}, - {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"}, + {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, + {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, ] [package.extras] @@ -1145,27 +1120,24 @@ license = ["ukkonen"] [[package]] name = "idna" -version = "3.10" +version = "3.8" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, - {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, + {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, + {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, ] -[package.extras] -all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] - [[package]] name = "imageio" -version = "2.36.0" +version = "2.35.1" description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "imageio-2.36.0-py3-none-any.whl", hash = "sha256:471f1eda55618ee44a3c9960911c35e647d9284c68f077e868df633398f137f0"}, - {file = "imageio-2.36.0.tar.gz", hash = "sha256:1c8f294db862c256e9562354d65aa54725b8dafed7f10f02bb3ec20ec1678850"}, + {file = "imageio-2.35.1-py3-none-any.whl", hash = "sha256:6eb2e5244e7a16b85c10b5c2fe0f7bf961b40fcb9f1a9fd1bd1d2c2f8fb3cd65"}, + {file = "imageio-2.35.1.tar.gz", hash = "sha256:4952dfeef3c3947957f6d5dedb1f4ca31c6e509a476891062396834048aeed2a"}, ] [package.dependencies] @@ -1173,8 +1145,8 @@ numpy = "*" pillow = ">=8.3.2" [package.extras] -all-plugins = ["astropy", "av", "imageio-ffmpeg", "numpy (>2)", "pillow-heif", "psutil", "rawpy", "tifffile"] -all-plugins-pypy = ["av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] +all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"] +all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"] build = ["wheel"] dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] @@ -1192,36 +1164,32 @@ tifffile = ["tifffile"] [[package]] name = "importlib-metadata" -version = "8.5.0" +version = "8.4.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, - {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, + {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, + {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, ] 
[package.dependencies] -zipp = ">=3.20" +zipp = ">=0.5" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] -cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["pytest-mypy"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] [[package]] name = "importlib-resources" -version = "6.4.5" +version = "6.4.4" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, - {file = "importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, + {file = "importlib_resources-6.4.4-py3-none-any.whl", hash = "sha256:dda242603d1c9cd836c3368b1174ed74cb4049ecd209e7a1a0104620c18c5c11"}, + {file = "importlib_resources-6.4.4.tar.gz", hash = "sha256:20600c8b7361938dc0bb2d5ec0297802e575df486f5a544fa414da65e13721f7"}, ] [package.dependencies] @@ -1287,125 +1255,115 @@ files = [ [[package]] name = "kiwisolver" -version = "1.4.7" +version = "1.4.5" description = "A fast implementation of the Cassowary constraint solver" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, - {file = 
"kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, - {file = 
"kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, - {file = 
"kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, - {file = 
"kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, - {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, + {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, ] [[package]] @@ -1644,121 +1602,127 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "3.0.1" +version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." optional = false -python-versions = ">=3.9" -files = [ - {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = "sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"}, - {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = "sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"}, - {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"}, - {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"}, - {file = 
"MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"}, - {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"}, - {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = "sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"}, - {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"}, - {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"}, +python-versions = ">=3.7" +files = [ 
+ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] [[package]] name = "matplotlib" -version = "3.9.2" +version = "3.7.5" description = "Python plotting package" optional = false -python-versions = ">=3.9" -files = [ - {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"}, - {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"}, - {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"}, - {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"}, - {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"}, - {file = "matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"}, - {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"}, - {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"}, - {file = "matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"}, - {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"}, - {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"}, - {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"}, - {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"}, - {file = 
"matplotlib-3.9.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:cef2a73d06601437be399908cf13aee74e86932a5ccc6ccdf173408ebc5f6bb2"}, - {file = "matplotlib-3.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e0830e188029c14e891fadd99702fd90d317df294c3298aad682739c5533721a"}, - {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ba9c1299c920964e8d3857ba27173b4dbb51ca4bab47ffc2c2ba0eb5e2cbc5"}, - {file = "matplotlib-3.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd93b91ab47a3616b4d3c42b52f8363b88ca021e340804c6ab2536344fad9ca"}, - {file = "matplotlib-3.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6d1ce5ed2aefcdce11904fc5bbea7d9c21fff3d5f543841edf3dea84451a09ea"}, - {file = "matplotlib-3.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:b2696efdc08648536efd4e1601b5fd491fd47f4db97a5fbfd175549a7365c1b2"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d52a3b618cb1cbb769ce2ee1dcdb333c3ab6e823944e9a2d36e37253815f9556"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:039082812cacd6c6bec8e17a9c1e6baca230d4116d522e81e1f63a74d01d2e21"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6758baae2ed64f2331d4fd19be38b7b4eae3ecec210049a26b6a4f3ae1c85dcc"}, - {file = "matplotlib-3.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:050598c2b29e0b9832cde72bcf97627bf00262adbc4a54e2b856426bb2ef0697"}, - {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"}, +python-versions = ">=3.8" +files = [ + {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:4a87b69cb1cb20943010f63feb0b2901c17a3b435f75349fd9865713bfa63925"}, + {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d3ce45010fefb028359accebb852ca0c21bd77ec0f281952831d235228f15810"}, + {file = "matplotlib-3.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbea1e762b28400393d71be1a02144aa16692a3c4c676ba0178ce83fc2928fdd"}, + {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec0e1adc0ad70ba8227e957551e25a9d2995e319c29f94a97575bb90fa1d4469"}, + {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6738c89a635ced486c8a20e20111d33f6398a9cbebce1ced59c211e12cd61455"}, + {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1210b7919b4ed94b5573870f316bca26de3e3b07ffdb563e79327dc0e6bba515"}, + {file = "matplotlib-3.7.5-cp310-cp310-win32.whl", hash = "sha256:068ebcc59c072781d9dcdb82f0d3f1458271c2de7ca9c78f5bd672141091e9e1"}, + {file = "matplotlib-3.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:f098ffbaab9df1e3ef04e5a5586a1e6b1791380698e84938d8640961c79b1fc0"}, + {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:f65342c147572673f02a4abec2d5a23ad9c3898167df9b47c149f32ce61ca078"}, + {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4ddf7fc0e0dc553891a117aa083039088d8a07686d4c93fb8a810adca68810af"}, + {file = "matplotlib-3.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0ccb830fc29442360d91be48527809f23a5dcaee8da5f4d9b2d5b867c1b087b8"}, + {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efc6bb28178e844d1f408dd4d6341ee8a2e906fc9e0fa3dae497da4e0cab775d"}, 
+ {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b15c4c2d374f249f324f46e883340d494c01768dd5287f8bc00b65b625ab56c"}, + {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d028555421912307845e59e3de328260b26d055c5dac9b182cc9783854e98fb"}, + {file = "matplotlib-3.7.5-cp311-cp311-win32.whl", hash = "sha256:fe184b4625b4052fa88ef350b815559dd90cc6cc8e97b62f966e1ca84074aafa"}, + {file = "matplotlib-3.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:084f1f0f2f1010868c6f1f50b4e1c6f2fb201c58475494f1e5b66fed66093647"}, + {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_universal2.whl", hash = "sha256:34bceb9d8ddb142055ff27cd7135f539f2f01be2ce0bafbace4117abe58f8fe4"}, + {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c5a2134162273eb8cdfd320ae907bf84d171de948e62180fa372a3ca7cf0f433"}, + {file = "matplotlib-3.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:039ad54683a814002ff37bf7981aa1faa40b91f4ff84149beb53d1eb64617980"}, + {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d742ccd1b09e863b4ca58291728db645b51dab343eebb08d5d4b31b308296ce"}, + {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:743b1c488ca6a2bc7f56079d282e44d236bf375968bfd1b7ba701fd4d0fa32d6"}, + {file = "matplotlib-3.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:fbf730fca3e1f23713bc1fae0a57db386e39dc81ea57dc305c67f628c1d7a342"}, + {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:cfff9b838531698ee40e40ea1a8a9dc2c01edb400b27d38de6ba44c1f9a8e3d2"}, + {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:1dbcca4508bca7847fe2d64a05b237a3dcaec1f959aedb756d5b1c67b770c5ee"}, + {file = "matplotlib-3.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4cdf4ef46c2a1609a50411b66940b31778db1e4b73d4ecc2eaa40bd588979b13"}, + {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:167200ccfefd1674b60e957186dfd9baf58b324562ad1a28e5d0a6b3bea77905"}, + {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:53e64522934df6e1818b25fd48cf3b645b11740d78e6ef765fbb5fa5ce080d02"}, + {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e3bc79b2d7d615067bd010caff9243ead1fc95cf735c16e4b2583173f717eb"}, + {file = "matplotlib-3.7.5-cp38-cp38-win32.whl", hash = "sha256:6b641b48c6819726ed47c55835cdd330e53747d4efff574109fd79b2d8a13748"}, + {file = "matplotlib-3.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:f0b60993ed3488b4532ec6b697059897891927cbfc2b8d458a891b60ec03d9d7"}, + {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:090964d0afaff9c90e4d8de7836757e72ecfb252fb02884016d809239f715651"}, + {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9fc6fcfbc55cd719bc0bfa60bde248eb68cf43876d4c22864603bdd23962ba25"}, + {file = "matplotlib-3.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7cc3078b019bb863752b8b60e8b269423000f1603cb2299608231996bd9d54"}, + {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e4e9a868e8163abaaa8259842d85f949a919e1ead17644fb77a60427c90473c"}, + {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa7ebc995a7d747dacf0a717d0eb3aa0f0c6a0e9ea88b0194d3a3cd241a1500f"}, + 
{file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3785bfd83b05fc0e0c2ae4c4a90034fe693ef96c679634756c50fe6efcc09856"}, + {file = "matplotlib-3.7.5-cp39-cp39-win32.whl", hash = "sha256:29b058738c104d0ca8806395f1c9089dfe4d4f0f78ea765c6c704469f3fffc81"}, + {file = "matplotlib-3.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:fd4028d570fa4b31b7b165d4a685942ae9cdc669f33741e388c01857d9723eab"}, + {file = "matplotlib-3.7.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2a9a3f4d6a7f88a62a6a18c7e6a84aedcaf4faf0708b4ca46d87b19f1b526f88"}, + {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9b3fd853d4a7f008a938df909b96db0b454225f935d3917520305b90680579c"}, + {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ad550da9f160737d7890217c5eeed4337d07e83ca1b2ca6535078f354e7675"}, + {file = "matplotlib-3.7.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:20da7924a08306a861b3f2d1da0d1aa9a6678e480cf8eacffe18b565af2813e7"}, + {file = "matplotlib-3.7.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b45c9798ea6bb920cb77eb7306409756a7fab9db9b463e462618e0559aecb30e"}, + {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a99866267da1e561c7776fe12bf4442174b79aac1a47bd7e627c7e4d077ebd83"}, + {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b6aa62adb6c268fc87d80f963aca39c64615c31830b02697743c95590ce3fbb"}, + {file = "matplotlib-3.7.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e530ab6a0afd082d2e9c17eb1eb064a63c5b09bb607b2b74fa41adbe3e162286"}, + {file = "matplotlib-3.7.5.tar.gz", hash = "sha256:1e5c971558ebc811aa07f54c7b7c677d78aa518ef4c390e14673a09e0860184a"}, ] [package.dependencies] @@ -1766,16 +1730,13 @@ contourpy = ">=1.0.1" cycler = ">=0.10" fonttools = ">=4.22.0" importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} -kiwisolver = ">=1.3.1" -numpy = ">=1.23" +kiwisolver = ">=1.0.1" +numpy = ">=1.20,<2" packaging = ">=20.0" -pillow = ">=8" +pillow = ">=6.2.0" pyparsing = ">=2.3.1" python-dateutil = ">=2.7" -[package.extras] -dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6)", "setuptools (>=64)", "setuptools_scm (>=7)"] - [[package]] name = "mdurl" version = "0.1.2" @@ -1848,13 +1809,13 @@ pyyaml = ">=5.1" [[package]] name = "mkdocs-material" -version = "9.5.41" +version = "9.5.34" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.41-py3-none-any.whl", hash = "sha256:990bc138c33342b5b73e7545915ebc0136e501bfbd8e365735144f5120891d83"}, - {file = "mkdocs_material-9.5.41.tar.gz", hash = "sha256:30fa5d459b4b8130848ecd8e1c908878345d9d8268f7ddbc31eebe88d462d97b"}, + {file = "mkdocs_material-9.5.34-py3-none-any.whl", hash = "sha256:54caa8be708de2b75167fd4d3b9f3d949579294f49cb242515d4653dbee9227e"}, + {file = "mkdocs_material-9.5.34.tar.gz", hash = "sha256:1e60ddf716cfb5679dfd65900b8a25d277064ed82d9a53cd5190e3f894df7840"}, ] [package.dependencies] @@ -1888,13 +1849,13 @@ files = [ [[package]] name = "mlflow" -version = "2.17.0" +version = "2.16.0" description = "MLflow is an open source platform for the complete machine learning lifecycle" optional = false python-versions = ">=3.8" files = [ - {file = "mlflow-2.17.0-py3-none-any.whl", hash = 
"sha256:64fbc0dfcb7322ed4cbccadc2f533bdd2944001b983ea8c10db45c7c59b46b7c"}, - {file = "mlflow-2.17.0.tar.gz", hash = "sha256:5bb2089b833da48e4a92a9b4cb1cb5fa509a571eb3c603be39f5238b4721e076"}, + {file = "mlflow-2.16.0-py3-none-any.whl", hash = "sha256:9f27ef6ae7a82d7ecd67b6b4a4d50637a5e8160639115570fbc689758f9c0b54"}, + {file = "mlflow-2.16.0.tar.gz", hash = "sha256:82ea1a2e800f404f1586783b7636091c0a5754cf9ff45afeadf3a5e467f5168f"}, ] [package.dependencies] @@ -1909,7 +1870,7 @@ Jinja2 = [ ] markdown = ">=3.3,<4" matplotlib = "<4" -mlflow-skinny = "2.17.0" +mlflow-skinny = "2.16.0" numpy = "<3" pandas = "<3" pyarrow = ">=4.0.0,<18" @@ -1921,24 +1882,23 @@ waitress = {version = "<4", markers = "platform_system == \"Windows\""} [package.extras] aliyun-oss = ["aliyunstoreplugin"] databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] -extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] jfrog = ["mlflow-jfrog-plugin"] -langchain = ["langchain (>=0.1.0,<=0.3.1)"] -mlserver = ["mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)"] +langchain = ["langchain (>=0.1.0,<=0.2.15)"] sqlserver = ["mlflow-dbstore"] xethub = ["mlflow-xethub"] [[package]] name = "mlflow-skinny" -version = "2.17.0" +version = "2.16.0" description = "MLflow is an open source platform for the complete machine learning lifecycle" optional = false python-versions = ">=3.8" files = [ - {file = "mlflow_skinny-2.17.0-py3-none-any.whl", hash = "sha256:9eff7160f7459e09c01cc5bc2a68fdba7b64adbce069ef6d1013569830569048"}, - {file = "mlflow_skinny-2.17.0.tar.gz", hash = "sha256:bbb770368e68ffe783a76fa38854618c1411b44bda21eb8b770ca4cc28801299"}, + {file = "mlflow_skinny-2.16.0-py3-none-any.whl", hash = "sha256:c55541f50efd0f6637377b10e8a654847a3fcd815b8680a95f02e0ca6bd7700c"}, + {file = "mlflow_skinny-2.16.0.tar.gz", hash = "sha256:9b823173063743783b4e7b6c52bdadcc7d9dab48eb883ac454c0d56609df6b2d"}, ] [package.dependencies] @@ -1959,12 +1919,11 @@ sqlparse = ">=0.4.0,<1" [package.extras] aliyun-oss = ["aliyunstoreplugin"] databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] -extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", 
"tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] jfrog = ["mlflow-jfrog-plugin"] -langchain = ["langchain (>=0.1.0,<=0.3.1)"] -mlserver = ["mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)"] +langchain = ["langchain (>=0.1.0,<=0.2.15)"] sqlserver = ["mlflow-dbstore"] xethub = ["mlflow-xethub"] @@ -2043,50 +2002,49 @@ tests = ["pytest (>=4.6)"] [[package]] name = "networkx" -version = "3.2.1" +version = "3.1" description = "Python package for creating and manipulating graphs and networks" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, - {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, ] [package.extras] -default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] -developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] -doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] -test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "nibabel" -version = "5.3.1" +version = "5.2.1" description = "Access a multitude of neuroimaging data formats" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "nibabel-5.3.1-py3-none-any.whl", hash = "sha256:5c04c7139d41a59ef92839f1cabbe73061edd5787340bf2c9a34ed71f0db9d07"}, - {file = "nibabel-5.3.1.tar.gz", hash = "sha256:aec1b75dcf6bd9595a9196ff341b87957c69fb21bc5e38719463478dad83000a"}, + {file = "nibabel-5.2.1-py3-none-any.whl", hash = "sha256:2cbbc22985f7f9d39d050df47249771dfb8d48447f5e7a993177e4cabfe047f0"}, + {file = "nibabel-5.2.1.tar.gz", hash = "sha256:b6c80b2e728e4bc2b65f1142d9b8d2287a9102a8bf8477e115ef0d8334559975"}, ] [package.dependencies] -importlib-resources = {version = ">=5.12", markers = "python_version < \"3.12\""} -numpy = ">=1.22" -packaging = ">=20" -typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""} +importlib-resources = {version = ">=1.3", markers = "python_version < \"3.9\""} +numpy = ">=1.20" +packaging = ">=17" [package.extras] -all = ["h5py", "pillow", "pydicom (>=2.3)", "pyzstd (>=0.14.3)", "scipy"] +all = ["nibabel[dicomfs,minc2,spm,zstd]"] dev = ["tox"] -dicom = ["pydicom (>=2.3)"] -dicomfs = ["pillow", "pydicom (>=2.3)"] -doc = ["matplotlib (>=3.5)", "numpydoc", 
"sphinx", "texext", "tomli"] +dicom = ["pydicom (>=1.0.0)"] +dicomfs = ["nibabel[dicom]", "pillow"] +doc = ["matplotlib (>=1.5.3)", "numpydoc", "sphinx", "texext", "tomli"] doctest = ["tox"] minc2 = ["h5py"] spm = ["scipy"] style = ["tox"] -test = ["coverage (>=7.2)", "pytest", "pytest-cov", "pytest-doctestplus", "pytest-httpserver", "pytest-xdist"] +test = ["pytest", "pytest-cov", "pytest-doctestplus", "pytest-httpserver", "pytest-xdist"] typing = ["tox"] zstd = ["pyzstd (>=0.14.3)"] @@ -2132,56 +2090,39 @@ files = [ [[package]] name = "numpy" -version = "2.0.2" +version = "1.24.4" description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.9" -files = [ - {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, - {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, - {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"}, - {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"}, - {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"}, - {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"}, - {file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"}, - {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"}, - {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"}, - {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"}, - {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"}, - {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"}, - {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"}, - {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"}, - {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"}, - {file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"}, - {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash 
= "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"}, - {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"}, - {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"}, - {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"}, - {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"}, - {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"}, - {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"}, - {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"}, - {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"}, - {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"}, - {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"}, - {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"}, - {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"}, - {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"}, - {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"}, - {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"}, - {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"}, +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"}, + {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"}, + {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"}, + {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"}, + {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"}, + {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"}, + {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"}, + {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"}, + {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"}, + {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"}, + {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"}, + {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"}, + {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"}, + {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"}, + {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"}, + {file = "numpy-1.24.4-cp39-cp39-win32.whl", 
hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"}, + {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"}, + {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"}, + {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"}, ] [[package]] @@ -2307,14 +2248,14 @@ files = [ [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.6.77" +version = "12.6.68" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:3bf10d85bb1801e9c894c6e197e44dd137d2a0a9e43f8450e9ad13f2df0dd52d"}, - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9ae346d16203ae4ea513be416495167a0101d33d2d14935aa9c1829a3fb45142"}, - {file = "nvidia_nvjitlink_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:410718cd44962bed862a31dd0318620f6f9a8b28a6291967bcfcb446a6516771"}, + {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_aarch64.whl", hash = "sha256:b3fd0779845f68b92063ab1393abab1ed0a23412fc520df79a8190d098b5cd6b"}, + {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-manylinux2014_x86_64.whl", hash = "sha256:125a6c2a44e96386dda634e13d944e60b07a0402d391a070e8fb4104b34ea1ab"}, + {file = "nvidia_nvjitlink_cu12-12.6.68-py3-none-win_amd64.whl", hash = "sha256:a55744c98d70317c5e23db14866a8cc2b733f7324509e941fc96276f9f37801d"}, ] [[package]] @@ -2328,49 +2269,68 @@ files = [ {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, ] +[[package]] +name = "oauthlib" +version = "3.2.2" +description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" +optional = false +python-versions = ">=3.6" +files = [ + {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, + {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, +] + +[package.extras] +rsa = ["cryptography (>=3.0.0)"] +signals = ["blinker (>=1.4.0)"] +signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] + [[package]] name = "opentelemetry-api" -version = "1.16.0" +version = "1.27.0" description = "OpenTelemetry Python API" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_api-1.16.0-py3-none-any.whl", hash = "sha256:79e8f0cf88dbdd36b6abf175d2092af1efcaa2e71552d0d2b3b181a9707bf4bc"}, - {file = "opentelemetry_api-1.16.0.tar.gz", hash = "sha256:4b0e895a3b1f5e1908043ebe492d33e33f9ccdbe6d02d3994c2f8721a63ddddb"}, + {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, + {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, ] [package.dependencies] deprecated = ">=1.2.6" -setuptools 
= ">=16.0" +importlib-metadata = ">=6.0,<=8.4.0" [[package]] name = "opentelemetry-sdk" -version = "1.16.0" +version = "1.27.0" description = "OpenTelemetry Python SDK" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_sdk-1.16.0-py3-none-any.whl", hash = "sha256:15f03915eec4839f885a5e6ed959cde59b8690c8c012d07c95b4b138c98dc43f"}, - {file = "opentelemetry_sdk-1.16.0.tar.gz", hash = "sha256:4d3bb91e9e209dbeea773b5565d901da4f76a29bf9dbc1c9500be3cabb239a4e"}, + {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, + {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, ] [package.dependencies] -opentelemetry-api = "1.16.0" -opentelemetry-semantic-conventions = "0.37b0" -setuptools = ">=16.0" +opentelemetry-api = "1.27.0" +opentelemetry-semantic-conventions = "0.48b0" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.37b0" +version = "0.48b0" description = "OpenTelemetry Semantic Conventions" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "opentelemetry_semantic_conventions-0.37b0-py3-none-any.whl", hash = "sha256:462982278a42dab01f68641cd89f8460fe1f93e87c68a012a76fb426dcdba5ee"}, - {file = "opentelemetry_semantic_conventions-0.37b0.tar.gz", hash = "sha256:087ce2e248e42f3ffe4d9fa2303111de72bb93baa06a0f4655980bc1557c4228"}, + {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, + {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, ] +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.27.0" + [[package]] name = "packaging" version = "24.1" @@ -2399,89 +2359,51 @@ lint = ["black"] [[package]] name = "pandas" -version = "2.2.3" +version = "1.5.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false -python-versions = ">=3.9" -files = [ - {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, - {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, - {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, - {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, - {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, - {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, - {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, - {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, - {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, - {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, - {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, - {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, - {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, - {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, - {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, - {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, - {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, - {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, - {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, - {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, - {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, - {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, - {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, - {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, - {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, - {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, - {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, - {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, - {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, - {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, - {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, - {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, - {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, - {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, - {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, - {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, - {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, - {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, - {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, - {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, - {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, - {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, +python-versions = ">=3.8" +files = [ + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, + {file = "pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, + {file = "pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, + {file = "pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, + {file = "pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, + {file = "pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, + {file = "pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, + {file = 
"pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, + {file = "pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, + {file = "pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, + {file = "pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, + {file = "pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, + {file = "pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, + {file = "pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, + {file = "pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, + {file = "pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, + {file = "pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, ] [package.dependencies] numpy = [ - {version = ">=1.22.4", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] -python-dateutil = ">=2.8.2" +python-dateutil = ">=2.8.1" pytz = ">=2020.1" -tzdata = ">=2022.7" [package.extras] -all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] -aws = ["s3fs (>=2022.11.0)"] -clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] -compression 
= ["zstandard (>=0.19.0)"] -computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] -consortium-standard = ["dataframe-api-compat (>=0.1.7)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] -feather = ["pyarrow (>=10.0.1)"] -fss = ["fsspec (>=2022.11.0)"] -gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] -hdf5 = ["tables (>=3.8.0)"] -html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] -mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] -parquet = ["pyarrow (>=10.0.1)"] -performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] -plot = ["matplotlib (>=3.6.3)"] -postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] -pyarrow = ["pyarrow (>=10.0.1)"] -spss = ["pyreadstat (>=1.2.0)"] -sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] -test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.9.2)"] +test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] [[package]] name = "pathspec" @@ -2506,90 +2428,95 @@ files = [ [[package]] name = "pillow" -version = "11.0.0" +version = "10.4.0" description = "Python Imaging Library (Fork)" optional = false -python-versions = ">=3.9" -files = [ - {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, - {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, - {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, - {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, - {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, 
- {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, - {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, - {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, - {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, - {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, - {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, - {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = 
"sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, - {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, - {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, - {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, - {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, - {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, - {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, - {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, - {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, - {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, - {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, - {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, +python-versions = ">=3.8" +files = [ + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = 
"pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", 
"sphinx-inline-tabs", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] @@ -2598,19 +2525,19 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.3.6" +version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, - {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.11.2)"] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] [[package]] name = "pluggy" @@ -2629,13 +2556,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pre-commit" -version = "4.0.1" +version = "3.5.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878"}, - {file = "pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2"}, + {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"}, + {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"}, ] [package.dependencies] @@ -2647,13 +2574,13 @@ virtualenv = ">=20.10.0" [[package]] name = "prometheus-client" -version = "0.21.0" +version = "0.20.0" description = "Python client for the Prometheus monitoring system." 
optional = false python-versions = ">=3.8" files = [ - {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, - {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, + {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"}, + {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"}, ] [package.extras] @@ -2675,22 +2602,22 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "4.25.5" +version = "4.25.4" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, - {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, - {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, - {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, - {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, - {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = "sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, - {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, - {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, - {file = "protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, - {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, - {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, + {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, + {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, + {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"}, + {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"}, + {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"}, + {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"}, + {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"}, + {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"}, + {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = "sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"}, + {file = 
"protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"}, + {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, ] [[package]] @@ -2786,24 +2713,24 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] [[package]] name = "pyasn1" -version = "0.6.1" +version = "0.6.0" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, - {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, + {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, + {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, ] [[package]] name = "pyasn1-modules" -version = "0.4.1" +version = "0.4.0" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" files = [ - {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, - {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, + {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, + {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, ] [package.dependencies] @@ -2811,120 +2738,119 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.9.2" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, - {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.23.4" +annotated-types = ">=0.4.0" +pydantic-core = "2.20.1" typing-extensions = {version = ">=4.6.1", markers = "python_version < \"3.13\""} [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.4" +version = "2.20.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, - {file = 
"pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, - {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, - {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, - {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, - {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, - {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, - {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, - {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, - {file 
= "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, - {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, - {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, - {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, - {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, - {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = 
"sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, - {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, - {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, - {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, - {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, - {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, - {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = 
"pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = 
"sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] @@ -2946,13 +2872,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymdown-extensions" -version = "10.11.2" +version = "10.9" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.11.2-py3-none-any.whl", hash = "sha256:41cdde0a77290e480cf53892f5c5e50921a7ee3e5cd60ba91bf19837b33badcf"}, - {file = "pymdown_extensions-10.11.2.tar.gz", hash = "sha256:bc8847ecc9e784a098efd35e20cba772bc5a1b529dfcef9dc1972db9021a1049"}, + {file = "pymdown_extensions-10.9-py3-none-any.whl", hash = "sha256:d323f7e90d83c86113ee78f3fe62fc9dee5f56b54d912660703ea1816fed5626"}, + {file = "pymdown_extensions-10.9.tar.gz", hash = "sha256:6ff740bcd99ec4172a938970d42b96128bdc9d4b9bcad72494f29921dc69b753"}, ] [package.dependencies] @@ -2975,13 +2901,13 @@ files = [ [[package]] name = "pyparsing" -version = "3.2.0" +version = "3.1.4" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false -python-versions = ">=3.9" +python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84"}, - {file = "pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c"}, + {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, + {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, ] [package.extras] @@ -2989,13 +2915,13 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pytest" -version = "8.3.3" +version = "8.3.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, - {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, ] [package.dependencies] @@ -3077,40 +3003,73 @@ six = ">=1.5" [[package]] name = "pytz" -version = "2024.2" +version = "2024.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, - {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + +[[package]] +name = "pywavelets" +version = "1.4.1" +description = "PyWavelets, wavelet transform module" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyWavelets-1.4.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:d854411eb5ee9cb4bc5d0e66e3634aeb8f594210f6a1bed96dbed57ec70f181c"}, + {file = "PyWavelets-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:231b0e0b1cdc1112f4af3c24eea7bf181c418d37922a67670e9bf6cfa2d544d4"}, + {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:754fa5085768227c4f4a26c1e0c78bc509a266d9ebd0eb69a278be7e3ece943c"}, + {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da7b9c006171be1f9ddb12cc6e0d3d703b95f7f43cb5e2c6f5f15d3233fcf202"}, + {file = "PyWavelets-1.4.1-cp310-cp310-win32.whl", hash = "sha256:67a0d28a08909f21400cb09ff62ba94c064882ffd9e3a6b27880a111211d59bd"}, + {file = "PyWavelets-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91d3d393cffa634f0e550d88c0e3f217c96cfb9e32781f2960876f1808d9b45b"}, + {file = "PyWavelets-1.4.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:64c6bac6204327321db30b775060fbe8e8642316e6bff17f06b9f34936f88875"}, + {file = "PyWavelets-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f19327f2129fb7977bc59b966b4974dfd72879c093e44a7287500a7032695de"}, + {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad987748f60418d5f4138db89d82ba0cb49b086e0cbb8fd5c3ed4a814cfb705e"}, + {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875d4d620eee655346e3589a16a73790cf9f8917abba062234439b594e706784"}, + {file = "PyWavelets-1.4.1-cp311-cp311-win32.whl", hash = "sha256:7231461d7a8eb3bdc7aa2d97d9f67ea5a9f8902522818e7e2ead9c2b3408eeb1"}, + {file = "PyWavelets-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:daf0aa79842b571308d7c31a9c43bc99a30b6328e6aea3f50388cd8f69ba7dbc"}, + {file = "PyWavelets-1.4.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ab7da0a17822cd2f6545626946d3b82d1a8e106afc4b50e3387719ba01c7b966"}, + {file = "PyWavelets-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:578af438a02a86b70f1975b546f68aaaf38f28fb082a61ceb799816049ed18aa"}, + {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb5ca8d11d3f98e89e65796a2125be98424d22e5ada360a0dbabff659fca0fc"}, + {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:058b46434eac4c04dd89aeef6fa39e4b6496a951d78c500b6641fd5b2cc2f9f4"}, + {file = "PyWavelets-1.4.1-cp38-cp38-win32.whl", hash = "sha256:de7cd61a88a982edfec01ea755b0740e94766e00a1ceceeafef3ed4c85c605cd"}, + {file = "PyWavelets-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:7ab8d9db0fe549ab2ee0bea61f614e658dd2df419d5b75fba47baa761e95f8f2"}, + {file = "PyWavelets-1.4.1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:23bafd60350b2b868076d976bdd92f950b3944f119b4754b1d7ff22b7acbf6c6"}, + {file = "PyWavelets-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0e56cd7a53aed3cceca91a04d62feb3a0aca6725b1912d29546c26f6ea90426"}, + {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:030670a213ee8fefa56f6387b0c8e7d970c7f7ad6850dc048bd7c89364771b9b"}, + {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71ab30f51ee4470741bb55fc6b197b4a2b612232e30f6ac069106f0156342356"}, + {file = "PyWavelets-1.4.1-cp39-cp39-win32.whl", hash = "sha256:47cac4fa25bed76a45bc781a293c26ac63e8eaae9eb8f9be961758d22b58649c"}, + {file = "PyWavelets-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:88aa5449e109d8f5e7f0adef85f7f73b1ab086102865be64421a3a3d02d277f4"}, + {file = "PyWavelets-1.4.1.tar.gz", hash = "sha256:6437af3ddf083118c26d8f97ab43b0724b956c9f958e9ea788659f6a2834ba93"}, ] 
+[package.dependencies] +numpy = ">=1.17.3" + [[package]] name = "pywin32" -version = "308" +version = "306" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ - {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, - {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, - {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, - {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, - {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, - {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, - {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, - {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, - {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, - {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, - {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, - {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, - {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, - {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, - {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, - {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, - {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, - {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", 
hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, ] [[package]] @@ -3205,205 +3164,210 @@ prompt_toolkit = ">=2.0,<=3.0.36" [[package]] name = "rapidfuzz" -version = "3.10.0" +version = "3.9.7" description = "rapid fuzzy string matching" optional = false -python-versions = ">=3.9" -files = [ - {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:884453860de029380dded8f3c1918af2d8eb5adf8010261645c7e5c88c2b5428"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718c9bd369288aca5fa929df6dbf66fdbe9768d90940a940c0b5cdc96ade4309"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e3724b7dab761c01816aaa64b0903734d999d5589daf97c14ef5cc0629a8e"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1af60988d47534246d9525f77288fdd9de652608a4842815d9018570b959acc6"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3084161fc3e963056232ef8d937449a2943852e07101f5a136c8f3cfa4119217"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cd67d3d017296d98ff505529104299f78433e4b8af31b55003d901a62bbebe9"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11a127ac590fc991e8a02c2d7e1ac86e8141c92f78546f18b5c904064a0552c"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aadce42147fc09dcef1afa892485311e824c050352e1aa6e47f56b9b27af4cf0"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b54853c2371bf0e38d67da379519deb6fbe70055efb32f6607081641af3dc752"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ce19887268e90ee81a3957eef5e46a70ecc000713796639f83828b950343f49e"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f39a2a5ded23b9b9194ec45740dce57177b80f86c6d8eba953d3ff1a25c97766"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0ec338d5f4ad8d9339a88a08db5c23e7f7a52c2b2a10510c48a0cef1fb3f0ddc"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-win32.whl", hash = "sha256:56fd15ea8f4c948864fa5ebd9261c67cf7b89a1c517a0caef4df75446a7af18c"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:43dfc5e733808962a822ff6d9c29f3039a3cfb3620706f5953e17cfe4496724c"}, - {file = "rapidfuzz-3.10.0-cp310-cp310-win_arm64.whl", hash = "sha256:ae7966f205b5a7fde93b44ca8fed37c1c8539328d7f179b1197de34eceaceb5f"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:bb0013795b40db5cf361e6f21ee7cda09627cf294977149b50e217d7fe9a2f03"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69ef5b363afff7150a1fbe788007e307b9802a2eb6ad92ed51ab94e6ad2674c6"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c582c46b1bb0b19f1a5f4c1312f1b640c21d78c371a6615c34025b16ee56369b"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:288f6f6e7410cacb115fb851f3f18bf0e4231eb3f6cb5bd1cec0e7b25c4d039d"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e29a13d2fd9be3e7d8c26c7ef4ba60b5bc7efbc9dbdf24454c7e9ebba31768"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea2da0459b951ee461bd4e02b8904890bd1c4263999d291c5cd01e6620177ad4"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:457827ba82261aa2ae6ac06a46d0043ab12ba7216b82d87ae1434ec0f29736d6"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5d350864269d56f51ab81ab750c9259ae5cad3152c0680baef143dcec92206a1"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a9b8f51e08c3f983d857c3889930af9ddecc768453822076683664772d87e374"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7f3a6aa6e70fc27e4ff5c479f13cc9fc26a56347610f5f8b50396a0d344c5f55"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:803f255f10d63420979b1909ef976e7d30dec42025c9b067fc1d2040cc365a7e"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2026651761bf83a0f31495cc0f70840d5c0d54388f41316e3f9cb51bd85e49a5"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-win32.whl", hash = "sha256:4df75b3ebbb8cfdb9bf8b213b168620b88fd92d0c16a8bc9f9234630b282db59"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f9f0bbfb6787b97c51516f3ccf97737d504db5d239ad44527673b81f598b84ab"}, - {file = "rapidfuzz-3.10.0-cp311-cp311-win_arm64.whl", hash = "sha256:10fdad800441b9c97d471a937ba7d42625f1b530db05e572f1cb7d401d95c893"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dc87073ba3a40dd65591a2100aa71602107443bf10770579ff9c8a3242edb94"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a425a0a868cf8e9c6e93e1cda4b758cdfd314bb9a4fc916c5742c934e3613480"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d5d1d75e61df060c1e56596b6b0a4422a929dff19cc3dbfd5eee762c86b61"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34f213d59219a9c3ca14e94a825f585811a68ac56b4118b4dc388b5b14afc108"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96ad46f5f56f70fab2be9e5f3165a21be58d633b90bf6e67fc52a856695e4bcf"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9178277f72d144a6c7704d7ae7fa15b7b86f0f0796f0e1049c7b4ef748a662ef"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76a35e9e19a7c883c422ffa378e9a04bc98cb3b29648c5831596401298ee51e6"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a6405d34c394c65e4f73a1d300c001f304f08e529d2ed6413b46ee3037956eb"}, - 
{file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bd393683129f446a75d8634306aed7e377627098a1286ff3af2a4f1736742820"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b0445fa9880ead81f5a7d0efc0b9c977a947d8052c43519aceeaf56eabaf6843"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c50bc308fa29767ed8f53a8d33b7633a9e14718ced038ed89d41b886e301da32"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e89605afebbd2d4b045bccfdc12a14b16fe8ccbae05f64b4b4c64a97dad1c891"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-win32.whl", hash = "sha256:2db9187f3acf3cd33424ecdbaad75414c298ecd1513470df7bda885dcb68cc15"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:50e3d0c72ea15391ba9531ead7f2068a67c5b18a6a365fef3127583aaadd1725"}, - {file = "rapidfuzz-3.10.0-cp312-cp312-win_arm64.whl", hash = "sha256:9eac95b4278bd53115903d89118a2c908398ee8bdfd977ae844f1bd2b02b917c"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fe5231e8afd069c742ac5b4f96344a0fe4aff52df8e53ef87faebf77f827822c"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:886882367dbc985f5736356105798f2ae6e794e671fc605476cbe2e73838a9bb"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33e13e537e3afd1627d421a142a12bbbe601543558a391a6fae593356842f6e"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:094c26116d55bf9c53abd840d08422f20da78ec4c4723e5024322321caedca48"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:545fc04f2d592e4350f59deb0818886c1b444ffba3bec535b4fbb97191aaf769"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:916a6abf3632e592b937c3d04c00a6efadd8fd30539cdcd4e6e4d92be7ca5d90"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6ec40cef63b1922083d33bfef2f91fc0b0bc07b5b09bfee0b0f1717d558292"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c77a7330dd15c7eb5fd3631dc646fc96327f98db8181138766bd14d3e905f0ba"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:949b5e9eeaa4ecb4c7e9c2a4689dddce60929dd1ff9c76a889cdbabe8bbf2171"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b5363932a5aab67010ae1a6205c567d1ef256fb333bc23c27582481606be480c"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5dd6eec15b13329abe66cc241b484002ecb0e17d694491c944a22410a6a9e5e2"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79e7f98525b60b3c14524e0a4e1fedf7654657b6e02eb25f1be897ab097706f3"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-win32.whl", hash = "sha256:d29d1b9857c65f8cb3a29270732e1591b9bacf89de9d13fa764f79f07d8f1fd2"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:fa9720e56663cc3649d62b4b5f3145e94b8f5611e8a8e1b46507777249d46aad"}, - {file = "rapidfuzz-3.10.0-cp313-cp313-win_arm64.whl", hash = "sha256:eda4c661e68dddd56c8fbfe1ca35e40dd2afd973f7ebb1605f4d151edc63dff8"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cffbc50e0767396ed483900900dd58ce4351bc0d40e64bced8694bd41864cc71"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:c038b9939da3035afb6cb2f465f18163e8f070aba0482923ecff9443def67178"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca366c2e2a54e2f663f4529b189fdeb6e14d419b1c78b754ec1744f3c01070d4"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c4c82b1689b23b1b5e6a603164ed2be41b6f6de292a698b98ba2381e889eb9d"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98f6ebe28831a482981ecfeedc8237047878424ad0c1add2c7f366ba44a20452"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd1a7676ee2a4c8e2f7f2550bece994f9f89e58afb96088964145a83af7408b"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec9139baa3f85b65adc700eafa03ed04995ca8533dd56c924f0e458ffec044ab"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:26de93e6495078b6af4c4d93a42ca067b16cc0e95699526c82ab7d1025b4d3bf"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f3a0bda83c18195c361b5500377d0767749f128564ca95b42c8849fd475bb327"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:63e4c175cbce8c3adc22dca5e6154588ae673f6c55374d156f3dac732c88d7de"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4dd3d8443970eaa02ab5ae45ce584b061f2799cd9f7e875190e2617440c1f9d4"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5ddb2388610799fc46abe389600625058f2a73867e63e20107c5ad5ffa57c47"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-win32.whl", hash = "sha256:2e9be5d05cd960914024412b5406fb75a82f8562f45912ff86255acbfdbfb78e"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:47aca565a39c9a6067927871973ca827023e8b65ba6c5747f4c228c8d7ddc04f"}, - {file = "rapidfuzz-3.10.0-cp39-cp39-win_arm64.whl", hash = "sha256:b0732343cdc4273b5921268026dd7266f75466eb21873cb7635a200d9d9c3fac"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f744b5eb1469bf92dd143d36570d2bdbbdc88fe5cb0b5405e53dd34f479cbd8a"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b67cc21a14327a0eb0f47bc3d7e59ec08031c7c55220ece672f9476e7a8068d3"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe5783676f0afba4a522c80b15e99dbf4e393c149ab610308a8ef1f04c6bcc8"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4688862f957c8629d557d084f20b2d803f8738b6c4066802a0b1cc472e088d9"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20bd153aacc244e4c907d772c703fea82754c4db14f8aa64d75ff81b7b8ab92d"}, - {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:50484d563f8bfa723c74c944b0bb15b9e054db9c889348c8c307abcbee75ab92"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5897242d455461f2c5b82d7397b29341fd11e85bf3608a522177071044784ee8"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:116c71a81e046ba56551d8ab68067ca7034d94b617545316d460a452c5c3c289"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0a547e4350d1fa32624d3eab51eff8cf329f4cae110b4ea0402486b1da8be40"}, - {file 
= "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:399b9b79ccfcf50ca3bad7692bc098bb8eade88d7d5e15773b7f866c91156d0c"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7947a425d1be3e744707ee58c6cb318b93a56e08f080722dcc0347e0b7a1bb9a"}, - {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:94c48b4a2a4b1d22246f48e2b11cae01ec7d23f0c9123f8bb822839ad79d0a88"}, - {file = "rapidfuzz-3.10.0.tar.gz", hash = "sha256:6b62af27e65bb39276a66533655a2fa3c60a487b03935721c45b7809527979be"}, +python-versions = ">=3.8" +files = [ + {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ccf68e30b80e903f2309f90a438dbd640dd98e878eeb5ad361a288051ee5b75c"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:696a79018ef989bf1c9abd9005841cee18005ccad4748bad8a4c274c47b6241a"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4eebf6c93af0ae866c22b403a84747580bb5c10f0d7b51c82a87f25405d4dcb"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e9125377fa3d21a8abd4fbdbcf1c27be73e8b1850f0b61b5b711364bf3b59db"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c12d180b17a22d107c8747de9c68d0b9c1d15dcda5445ff9bf9f4ccfb67c3e16"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1318d42610c26dcd68bd3279a1bf9e3605377260867c9a8ed22eafc1bd93a7c"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5fa6e3c6e0333051c1f3a49f0807b3366f4131c8d6ac8c3e05fd0d0ce3755c"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fcf79b686962d7bec458a0babc904cb4fa319808805e036b9d5a531ee6b9b835"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8b01153c7466d0bad48fba77a303d5a768e66f24b763853469f47220b3de4661"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:94baaeea0b4f8632a6da69348b1e741043eba18d4e3088d674d3f76586b6223d"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6c5b32875646cb7f60c193ade99b2e4b124f19583492115293cd00f6fb198b17"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:110b6294396bc0a447648627479c9320f095c2034c0537f687592e0f58622638"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-win32.whl", hash = "sha256:3445a35c4c8d288f2b2011eb61bce1227c633ce85a3154e727170f37c0266bb2"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:0d1415a732ee75e74a90af12020b77a0b396b36c60afae1bde3208a78cd2c9fc"}, + {file = "rapidfuzz-3.9.7-cp310-cp310-win_arm64.whl", hash = "sha256:836f4d88b8bd0fff2ebe815dcaab8aa6c8d07d1d566a7e21dd137cf6fe11ed5b"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d098ce6162eb5e48fceb0745455bc950af059df6113eec83e916c129fca11408"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:048d55d36c02c6685a2b2741688503c3d15149694506655b6169dcfd3b6c2585"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c33211cfff9aec425bb1bfedaf94afcf337063aa273754f22779d6dadebef4c2"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e6d9db2fa4e9be171e9bb31cf2d2575574774966b43f5b951062bb2e67885852"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4e049d5ad61448c9a020d1061eba20944c4887d720c4069724beb6ea1692507"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cfa74aac64c85898b93d9c80bb935a96bf64985e28d4ee0f1a3d1f3bf11a5106"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965693c2e9efd425b0f059f5be50ef830129f82892fa1858e220e424d9d0160f"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8501000a5eb8037c4b56857724797fe5a8b01853c363de91c8d0d0ad56bef319"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d92c552c6b7577402afdd547dcf5d31ea6c8ae31ad03f78226e055cfa37f3c6"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1ee2086f490cb501d86b7e386c1eb4e3a0ccbb0c99067089efaa8c79012c8952"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1de91e7fd7f525e10ea79a6e62c559d1b0278ec097ad83d9da378b6fab65a265"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4da514d13f4433e16960a17f05b67e0af30ac771719c9a9fb877e5004f74477"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-win32.whl", hash = "sha256:a40184c67db8252593ec518e17fb8a6e86d7259dc9f2d6c0bf4ff4db8cf1ad4b"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:c4f28f1930b09a2c300357d8465b388cecb7e8b2f454a5d5425561710b7fd07f"}, + {file = "rapidfuzz-3.9.7-cp311-cp311-win_arm64.whl", hash = "sha256:675b75412a943bb83f1f53e2e54fd18c80ef15ed642dc6eb0382d1949419d904"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1ef6a1a8f0b12f8722f595f15c62950c9a02d5abc64742561299ffd49f6c6944"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32532af1d70c6ec02ea5ac7ee2766dfff7c8ae8c761abfe8da9e527314e634e8"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1a38bade755aa9dd95a81cda949e1bf9cd92b79341ccc5e2189c9e7bdfc5ec"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d73ee2df41224c87336448d279b5b6a3a75f36e41dd3dcf538c0c9cce36360d8"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be3a1fc3e2ab3bdf93dc0c83c00acca8afd2a80602297d96cf4a0ba028333cdf"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:603f48f621272a448ff58bb556feb4371252a02156593303391f5c3281dfaeac"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:268f8e1ca50fc61c0736f3fe9d47891424adf62d96ed30196f30f4bd8216b41f"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f8bf3f0d02935751d8660abda6044821a861f6229f7d359f98bcdcc7e66c39b"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b997ff3b39d4cee9fb025d6c46b0a24bd67595ce5a5b652a97fb3a9d60beb651"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca66676c8ef6557f9b81c5b2b519097817a7c776a6599b8d6fcc3e16edd216fe"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:35d3044cb635ca6b1b2b7b67b3597bd19f34f1753b129eb6d2ae04cf98cd3945"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash 
= "sha256:5a93c9e60904cb76e7aefef67afffb8b37c4894f81415ed513db090f29d01101"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-win32.whl", hash = "sha256:579d107102c0725f7c79b4e79f16d3cf4d7c9208f29c66b064fa1fd4641d5155"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:953b3780765c8846866faf891ee4290f6a41a6dacf4fbcd3926f78c9de412ca6"}, + {file = "rapidfuzz-3.9.7-cp312-cp312-win_arm64.whl", hash = "sha256:7c20c1474b068c4bd45bf2fd0ad548df284f74e9a14a68b06746c56e3aa8eb70"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fde81b1da9a947f931711febe2e2bee694e891f6d3e6aa6bc02c1884702aea19"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:47e92c155a14f44511ea8ebcc6bc1535a1fe8d0a7d67ad3cc47ba61606df7bcf"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8772b745668260c5c4d069c678bbaa68812e6c69830f3771eaad521af7bc17f8"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578302828dd97ee2ba507d2f71d62164e28d2fc7bc73aad0d2d1d2afc021a5d5"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc3e6081069eea61593f1d6839029da53d00c8c9b205c5534853eaa3f031085c"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b1c2d504eddf97bc0f2eba422c8915576dbf025062ceaca2d68aecd66324ad9"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb76e5a21034f0307c51c5a2fc08856f698c53a4c593b17d291f7d6e9d09ca3"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d4ba2318ef670ce505f42881a5d2af70f948124646947341a3c6ccb33cd70369"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:057bb03f39e285047d7e9412e01ecf31bb2d42b9466a5409d715d587460dd59b"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a8feac9006d5c9758438906f093befffc4290de75663dbb2098461df7c7d28dd"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:95b8292383e717e10455f2c917df45032b611141e43d1adf70f71b1566136b11"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e9fbf659537d246086d0297628b3795dc3e4a384101ecc01e5791c827b8d7345"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-win32.whl", hash = "sha256:1dc516ac6d32027be2b0196bedf6d977ac26debd09ca182376322ad620460feb"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-win_amd64.whl", hash = "sha256:b4f86e09d3064dca0b014cd48688964036a904a2d28048f00c8f4640796d06a8"}, + {file = "rapidfuzz-3.9.7-cp313-cp313-win_arm64.whl", hash = "sha256:19c64d8ddb2940b42a4567b23f1681af77f50a5ff6c9b8e85daba079c210716e"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fbda3dd68d8b28ccb20ffb6f756fefd9b5ba570a772bedd7643ed441f5793308"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2379e0b2578ad3ac7004f223251550f08bca873ff76c169b09410ec562ad78d8"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d1eff95362f993b0276fd3839aee48625b09aac8938bb0c23b40d219cba5dc5"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd9360e30041690912525a210e48a897b49b230768cc8af1c702e5395690464f"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:a93cd834b3c315ab437f0565ee3a2f42dd33768dc885ccbabf9710b131cf70d2"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff196996240db7075f62c7bc4506f40a3c80cd4ae3ab0e79ac6892283a90859"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948dcee7aaa1cd14358b2a7ef08bf0be42bf89049c3a906669874a715fc2c937"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95751f505a301af1aaf086c19f34536056d6c8efa91b2240de532a3db57b543"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:90db86fa196eecf96cb6db09f1083912ea945c50c57188039392d810d0b784e1"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:3171653212218a162540a3c8eb8ae7d3dcc8548540b69eaecaf3b47c14d89c90"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:36dd6e820379c37a1ffefc8a52b648758e867cd9d78ee5b5dc0c9a6a10145378"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7b702de95666a1f7d5c6b47eacadfe2d2794af3742d63d2134767d13e5d1c713"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-win32.whl", hash = "sha256:9030e7238c0df51aed5c9c5ed8eee2bdd47a2ae788e562c1454af2851c3d1906"}, + {file = "rapidfuzz-3.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:f847fb0fbfb72482b1c05c59cbb275c58a55b73708a7f77a83f8035ee3c86497"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:97f2ce529d2a70a60c290f6ab269a2bbf1d3b47b9724dccc84339b85f7afb044"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e2957fdad10bb83b1982b02deb3604a3f6911a5e545f518b59c741086f92d152"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d5262383634626eb45c536017204b8163a03bc43bda880cf1bdd7885db9a163"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:364587827d7cbd41afa0782adc2d2d19e3f07d355b0750a02a8e33ad27a9c368"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecc24af7f905f3d6efb371a01680116ffea8d64e266618fb9ad1602a9b4f7934"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dc86aa6b29d174713c5f4caac35ffb7f232e3e649113e8d13812b35ab078228"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3dcfbe7266e74a707173a12a7b355a531f2dcfbdb32f09468e664330da14874"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b23806fbdd6b510ba9ac93bb72d503066263b0fba44b71b835be9f063a84025f"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5551d68264c1bb6943f542da83a4dc8940ede52c5847ef158698799cc28d14f5"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:13d8675a1fa7e2b19650ca7ef9a6ec01391d4bb12ab9e0793e8eb024538b4a34"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9b6a5de507b9be6de688dae40143b656f7a93b10995fb8bd90deb555e7875c60"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:111a20a3c090cf244d9406e60500b6c34b2375ba3a5009e2b38fd806fe38e337"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-win32.whl", hash = "sha256:22589c0b8ccc6c391ce7f776c93a8c92c96ab8d34e1a19f1bd2b12a235332632"}, + {file = "rapidfuzz-3.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:6f83221db5755b8f34222e40607d87f1176a8d5d4dbda4a55a0f0b67d588a69c"}, + 
{file = "rapidfuzz-3.9.7-cp39-cp39-win_arm64.whl", hash = "sha256:3665b92e788578c3bb334bd5b5fa7ee1a84bafd68be438e3110861d1578c63a0"}, + {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d7df9c2194c7ec930b33c991c55dbd0c10951bd25800c0b7a7b571994ebbced5"}, + {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68bd888eafd07b09585dcc8bc2716c5ecdb7eed62827470664d25588982b2873"}, + {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1230e0f9026851a6a432beaa0ce575dda7b39fe689b576f99a0704fbb81fc9c"}, + {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b36e1c61b796ae1777f3e9e11fd39898b09d351c9384baf6e3b7e6191d8ced"}, + {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dba13d86806fcf3fe9c9919f58575e0090eadfb89c058bde02bcc7ab24e4548"}, + {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1f1a33e84056b7892c721d84475d3bde49a145126bc4c6efe0d6d0d59cb31c29"}, + {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3492c7a42b7fa9f0051d7fcce9893e95ed91c97c9ec7fb64346f3e070dd318ed"}, + {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:ece45eb2af8b00f90d10f7419322e8804bd42fb1129026f9bfe712c37508b514"}, + {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcd14cf4876f04b488f6e54a7abd3e9b31db5f5a6aba0ce90659917aaa8c088"}, + {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:521c58c72ed8a612b25cda378ff10dee17e6deb4ee99a070b723519a345527b9"}, + {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18669bb6cdf7d40738526d37e550df09ba065b5a7560f3d802287988b6cb63cf"}, + {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7abe2dbae81120a64bb4f8d3fcafe9122f328c9f86d7f327f174187a5af4ed86"}, + {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a3c0783910911f4f24655826d007c9f4360f08107410952c01ee3df98c713eb2"}, + {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:03126f9a040ff21d2a110610bfd6b93b79377ce8b4121edcb791d61b7df6eec5"}, + {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:591908240f4085e2ade5b685c6e8346e2ed44932cffeaac2fb32ddac95b55c7f"}, + {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9012d86c6397edbc9da4ac0132de7f8ee9d6ce857f4194d5684c4ddbcdd1c5c"}, + {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df596ddd3db38aa513d4c0995611267b3946e7cbe5a8761b50e9306dfec720ee"}, + {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3ed5adb752f4308fcc8f4fb6f8eb7aa4082f9d12676fda0a74fa5564242a8107"}, + {file = "rapidfuzz-3.9.7.tar.gz", hash = "sha256:f1c7296534c1afb6f495aa95871f14ccdc197c6db42965854e483100df313030"}, ] [package.extras] -all = ["numpy"] +full = ["numpy"] [[package]] name = "regex" -version = "2024.9.11" +version = "2024.7.24" description = "Alternative regular expression module, to replace re." 
optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, - {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, - {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, - {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, - {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, - {file = 
"regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, - {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, - {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, - {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, - {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, - {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, - {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, - {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, - {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"}, + {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"}, + {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"}, + {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"}, + {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"}, + {file = "regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"}, + {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"}, + {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"}, + {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"}, + {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"}, + {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"}, + {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"}, ] [[package]] @@ -3427,21 +3391,39 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +description = "OAuthlib authentication support for Requests." 
+optional = false +python-versions = ">=3.4" +files = [ + {file = "requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9"}, + {file = "requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36"}, +] + +[package.dependencies] +oauthlib = ">=3.0.0" +requests = ">=2.0.0" + +[package.extras] +rsa = ["oauthlib[signedtoken] (>=3.0.0)"] + [[package]] name = "rich" -version = "13.9.2" +version = "13.8.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false -python-versions = ">=3.8.0" +python-versions = ">=3.7.0" files = [ - {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, - {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, + {file = "rich-13.8.0-py3-none-any.whl", hash = "sha256:2e85306a063b9492dffc86278197a60cbece75bcb766022f3436f567cae11bdc"}, + {file = "rich-13.8.0.tar.gz", hash = "sha256:a5ac1f1cd448ade0d59cc3356f7db7a7ccda2c8cbae9c7a90c28ff463d3e91f4"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] @@ -3462,153 +3444,148 @@ pyasn1 = ">=0.1.3" [[package]] name = "scikit-image" -version = "0.24.0" +version = "0.21.0" description = "Image processing in Python" optional = false -python-versions = ">=3.9" -files = [ - {file = "scikit_image-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb3bc0264b6ab30b43c4179ee6156bc18b4861e78bb329dd8d16537b7bbf827a"}, - {file = "scikit_image-0.24.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9c7a52e20cdd760738da38564ba1fed7942b623c0317489af1a598a8dedf088b"}, - {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93f46e6ce42e5409f4d09ce1b0c7f80dd7e4373bcec635b6348b63e3c886eac8"}, - {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39ee0af13435c57351a3397eb379e72164ff85161923eec0c38849fecf1b4764"}, - {file = "scikit_image-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:7ac7913b028b8aa780ffae85922894a69e33d1c0bf270ea1774f382fe8bf95e7"}, - {file = "scikit_image-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:272909e02a59cea3ed4aa03739bb88df2625daa809f633f40b5053cf09241831"}, - {file = "scikit_image-0.24.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:190ebde80b4470fe8838764b9b15f232a964f1a20391663e31008d76f0c696f7"}, - {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c98cc695005faf2b79904e4663796c977af22586ddf1b12d6af2fa22842dc2"}, - {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa27b3a0dbad807b966b8db2d78da734cb812ca4787f7fbb143764800ce2fa9c"}, - {file = "scikit_image-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:dacf591ac0c272a111181afad4b788a27fe70d213cfddd631d151cbc34f8ca2c"}, - {file = "scikit_image-0.24.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6fccceb54c9574590abcddc8caf6cefa57c13b5b8b4260ab3ff88ad8f3c252b3"}, - {file = "scikit_image-0.24.0-cp312-cp312-macosx_12_0_arm64.whl", hash = 
"sha256:ccc01e4760d655aab7601c1ba7aa4ddd8b46f494ac46ec9c268df6f33ccddf4c"}, - {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18836a18d3a7b6aca5376a2d805f0045826bc6c9fc85331659c33b4813e0b563"}, - {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8579bda9c3f78cb3b3ed8b9425213c53a25fa7e994b7ac01f2440b395babf660"}, - {file = "scikit_image-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:82ab903afa60b2da1da2e6f0c8c65e7c8868c60a869464c41971da929b3e82bc"}, - {file = "scikit_image-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef04360eda372ee5cd60aebe9be91258639c86ae2ea24093fb9182118008d009"}, - {file = "scikit_image-0.24.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e9aadb442360a7e76f0c5c9d105f79a83d6df0e01e431bd1d5757e2c5871a1f3"}, - {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e37de6f4c1abcf794e13c258dc9b7d385d5be868441de11c180363824192ff7"}, - {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4688c18bd7ec33c08d7bf0fd19549be246d90d5f2c1d795a89986629af0a1e83"}, - {file = "scikit_image-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:56dab751d20b25d5d3985e95c9b4e975f55573554bd76b0aedf5875217c93e69"}, - {file = "scikit_image-0.24.0.tar.gz", hash = "sha256:5d16efe95da8edbeb363e0c4157b99becbd650a60b77f6e3af5768b66cf007ab"}, +python-versions = ">=3.8" +files = [ + {file = "scikit_image-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:978ac3302252155a8556cdfe067bad2d18d5ccef4e91c2f727bc564ed75566bc"}, + {file = "scikit_image-0.21.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:82c22e008527e5ee26ab08e3ce919998ef164d538ff30b9e5764b223cfda06b1"}, + {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd29d2631d3e975c377066acfc1f4cb2cc95e2257cf70e7fedfcb96441096e88"}, + {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6c12925ceb9f3aede555921e26642d601b2d37d1617002a2636f2cb5178ae2f"}, + {file = "scikit_image-0.21.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f538d4de77e4f3225d068d9ea2965bed3f7dda7f457a8f89634fa22ffb9ad8c"}, + {file = "scikit_image-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ec9bab6920ac43037d7434058b67b5778d42c60f67b8679239f48a471e7ed6f8"}, + {file = "scikit_image-0.21.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:a54720430dba833ffbb6dedd93d9f0938c5dd47d20ab9ba3e4e61c19d95f6f19"}, + {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e40dd102da14cdadc09210f930b4556c90ff8f99cd9d8bcccf9f73a86c44245"}, + {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff5719c7eb99596a39c3e1d9b564025bae78ecf1da3ee6842d34f6965b5f1474"}, + {file = "scikit_image-0.21.0-cp311-cp311-win_amd64.whl", hash = "sha256:146c3824253eee9ff346c4ad10cb58376f91aefaf4a4bb2fe11aa21691f7de76"}, + {file = "scikit_image-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e1b09f81a99c9c390215929194847b3cd358550b4b65bb6e42c5393d69cb74a"}, + {file = "scikit_image-0.21.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9f7b5fb4a22f0d5ae0fa13beeb887c925280590145cd6d8b2630794d120ff7c7"}, + {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d4814033717f0b6491fee252facb9df92058d6a72ab78dd6408a50f3915a88b8"}, + {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b0d6ed6502cca0c9719c444caafa0b8cda0f9e29e01ca42f621a240073284be"}, + {file = "scikit_image-0.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:9194cb7bc21215fde6c1b1e9685d312d2aa8f65ea9736bc6311126a91c860032"}, + {file = "scikit_image-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54df1ddc854f37a912d42bc724e456e86858107e94048a81a27720bc588f9937"}, + {file = "scikit_image-0.21.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c01e3ab0a1fabfd8ce30686d4401b7ed36e6126c9d4d05cb94abf6bdc46f7ac9"}, + {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ef5d8d1099317b7b315b530348cbfa68ab8ce32459de3c074d204166951025c"}, + {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b1e96c59cab640ca5c5b22c501524cfaf34cbe0cb51ba73bd9a9ede3fb6e1d"}, + {file = "scikit_image-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:9cffcddd2a5594c0a06de2ae3e1e25d662745a26f94fda31520593669677c010"}, + {file = "scikit_image-0.21.0.tar.gz", hash = "sha256:b33e823c54e6f11873ea390ee49ef832b82b9f70752c8759efd09d5a4e3d87f0"}, ] [package.dependencies] -imageio = ">=2.33" -lazy-loader = ">=0.4" +imageio = ">=2.27" +lazy_loader = ">=0.2" networkx = ">=2.8" -numpy = ">=1.23" +numpy = ">=1.21.1" packaging = ">=21" -pillow = ">=9.1" -scipy = ">=1.9" +pillow = ">=9.0.1" +PyWavelets = ">=1.1.1" +scipy = ">=1.8" tifffile = ">=2022.8.12" [package.extras] -build = ["Cython (>=3.0.4)", "build", "meson-python (>=0.15)", "ninja", "numpy (>=2.0.0rc1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.8)", "wheel"] +build = ["Cython (>=0.29.32)", "build", "meson-python (>=0.13)", "ninja", "numpy (>=1.21.1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.3)", "wheel"] data = ["pooch (>=1.6.0)"] -developer = ["ipython", "pre-commit", "tomli"] -docs = ["PyWavelets (>=1.1.1)", "dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.6)", "myst-parser", "numpydoc (>=1.7)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.15.2)", "pytest-doctestplus", "pytest-runner", "scikit-learn (>=1.1)", "seaborn (>=0.11)", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-gallery (>=0.14)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"] -optional = ["PyWavelets (>=1.1.1)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.6)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=1.1)"] -test = ["asv", "numpydoc (>=1.7)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-doctestplus", "pytest-faulthandler", "pytest-localserver"] +default = ["PyWavelets (>=1.1.1)", "imageio (>=2.27)", "lazy_loader (>=0.2)", "networkx (>=2.8)", "numpy (>=1.21.1)", "packaging (>=21)", "pillow (>=9.0.1)", "scipy (>=1.8)", "tifffile (>=2022.8.12)"] +developer = ["pre-commit", "rtoml"] +docs = ["dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.5)", "myst-parser", "numpydoc (>=1.5)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.13)", "pytest-runner", "scikit-learn (>=0.24.0)", "seaborn (>=0.11)", "sphinx (>=5.0)", "sphinx-copybutton", "sphinx-gallery (>=0.11)", "sphinx_design (>=0.3)", "tifffile (>=2022.8.12)"] +optional = ["SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", 
"dask[array] (>=2021.1.0)", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=0.24.0)"] +test = ["asv", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-faulthandler", "pytest-localserver"] [[package]] name = "scikit-learn" -version = "1.5.2" +version = "1.3.2" description = "A set of python modules for machine learning and data mining" optional = false -python-versions = ">=3.9" -files = [ - {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, - {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, - {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, - {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, - {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, - {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, - {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, - {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, - {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, - {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, - {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, - {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, - {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, - {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, - {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, - {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"}, - {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, - {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, - {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, - {file = "scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = 
"sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, - {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, - {file = "scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, - {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, - {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca64b3089a6d9b9363cd3546f8978229dcbb737aceb2c12144ee3f70f95684b7"}, - {file = "scikit_learn-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:3bed4909ba187aca80580fe2ef370d9180dcf18e621a27c4cf2ef10d279a7efe"}, - {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, +python-versions = ">=3.8" +files = [ + {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"}, + {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"}, + {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"}, + {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"}, + {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"}, + {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"}, + {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"}, + {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"}, + {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"}, + {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"}, + {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"}, + {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"}, + {file = 
"scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"}, + {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"}, + {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"}, + {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"}, + {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"}, + {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"}, + {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"}, + {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"}, ] [package.dependencies] -joblib = ">=1.2.0" -numpy = ">=1.19.5" -scipy = ">=1.6.0" -threadpoolctl = ">=3.1.0" +joblib = ">=1.1.1" +numpy = ">=1.17.3,<2.0" +scipy = ">=1.5.0" +threadpoolctl = ">=2.0.0" [package.extras] -benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] -build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-design (>=0.6.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] -examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] -install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] -maintenance = ["conda-lock (==2.5.6)"] -tests = ["black (>=24.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.9)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.20.30)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.2.1)", "scikit-image (>=0.17.2)"] +benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", 
"plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] [[package]] name = "scipy" -version = "1.13.1" +version = "1.10.1" description = "Fundamental algorithms for scientific computing in Python" optional = false -python-versions = ">=3.9" -files = [ - {file = "scipy-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20335853b85e9a49ff7572ab453794298bcf0354d8068c5f6775a0eabf350aca"}, - {file = "scipy-1.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d605e9c23906d1994f55ace80e0125c587f96c020037ea6aa98d01b4bd2e222f"}, - {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfa31f1def5c819b19ecc3a8b52d28ffdcc7ed52bb20c9a7589669dd3c250989"}, - {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26264b282b9da0952a024ae34710c2aff7d27480ee91a2e82b7b7073c24722f"}, - {file = "scipy-1.13.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eccfa1906eacc02de42d70ef4aecea45415f5be17e72b61bafcfd329bdc52e94"}, - {file = "scipy-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:2831f0dc9c5ea9edd6e51e6e769b655f08ec6db6e2e10f86ef39bd32eb11da54"}, - {file = "scipy-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27e52b09c0d3a1d5b63e1105f24177e544a222b43611aaf5bc44d4a0979e32f9"}, - {file = "scipy-1.13.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:54f430b00f0133e2224c3ba42b805bfd0086fe488835effa33fa291561932326"}, - {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e89369d27f9e7b0884ae559a3a956e77c02114cc60a6058b4e5011572eea9299"}, - {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a78b4b3345f1b6f68a763c6e25c0c9a23a9fd0f39f5f3d200efe8feda560a5fa"}, - {file = "scipy-1.13.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45484bee6d65633752c490404513b9ef02475b4284c4cfab0ef946def50b3f59"}, - {file = "scipy-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:5713f62f781eebd8d597eb3f88b8bf9274e79eeabf63afb4a737abc6c84ad37b"}, - {file = "scipy-1.13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5d72782f39716b2b3509cd7c33cdc08c96f2f4d2b06d51e52fb45a19ca0c86a1"}, - {file = "scipy-1.13.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:017367484ce5498445aade74b1d5ab377acdc65e27095155e448c88497755a5d"}, - {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:949ae67db5fa78a86e8fa644b9a6b07252f449dcf74247108c50e1d20d2b4627"}, - {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ade0e53bc1f21358aa74ff4830235d716211d7d077e340c7349bc3542e884"}, - {file = "scipy-1.13.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2ac65fb503dad64218c228e2dc2d0a0193f7904747db43014645ae139c8fad16"}, - {file = "scipy-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:cdd7dacfb95fea358916410ec61bbc20440f7860333aee6d882bb8046264e949"}, - {file = "scipy-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:436bbb42a94a8aeef855d755ce5a465479c721e9d684de76bf61a62e7c2b81d5"}, - {file = "scipy-1.13.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8335549ebbca860c52bf3d02f80784e91a004b71b059e3eea9678ba994796a24"}, - {file = 
"scipy-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d533654b7d221a6a97304ab63c41c96473ff04459e404b83275b60aa8f4b7004"}, - {file = "scipy-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637e98dcf185ba7f8e663e122ebf908c4702420477ae52a04f9908707456ba4d"}, - {file = "scipy-1.13.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a014c2b3697bde71724244f63de2476925596c24285c7a637364761f8710891c"}, - {file = "scipy-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:392e4ec766654852c25ebad4f64e4e584cf19820b980bc04960bca0b0cd6eaa2"}, - {file = "scipy-1.13.1.tar.gz", hash = "sha256:095a87a0312b08dfd6a6155cbbd310a8c51800fc931b8c0b84003014b874ed3c"}, +python-versions = "<3.12,>=3.8" +files = [ + {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, + {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, + {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, + {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, + {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, + {file = 
"scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, + {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, + {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, ] [package.dependencies] -numpy = ">=1.22.4,<2.3" +numpy = ">=1.19.5,<1.27.0" [package.extras] -dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] -doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] -test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "sentry-sdk" -version = "2.16.0" +version = "2.13.0" description = "Python client for Sentry (https://sentry.io)" optional = false python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"}, - {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"}, + {file = "sentry_sdk-2.13.0-py2.py3-none-any.whl", hash = "sha256:6beede8fc2ab4043da7f69d95534e320944690680dd9a963178a49de71d726c6"}, + {file = "sentry_sdk-2.13.0.tar.gz", hash = "sha256:8d4a576f7a98eb2fdb40e13106e41f330e5c79d72a68be1316e7852cf4995260"}, ] [package.dependencies] @@ -3631,7 +3608,6 @@ falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"] -http2 = ["httpcore[http2] (==1.*)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] huggingface-hub = ["huggingface-hub (>=0.22)"] @@ -3754,18 +3730,18 @@ test = ["pytest"] [[package]] name = "setuptools" -version = "75.2.0" +version = "74.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"}, - {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"}, + {file = "setuptools-74.1.0-py3-none-any.whl", hash = "sha256:cee604bd76cc092355a4e43ec17aee5369095974f41f088676724dc6bc2c9ef8"}, + {file = "setuptools-74.1.0.tar.gz", hash = "sha256:bea195a800f510ba3a2bc65645c88b7e016fe36709fefc58a880c4ae8a0138d7"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", 
"platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] @@ -3836,68 +3812,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.36" +version = "2.0.32" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, - {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, - {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8"}, + {file = "SQLAlchemy-2.0.32-cp310-cp310-win32.whl", hash = "sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d"}, + {file = 
"SQLAlchemy-2.0.32-cp310-cp310-win_amd64.whl", hash = "sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28"}, + {file = "SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d"}, + {file = "SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1"}, + {file = "SQLAlchemy-2.0.32-cp37-cp37m-win32.whl", hash = "sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da"}, + {file = 
"SQLAlchemy-2.0.32-cp37-cp37m-win_amd64.whl", hash = "sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-win32.whl", hash = "sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65"}, + {file = "SQLAlchemy-2.0.32-cp38-cp38-win_amd64.whl", hash = "sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-win32.whl", hash = "sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78"}, + {file = "SQLAlchemy-2.0.32-cp39-cp39-win_amd64.whl", hash = "sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84"}, + {file = "SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202"}, + {file = "SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8"}, ] [package.dependencies] @@ -3910,7 +3878,7 @@ aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] @@ -3946,13 +3914,13 @@ doc = ["sphinx"] [[package]] name = "sympy" -version = "1.13.3" +version = "1.13.2" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.13.3-py3-none-any.whl", hash = 
"sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, - {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, + {file = "sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, + {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, ] [package.dependencies] @@ -3963,25 +3931,27 @@ dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] [[package]] name = "tensorboard" -version = "2.18.0" +version = "2.14.0" description = "TensorBoard lets you watch Tensors Flow" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "tensorboard-2.18.0-py3-none-any.whl", hash = "sha256:107ca4821745f73e2aefa02c50ff70a9b694f39f790b11e6f682f7d326745eab"}, + {file = "tensorboard-2.14.0-py3-none-any.whl", hash = "sha256:3667f9745d99280836ad673022362c840f60ed8fefd5a3e30bf071f5a8fd0017"}, ] [package.dependencies] absl-py = ">=0.4" +google-auth = ">=1.6.3,<3" +google-auth-oauthlib = ">=0.5,<1.1" grpcio = ">=1.48.2" markdown = ">=2.6.8" numpy = ">=1.12.0" -packaging = "*" -protobuf = ">=3.19.6,<4.24.0 || >4.24.0" +protobuf = ">=3.19.6" +requests = ">=2.21.0,<3" setuptools = ">=41.0.0" -six = ">1.9" tensorboard-data-server = ">=0.7.0,<0.8.0" werkzeug = ">=1.0.1" +wheel = ">=0.26" [[package]] name = "tensorboard-data-server" @@ -4008,25 +3978,20 @@ files = [ [[package]] name = "tifffile" -version = "2024.8.30" +version = "2023.7.10" description = "Read and write TIFF files" optional = false -python-versions = ">=3.9" +python-versions = ">=3.8" files = [ - {file = "tifffile-2024.8.30-py3-none-any.whl", hash = "sha256:8bc59a8f02a2665cd50a910ec64961c5373bee0b8850ec89d3b7b485bf7be7ad"}, - {file = "tifffile-2024.8.30.tar.gz", hash = "sha256:2c9508fe768962e30f87def61819183fb07692c258cb175b3c114828368485a4"}, + {file = "tifffile-2023.7.10-py3-none-any.whl", hash = "sha256:94dfdec321ace96abbfe872a66cfd824800c099a2db558443453eebc2c11b304"}, + {file = "tifffile-2023.7.10.tar.gz", hash = "sha256:c06ec460926d16796eeee249a560bcdddf243daae36ac62af3c84a953cd60b4a"}, ] [package.dependencies] numpy = "*" [package.extras] -all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"] -codecs = ["imagecodecs (>=2023.8.12)"] -plot = ["matplotlib"] -test = ["cmapfile", "czifile", "dask", "defusedxml", "fsspec", "imagecodecs", "lfdfiles", "lxml", "ndtiff", "oiffile", "psdtags", "pytest", "roifile", "xarray", "zarr"] -xml = ["defusedxml", "lxml"] -zarr = ["fsspec", "zarr"] +all = ["defusedxml", "fsspec", "imagecodecs (>=2023.1.23)", "lxml", "matplotlib", "zarr"] [[package]] name = "toml" @@ -4041,42 +4006,42 @@ files = [ [[package]] name = "tomli" -version = "2.0.2" +version = "2.0.1" description = "A lil' TOML parser" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] [[package]] name = "torch" -version = "2.4.1" +version = "2.4.0" description = "Tensors and Dynamic neural 
networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.8.0" files = [ - {file = "torch-2.4.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:362f82e23a4cd46341daabb76fba08f04cd646df9bfaf5da50af97cb60ca4971"}, - {file = "torch-2.4.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:e8ac1985c3ff0f60d85b991954cfc2cc25f79c84545aead422763148ed2759e3"}, - {file = "torch-2.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91e326e2ccfb1496e3bee58f70ef605aeb27bd26be07ba64f37dcaac3d070ada"}, - {file = "torch-2.4.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d36a8ef100f5bff3e9c3cea934b9e0d7ea277cb8210c7152d34a9a6c5830eadd"}, - {file = "torch-2.4.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:0b5f88afdfa05a335d80351e3cea57d38e578c8689f751d35e0ff36bce872113"}, - {file = "torch-2.4.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:ef503165f2341942bfdf2bd520152f19540d0c0e34961232f134dc59ad435be8"}, - {file = "torch-2.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:092e7c2280c860eff762ac08c4bdcd53d701677851670695e0c22d6d345b269c"}, - {file = "torch-2.4.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:ddddbd8b066e743934a4200b3d54267a46db02106876d21cf31f7da7a96f98ea"}, - {file = "torch-2.4.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:fdc4fe11db3eb93c1115d3e973a27ac7c1a8318af8934ffa36b0370efe28e042"}, - {file = "torch-2.4.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:18835374f599207a9e82c262153c20ddf42ea49bc76b6eadad8e5f49729f6e4d"}, - {file = "torch-2.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:ebea70ff30544fc021d441ce6b219a88b67524f01170b1c538d7d3ebb5e7f56c"}, - {file = "torch-2.4.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:72b484d5b6cec1a735bf3fa5a1c4883d01748698c5e9cfdbeb4ffab7c7987e0d"}, - {file = "torch-2.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c99e1db4bf0c5347107845d715b4aa1097e601bdc36343d758963055e9599d93"}, - {file = "torch-2.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b57f07e92858db78c5b72857b4f0b33a65b00dc5d68e7948a8494b0314efb880"}, - {file = "torch-2.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:f18197f3f7c15cde2115892b64f17c80dbf01ed72b008020e7da339902742cf6"}, - {file = "torch-2.4.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:5fc1d4d7ed265ef853579caf272686d1ed87cebdcd04f2a498f800ffc53dab71"}, - {file = "torch-2.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:40f6d3fe3bae74efcf08cb7f8295eaddd8a838ce89e9d26929d4edd6d5e4329d"}, - {file = "torch-2.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:c9299c16c9743001ecef515536ac45900247f4338ecdf70746f2461f9e4831db"}, - {file = "torch-2.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:6bce130f2cd2d52ba4e2c6ada461808de7e5eccbac692525337cfb4c19421846"}, - {file = "torch-2.4.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:a38de2803ee6050309aac032676536c3d3b6a9804248537e38e098d0e14817ec"}, + {file = "torch-2.4.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:4ed94583e244af51d6a8d28701ca5a9e02d1219e782f5a01dd401f90af17d8ac"}, + {file = "torch-2.4.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:c4ca297b7bd58b506bfd6e78ffd14eb97c0e7797dcd7965df62f50bb575d8954"}, + {file = "torch-2.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:2497cbc7b3c951d69b276ca51fe01c2865db67040ac67f5fc20b03e41d16ea4a"}, + {file = "torch-2.4.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:685418ab93730efbee71528821ff54005596970dd497bf03c89204fb7e3f71de"}, + {file = "torch-2.4.0-cp311-cp311-manylinux1_x86_64.whl", hash = 
"sha256:e743adadd8c8152bb8373543964551a7cb7cc20ba898dc8f9c0cdbe47c283de0"}, + {file = "torch-2.4.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:7334325c0292cbd5c2eac085f449bf57d3690932eac37027e193ba775703c9e6"}, + {file = "torch-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:97730014da4c57ffacb3c09298c6ce05400606e890bd7a05008d13dd086e46b1"}, + {file = "torch-2.4.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f169b4ea6dc93b3a33319611fcc47dc1406e4dd539844dcbd2dec4c1b96e166d"}, + {file = "torch-2.4.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:997084a0f9784d2a89095a6dc67c7925e21bf25dea0b3d069b41195016ccfcbb"}, + {file = "torch-2.4.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:bc3988e8b36d1e8b998d143255d9408d8c75da4ab6dd0dcfd23b623dfb0f0f57"}, + {file = "torch-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:3374128bbf7e62cdaed6c237bfd39809fbcfaa576bee91e904706840c3f2195c"}, + {file = "torch-2.4.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:91aaf00bfe1ffa44dc5b52809d9a95129fca10212eca3ac26420eb11727c6288"}, + {file = "torch-2.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cc30457ea5489c62747d3306438af00c606b509d78822a88f804202ba63111ed"}, + {file = "torch-2.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a046491aaf96d1215e65e1fa85911ef2ded6d49ea34c8df4d0638879f2402eef"}, + {file = "torch-2.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:688eec9240f3ce775f22e1e1a5ab9894f3d5fe60f3f586deb7dbd23a46a83916"}, + {file = "torch-2.4.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:3af4de2a618fb065e78404c4ba27a818a7b7957eaeff28c6c66ce7fb504b68b8"}, + {file = "torch-2.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:618808d3f610d5f180e47a697d4ec90b810953bb1e020f424b2ac7fb0884b545"}, + {file = "torch-2.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:ed765d232d23566052ba83632ec73a4fccde00b4c94ad45d63b471b09d63b7a7"}, + {file = "torch-2.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2feb98ac470109472fb10dfef38622a7ee08482a16c357863ebc7bc7db7c8f7"}, + {file = "torch-2.4.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:8940fc8b97a4c61fdb5d46a368f21f4a3a562a17879e932eb51a5ec62310cb31"}, ] [package.dependencies] @@ -4095,7 +4060,6 @@ nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \" nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -setuptools = "*" sympy = "*" triton = {version = "3.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.13\""} typing-extensions = ">=4.8.0" @@ -4135,37 +4099,42 @@ plot = ["matplotlib"] [[package]] name = "torchvision" -version = "0.19.1" +version = "0.19.0" description = "image and video datasets and models for torch deep learning" optional = false python-versions = ">=3.8" files = [ - {file = "torchvision-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:54e8513099e6f586356c70f809d34f391af71ad182fe071cc328a28af2c40608"}, - {file = "torchvision-0.19.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:20a1f5e02bfdad7714e55fa3fa698347c11d829fa65e11e5a84df07d93350eed"}, - {file = "torchvision-0.19.1-cp310-cp310-manylinux2014_aarch64.whl", hash = 
"sha256:7b063116164be52fc6deb4762de7f8c90bfa3a65f8d5caf17f8e2d5aadc75a04"}, - {file = "torchvision-0.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:f40b6acabfa886da1bc3768f47679c61feee6bde90deb979d9f300df8c8a0145"}, - {file = "torchvision-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:40514282b4896d62765b8e26d7091c32e17c35817d00ec4be2362ea3ba3d1787"}, - {file = "torchvision-0.19.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:5a91be061ae5d6d5b95e833b93e57ca4d3c56c5a57444dd15da2e3e7fba96050"}, - {file = "torchvision-0.19.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d71a6a6fe3a5281ca3487d4c56ad4aad20ff70f82f1d7c79bcb6e7b0c2af00c8"}, - {file = "torchvision-0.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:70dea324174f5e9981b68e4b7cd524512c106ba64aedef560a86a0bbf2fbf62c"}, - {file = "torchvision-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27ece277ff0f6cdc7fed0627279c632dcb2e58187da771eca24b0fbcf3f8590d"}, - {file = "torchvision-0.19.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:c659ff92a61f188a1a7baef2850f3c0b6c85685447453c03d0e645ba8f1dcc1c"}, - {file = "torchvision-0.19.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:c07bf43c2a145d792ecd9d0503d6c73577147ece508d45600d8aac77e4cdfcf9"}, - {file = "torchvision-0.19.1-cp312-cp312-win_amd64.whl", hash = "sha256:b4283d283675556bb0eae31d29996f53861b17cbdcdf3509e6bc050414ac9289"}, - {file = "torchvision-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4e4f5b24ea6b087b02ed492ab1e21bba3352c4577e2def14248cfc60732338"}, - {file = "torchvision-0.19.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:9281d63ead929bb19143731154cd1d8bf0b5e9873dff8578a40e90a6bec3c6fa"}, - {file = "torchvision-0.19.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:4d10bc9083c4d5fadd7edd7b729700a7be48dab4f62278df3bc73fa48e48a155"}, - {file = "torchvision-0.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ccf085ef1824fb9e16f1901285bf89c298c62dfd93267a39e8ee42c71255242f"}, - {file = "torchvision-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:731f434d91586769e255b5d70ed1a4457e0a1394a95f4aacf0e1e7e21f80c098"}, - {file = "torchvision-0.19.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:febe4f14d4afcb47cc861d8be7760ab6a123cd0817f97faf5771488cb6aa90f4"}, - {file = "torchvision-0.19.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:e328309b8670a2e889b2fe76a1c2744a099c11c984da9a822357bd9debd699a5"}, - {file = "torchvision-0.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:6616f12e00a22e7f3fedbd0fccb0804c05e8fe22871668f10eae65cf3f283614"}, + {file = "torchvision-0.19.0-1-cp310-cp310-win_amd64.whl", hash = "sha256:6ed066aae5c50465d7c4761357aefe5dbd2eb7075a33ab8c14b352fc2353ad4c"}, + {file = "torchvision-0.19.0-1-cp311-cp311-win_amd64.whl", hash = "sha256:6b1bce2e4c003d890a18f14ff289528707d918e38539ff890ef02aa31dae1b56"}, + {file = "torchvision-0.19.0-1-cp312-cp312-win_amd64.whl", hash = "sha256:13aee7a46e049c8c1e7d35a0394b0587a7e62ff3d1a822cd2bbbacb675ac4a09"}, + {file = "torchvision-0.19.0-1-cp38-cp38-win_amd64.whl", hash = "sha256:2acc436d043d4f81b3bc6929cbfa4ef1cdae4d8a0b04ec72ec30a497e9a38179"}, + {file = "torchvision-0.19.0-1-cp39-cp39-win_amd64.whl", hash = "sha256:b5f70f5a8bd9c8b00a076bf466b39b5cd679ef62587c47cc048adb04d9c5f155"}, + {file = "torchvision-0.19.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec874ef85dcb24c69e600f6e276af892c80cde3ffdaeb7275efda463242bc2a8"}, + {file = "torchvision-0.19.0-cp310-cp310-manylinux1_x86_64.whl", hash = 
"sha256:106842b1e475b14d9a04ee0d6f5477d43100e3bb78e9d31e37422384d0d84179"}, + {file = "torchvision-0.19.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d467d434005fd05a227a2ba7af4c591bb67e6d4a97bbd06eda8da83f43e9fd07"}, + {file = "torchvision-0.19.0-cp310-cp310-win_amd64.whl", hash = "sha256:f77ac31f7337d0f6f4b58e65582c6c93b9d9eeec7dfd7478896b5cdc19a2d60d"}, + {file = "torchvision-0.19.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbf3aa71a3899244fc884303ed3c4604a160824fefac77e82317a5463efc1d9b"}, + {file = "torchvision-0.19.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:ec4162dc71d9db7f0b51d0f92491929c1419605ff436e1305e50de13504a1c30"}, + {file = "torchvision-0.19.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:4e6aa4fa3f0bc3599fa071c149e651a3e6bdd67c9161794478f9f91471c406a2"}, + {file = "torchvision-0.19.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac5525d5cc09e425b5cf5752ecf66eefbbbd8c8cd945198ce35eb01a694e6069"}, + {file = "torchvision-0.19.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c09ef8ed184fa877f6251b620226e74f682b8f1d6b341456428d4955b8d9c670"}, + {file = "torchvision-0.19.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:02f1dd5cfc897957535b41b0258ec452d30de044e20c2de2c75869f7708e7656"}, + {file = "torchvision-0.19.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:be0f27a28b8e9f2ae98a31af34a4bdd2a5bf154d92bd73a5797c8d2156fb3ab6"}, + {file = "torchvision-0.19.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6ba7756f75c80212e51d3576f85ea204589e0c16efdb9b835dd677bc8929a67"}, + {file = "torchvision-0.19.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:854e967a16a9409e941b5bbe5aa357b23f7158bccb9de35ae20fd4945f05ecd1"}, + {file = "torchvision-0.19.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:d9afb8a3c3ce99a161a64c2a3b91cb545632a72118053cbfb84e87a02a8dcd02"}, + {file = "torchvision-0.19.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:079a696e0b2cb52e4be30afa8e9b3d7d280f02a2b5ffedd7e821fa1efd1a5a8d"}, + {file = "torchvision-0.19.0-cp38-cp38-win_amd64.whl", hash = "sha256:aaa338ff3a55a8c0f94e0e64eff6fe2af1fc933a95fd43812760e72ea66e986b"}, + {file = "torchvision-0.19.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd1279571d4b68d5a53d9b7a35aedf91c4cb1e0b08099f6a1effa7b25b8c95e7"}, + {file = "torchvision-0.19.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:4d54b5e19b7ebebca7d0b08497b4c6335264cad04c94c05fa35988d9e9eed0c4"}, + {file = "torchvision-0.19.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:5f9a598dcf82bdfc8e4436ce74763b3877dabec3b33f94613b94ede13e3e4dee"}, + {file = "torchvision-0.19.0-cp39-cp39-win_amd64.whl", hash = "sha256:ec1281c10402234d470bfd4d53663d81f4364f293b2f8fe24d4a7a1adc78c90c"}, ] [package.dependencies] numpy = "*" pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" -torch = "2.4.1" +torch = "2.4.0" [package.extras] gdown = ["gdown (>=4.7.3)"] @@ -4232,13 +4201,13 @@ typing-extensions = ">=3.7.4.3" [[package]] name = "types-python-dateutil" -version = "2.9.0.20241003" +version = "2.9.0.20240821" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, - {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, + {file = "types-python-dateutil-2.9.0.20240821.tar.gz", hash = 
"sha256:9649d1dcb6fef1046fb18bebe9ea2aa0028b160918518c34589a46045f6ebd98"}, + {file = "types_python_dateutil-2.9.0.20240821-py3-none-any.whl", hash = "sha256:f5889fcb4e63ed4aaa379b44f93c32593d50b9a94c9a60a0c854d8cc3511cd57"}, ] [[package]] @@ -4252,17 +4221,6 @@ files = [ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] -[[package]] -name = "tzdata" -version = "2024.2" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -files = [ - {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, - {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, -] - [[package]] name = "urllib3" version = "1.26.20" @@ -4281,13 +4239,13 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.26.6" +version = "20.26.3" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, - {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, + {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, + {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, ] [package.dependencies] @@ -4332,6 +4290,7 @@ docker-pycreds = ">=0.4.0" GitPython = ">=1.0.0,<3.1.29 || >3.1.29" pathtools = "*" protobuf = [ + {version = ">=3.12.0,<4.21.0 || >4.21.0,<5", markers = "python_version < \"3.9\" and sys_platform == \"linux\""}, {version = ">=3.15.0,<4.21.0 || >4.21.0,<5", markers = "python_version == \"3.9\" and sys_platform == \"linux\""}, {version = ">=3.19.0,<4.21.0 || >4.21.0,<5", markers = "python_version > \"3.9\" or sys_platform != \"linux\""}, ] @@ -4358,41 +4317,46 @@ sweeps = ["sweeps (>=0.2.0)"] [[package]] name = "watchdog" -version = "5.0.3" +version = "4.0.2" description = "Filesystem events monitoring" optional = false -python-versions = ">=3.9" -files = [ - {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:85527b882f3facda0579bce9d743ff7f10c3e1e0db0a0d0e28170a7d0e5ce2ea"}, - {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53adf73dcdc0ef04f7735066b4a57a4cd3e49ef135daae41d77395f0b5b692cb"}, - {file = "watchdog-5.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e25adddab85f674acac303cf1f5835951345a56c5f7f582987d266679979c75b"}, - {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f01f4a3565a387080dc49bdd1fefe4ecc77f894991b88ef927edbfa45eb10818"}, - {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:91b522adc25614cdeaf91f7897800b82c13b4b8ac68a42ca959f992f6990c490"}, - {file = "watchdog-5.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d52db5beb5e476e6853da2e2d24dbbbed6797b449c8bf7ea118a4ee0d2c9040e"}, - {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94d11b07c64f63f49876e0ab8042ae034674c8653bfcdaa8c4b32e71cfff87e8"}, - {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:349c9488e1d85d0a58e8cb14222d2c51cbc801ce11ac3936ab4c3af986536926"}, - {file = 
"watchdog-5.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:53a3f10b62c2d569e260f96e8d966463dec1a50fa4f1b22aec69e3f91025060e"}, - {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:950f531ec6e03696a2414b6308f5c6ff9dab7821a768c9d5788b1314e9a46ca7"}, - {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae6deb336cba5d71476caa029ceb6e88047fc1dc74b62b7c4012639c0b563906"}, - {file = "watchdog-5.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1021223c08ba8d2d38d71ec1704496471ffd7be42cfb26b87cd5059323a389a1"}, - {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:752fb40efc7cc8d88ebc332b8f4bcbe2b5cc7e881bccfeb8e25054c00c994ee3"}, - {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2e8f3f955d68471fa37b0e3add18500790d129cc7efe89971b8a4cc6fdeb0b2"}, - {file = "watchdog-5.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b8ca4d854adcf480bdfd80f46fdd6fb49f91dd020ae11c89b3a79e19454ec627"}, - {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:90a67d7857adb1d985aca232cc9905dd5bc4803ed85cfcdcfcf707e52049eda7"}, - {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:720ef9d3a4f9ca575a780af283c8fd3a0674b307651c1976714745090da5a9e8"}, - {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:223160bb359281bb8e31c8f1068bf71a6b16a8ad3d9524ca6f523ac666bb6a1e"}, - {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:560135542c91eaa74247a2e8430cf83c4342b29e8ad4f520ae14f0c8a19cfb5b"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dd021efa85970bd4824acacbb922066159d0f9e546389a4743d56919b6758b91"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:78864cc8f23dbee55be34cc1494632a7ba30263951b5b2e8fc8286b95845f82c"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_i686.whl", hash = "sha256:1e9679245e3ea6498494b3028b90c7b25dbb2abe65c7d07423ecfc2d6218ff7c"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64.whl", hash = "sha256:9413384f26b5d050b6978e6fcd0c1e7f0539be7a4f1a885061473c5deaa57221"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:294b7a598974b8e2c6123d19ef15de9abcd282b0fbbdbc4d23dfa812959a9e05"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:26dd201857d702bdf9d78c273cafcab5871dd29343748524695cecffa44a8d97"}, - {file = "watchdog-5.0.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:0f9332243355643d567697c3e3fa07330a1d1abf981611654a1f2bf2175612b7"}, - {file = "watchdog-5.0.3-py3-none-win32.whl", hash = "sha256:c66f80ee5b602a9c7ab66e3c9f36026590a0902db3aea414d59a2f55188c1f49"}, - {file = "watchdog-5.0.3-py3-none-win_amd64.whl", hash = "sha256:f00b4cf737f568be9665563347a910f8bdc76f88c2970121c86243c8cfdf90e9"}, - {file = "watchdog-5.0.3-py3-none-win_ia64.whl", hash = "sha256:49f4d36cb315c25ea0d946e018c01bb028048023b9e103d3d3943f58e109dd45"}, - {file = "watchdog-5.0.3.tar.gz", hash = "sha256:108f42a7f0345042a854d4d0ad0834b741d421330d5f575b81cb27b883500176"}, +python-versions = ">=3.8" +files = [ + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"}, + {file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"}, + {file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"}, + {file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"}, + {file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"}, + {file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"}, + {file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"}, + {file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"}, + {file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"}, + {file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = 
"sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"}, + {file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"}, + {file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"}, + {file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"}, + {file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"}, + {file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"}, ] [package.extras] @@ -4426,6 +4390,20 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] +[[package]] +name = "wheel" +version = "0.44.0" +description = "A built-package format for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "wheel-0.44.0-py3-none-any.whl", hash = "sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f"}, + {file = "wheel-0.44.0.tar.gz", hash = "sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49"}, +] + +[package.extras] +test = ["pytest (>=6.0.0)", "setuptools (>=65)"] + [[package]] name = "wrapt" version = "1.16.0" @@ -4507,13 +4485,13 @@ files = [ [[package]] name = "zipp" -version = "3.20.2" +version = "3.20.1" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, - {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, + {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"}, + {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"}, ] [package.extras] @@ -4526,5 +4504,5 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" -python-versions = ">=3.9,<3.13" -content-hash = "dcae373e463fd4168abbcdea643d9e1efd8cc185f58c8de0d5c645ce88a8c28b" +python-versions = ">=3.8,<3.12" +content-hash = "b5097c7c1112ac5503db1fd169eb8b6e3fc67f01f85dbf1fb163aa97e0dbb8d0" diff --git a/pyproject.toml b/pyproject.toml index 273fdb370..ae7e9e033 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,15 +27,15 @@ classifiers = [ ] [tool.poetry.dependencies] -python = ">=3.9,<3.13" -torch = "^2.3.0" +python = ">=3.8,<3.12" +torch = "^2.1.0" torchvision = "*" tensorboard = "*" toml = "*" -pandas = "^2" -numpy = "^2" -scikit-learn = "^1" -scikit-image = "^0.24" +pandas = "^1.2" +numpy = "^1.17" +scikit-learn = "^1.0" +scikit-image = "^0.21" joblib = "^1.2.0" click = "^8" click-option-group = "^0.5" From 0b4381c79f59afc2431e1653ab1fd0693236ddae Mon Sep 17 00:00:00 2001 From: Thibault de Varax <154365476+thibaultdvx@users.noreply.github.com> Date: Fri, 18 Oct 2024 12:54:58 +0200 
Subject: [PATCH 07/16] Basic neural networks (#660)

* add customizable networks (MLP, ConvEncoder, ConvDecoder, CNN, Generator, AutoEncoder, VAE)
* add sota networks (ResNet, DenseNet, SE-ResNet, UNet, Attention-UNet, Vision Transformer)
* update config classes
* update factory function
---
 clinicadl/monai_networks/__init__.py | 4 +-
 clinicadl/monai_networks/config/__init__.py | 3 +-
 .../monai_networks/config/autoencoder.py | 92 +--
 clinicadl/monai_networks/config/base.py | 210 +++----
 clinicadl/monai_networks/config/classifier.py | 63 --
 clinicadl/monai_networks/config/cnn.py | 24 +
 .../monai_networks/config/conv_decoder.py | 65 ++
 .../monai_networks/config/conv_encoder.py | 64 ++
 clinicadl/monai_networks/config/densenet.py | 91 ++-
 clinicadl/monai_networks/config/factory.py | 44 +-
 clinicadl/monai_networks/config/fcn.py | 79 ---
 clinicadl/monai_networks/config/generator.py | 40 +-
 clinicadl/monai_networks/config/mlp.py | 52 ++
 clinicadl/monai_networks/config/regressor.py | 34 --
 clinicadl/monai_networks/config/resnet.py | 177 ++----
 clinicadl/monai_networks/config/senet.py | 60 ++
 clinicadl/monai_networks/config/unet.py | 56 +-
 clinicadl/monai_networks/config/utils/enum.py | 129 ----
 clinicadl/monai_networks/config/vit.py | 186 ++----
 clinicadl/monai_networks/factory.py | 119 +++-
 clinicadl/monai_networks/nn/__init__.py | 13 +
 clinicadl/monai_networks/nn/att_unet.py | 207 +++++++
 clinicadl/monai_networks/nn/autoencoder.py | 416 +++++++++++++
 clinicadl/monai_networks/nn/cnn.py | 124 ++++
 clinicadl/monai_networks/nn/conv_decoder.py | 388 ++++++++++++
 clinicadl/monai_networks/nn/conv_encoder.py | 392 ++++++++++++
 clinicadl/monai_networks/nn/densenet.py | 312 ++++++++++
 clinicadl/monai_networks/nn/generator.py | 131 ++++
 .../monai_networks/nn/layers/__init__.py | 0
 clinicadl/monai_networks/nn/layers/resnet.py | 124 ++++
 clinicadl/monai_networks/nn/layers/senet.py | 142 +++++
 clinicadl/monai_networks/nn/layers/unet.py | 102 ++++
 clinicadl/monai_networks/nn/layers/unpool.py | 87 +++
 .../nn/layers/utils/__init__.py | 19 +
 .../monai_networks/nn/layers/utils/enum.py | 65 ++
 .../monai_networks/nn/layers/utils/types.py | 37 ++
 clinicadl/monai_networks/nn/layers/vit.py | 94 +++
 clinicadl/monai_networks/nn/mlp.py | 146 +++++
 clinicadl/monai_networks/nn/resnet.py | 566 ++++++++++++++++++
 clinicadl/monai_networks/nn/senet.py | 214 +++++++
 clinicadl/monai_networks/nn/unet.py | 250 ++++++++
 clinicadl/monai_networks/nn/utils/__init__.py | 14 +
 clinicadl/monai_networks/nn/utils/checks.py | 167 ++++++
 clinicadl/monai_networks/nn/utils/shapes.py | 203 +++++++
 clinicadl/monai_networks/nn/vae.py | 200 +++++++
 clinicadl/monai_networks/nn/vit.py | 420 +++++++++++++
 clinicadl/utils/enum.py | 11 +
 .../monai_networks/config/__init__.py | 0
 .../monai_networks/config/test_autoencoder.py | 171 ------
 .../monai_networks/config/test_classifier.py | 132 ----
 .../monai_networks/config/test_config.py | 232 +++++++
 .../monai_networks/config/test_densenet.py | 48 --
 .../monai_networks/config/test_factory.py | 6 +-
 .../monai_networks/config/test_fcn.py | 97 ---
 .../monai_networks/config/test_generator.py | 72 ---
 .../monai_networks/config/test_regressor.py | 72 ---
 .../monai_networks/config/test_resnet.py | 83 ---
 .../config/test_resnet_features.py | 56 --
 .../monai_networks/config/test_segresnet.py | 41 --
 .../monai_networks/config/test_unet.py | 133 ----
 .../monai_networks/config/test_vit.py | 162 -----
 tests/unittests/monai_networks/nn/__init__.py | 0
 .../monai_networks/nn/test_att_unet.py | 134 +++++
.../monai_networks/nn/test_autoencoder.py | 215 +++++++ tests/unittests/monai_networks/nn/test_cnn.py | 62 ++ .../monai_networks/nn/test_conv_decoder.py | 407 +++++++++++++ .../monai_networks/nn/test_conv_encoder.py | 400 +++++++++++++ .../monai_networks/nn/test_densenet.py | 138 +++++ .../monai_networks/nn/test_generator.py | 67 +++ tests/unittests/monai_networks/nn/test_mlp.py | 125 ++++ .../monai_networks/nn/test_resnet.py | 173 ++++++ .../unittests/monai_networks/nn/test_senet.py | 172 ++++++ .../unittests/monai_networks/nn/test_unet.py | 127 ++++ tests/unittests/monai_networks/nn/test_vae.py | 99 +++ tests/unittests/monai_networks/nn/test_vit.py | 279 +++++++++ .../monai_networks/nn/utils/__init__.py | 0 .../monai_networks/nn/utils/test_checks.py | 127 ++++ .../monai_networks/nn/utils/test_shapes.py | 281 +++++++++ .../unittests/monai_networks/test_factory.py | 318 +++++++--- 79 files changed, 8837 insertions(+), 2028 deletions(-) delete mode 100644 clinicadl/monai_networks/config/classifier.py create mode 100644 clinicadl/monai_networks/config/cnn.py create mode 100644 clinicadl/monai_networks/config/conv_decoder.py create mode 100644 clinicadl/monai_networks/config/conv_encoder.py delete mode 100644 clinicadl/monai_networks/config/fcn.py create mode 100644 clinicadl/monai_networks/config/mlp.py delete mode 100644 clinicadl/monai_networks/config/regressor.py create mode 100644 clinicadl/monai_networks/config/senet.py delete mode 100644 clinicadl/monai_networks/config/utils/enum.py create mode 100644 clinicadl/monai_networks/nn/__init__.py create mode 100644 clinicadl/monai_networks/nn/att_unet.py create mode 100644 clinicadl/monai_networks/nn/autoencoder.py create mode 100644 clinicadl/monai_networks/nn/cnn.py create mode 100644 clinicadl/monai_networks/nn/conv_decoder.py create mode 100644 clinicadl/monai_networks/nn/conv_encoder.py create mode 100644 clinicadl/monai_networks/nn/densenet.py create mode 100644 clinicadl/monai_networks/nn/generator.py create mode 100644 clinicadl/monai_networks/nn/layers/__init__.py create mode 100644 clinicadl/monai_networks/nn/layers/resnet.py create mode 100644 clinicadl/monai_networks/nn/layers/senet.py create mode 100644 clinicadl/monai_networks/nn/layers/unet.py create mode 100644 clinicadl/monai_networks/nn/layers/unpool.py create mode 100644 clinicadl/monai_networks/nn/layers/utils/__init__.py create mode 100644 clinicadl/monai_networks/nn/layers/utils/enum.py create mode 100644 clinicadl/monai_networks/nn/layers/utils/types.py create mode 100644 clinicadl/monai_networks/nn/layers/vit.py create mode 100644 clinicadl/monai_networks/nn/mlp.py create mode 100644 clinicadl/monai_networks/nn/resnet.py create mode 100644 clinicadl/monai_networks/nn/senet.py create mode 100644 clinicadl/monai_networks/nn/unet.py create mode 100644 clinicadl/monai_networks/nn/utils/__init__.py create mode 100644 clinicadl/monai_networks/nn/utils/checks.py create mode 100644 clinicadl/monai_networks/nn/utils/shapes.py create mode 100644 clinicadl/monai_networks/nn/vae.py create mode 100644 clinicadl/monai_networks/nn/vit.py create mode 100644 tests/unittests/monai_networks/config/__init__.py delete mode 100644 tests/unittests/monai_networks/config/test_autoencoder.py delete mode 100644 tests/unittests/monai_networks/config/test_classifier.py create mode 100644 tests/unittests/monai_networks/config/test_config.py delete mode 100644 tests/unittests/monai_networks/config/test_densenet.py delete mode 100644 tests/unittests/monai_networks/config/test_fcn.py delete mode 
100644 tests/unittests/monai_networks/config/test_generator.py delete mode 100644 tests/unittests/monai_networks/config/test_regressor.py delete mode 100644 tests/unittests/monai_networks/config/test_resnet.py delete mode 100644 tests/unittests/monai_networks/config/test_resnet_features.py delete mode 100644 tests/unittests/monai_networks/config/test_segresnet.py delete mode 100644 tests/unittests/monai_networks/config/test_unet.py delete mode 100644 tests/unittests/monai_networks/config/test_vit.py create mode 100644 tests/unittests/monai_networks/nn/__init__.py create mode 100644 tests/unittests/monai_networks/nn/test_att_unet.py create mode 100644 tests/unittests/monai_networks/nn/test_autoencoder.py create mode 100644 tests/unittests/monai_networks/nn/test_cnn.py create mode 100644 tests/unittests/monai_networks/nn/test_conv_decoder.py create mode 100644 tests/unittests/monai_networks/nn/test_conv_encoder.py create mode 100644 tests/unittests/monai_networks/nn/test_densenet.py create mode 100644 tests/unittests/monai_networks/nn/test_generator.py create mode 100644 tests/unittests/monai_networks/nn/test_mlp.py create mode 100644 tests/unittests/monai_networks/nn/test_resnet.py create mode 100644 tests/unittests/monai_networks/nn/test_senet.py create mode 100644 tests/unittests/monai_networks/nn/test_unet.py create mode 100644 tests/unittests/monai_networks/nn/test_vae.py create mode 100644 tests/unittests/monai_networks/nn/test_vit.py create mode 100644 tests/unittests/monai_networks/nn/utils/__init__.py create mode 100644 tests/unittests/monai_networks/nn/utils/test_checks.py create mode 100644 tests/unittests/monai_networks/nn/utils/test_shapes.py diff --git a/clinicadl/monai_networks/__init__.py b/clinicadl/monai_networks/__init__.py index 1d74473d4..ea44f7516 100644 --- a/clinicadl/monai_networks/__init__.py +++ b/clinicadl/monai_networks/__init__.py @@ -1,2 +1,2 @@ -from .config import ImplementedNetworks, NetworkConfig, create_network_config -from .factory import get_network +from .config import ImplementedNetworks, NetworkConfig +from .factory import get_network, get_network_from_config diff --git a/clinicadl/monai_networks/config/__init__.py b/clinicadl/monai_networks/config/__init__.py index 10b8795dc..1c39fa4fa 100644 --- a/clinicadl/monai_networks/config/__init__.py +++ b/clinicadl/monai_networks/config/__init__.py @@ -1,3 +1,2 @@ -from .base import NetworkConfig +from .base import ImplementedNetworks, NetworkConfig, NetworkType from .factory import create_network_config -from .utils.enum import ImplementedNetworks diff --git a/clinicadl/monai_networks/config/autoencoder.py b/clinicadl/monai_networks/config/autoencoder.py index a6df1a20c..b19108573 100644 --- a/clinicadl/monai_networks/config/autoencoder.py +++ b/clinicadl/monai_networks/config/autoencoder.py @@ -1,89 +1,45 @@ -from typing import Optional, Tuple, Union +from typing import Optional, Sequence, Union -from pydantic import ( - NonNegativeInt, - PositiveInt, - computed_field, - model_validator, -) +from pydantic import PositiveInt, computed_field +from clinicadl.monai_networks.nn.layers.utils import ( + ActivationParameters, + UnpoolingMode, +) from clinicadl.utils.factories import DefaultFromLibrary -from .base import VaryingDepthNetworkConfig -from .utils.enum import ImplementedNetworks - -__all__ = ["AutoEncoderConfig", "VarAutoEncoderConfig"] - +from .base import ImplementedNetworks, NetworkConfig +from .conv_encoder import ConvEncoderOptions +from .mlp import MLPOptions -class 
AutoEncoderConfig(VaryingDepthNetworkConfig): - """Config class for autoencoders.""" - spatial_dims: PositiveInt - in_channels: PositiveInt - out_channels: PositiveInt +class AutoEncoderConfig(NetworkConfig): + """Config class for AutoEncoder.""" - inter_channels: Union[ - Optional[Tuple[PositiveInt, ...]], DefaultFromLibrary - ] = DefaultFromLibrary.YES - inter_dilations: Union[ - Optional[Tuple[PositiveInt, ...]], DefaultFromLibrary + in_shape: Sequence[PositiveInt] + latent_size: PositiveInt + conv_args: ConvEncoderOptions + mlp_args: Union[Optional[MLPOptions], DefaultFromLibrary] = DefaultFromLibrary.YES + out_channels: Union[ + Optional[PositiveInt], DefaultFromLibrary ] = DefaultFromLibrary.YES - num_inter_units: Union[NonNegativeInt, DefaultFromLibrary] = DefaultFromLibrary.YES - padding: Union[ - Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]], DefaultFromLibrary + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary ] = DefaultFromLibrary.YES + unpooling_mode: Union[UnpoolingMode, DefaultFromLibrary] = DefaultFromLibrary.YES @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" return ImplementedNetworks.AE - @computed_field - @property - def dim(self) -> int: - """Dimension of the images.""" - return self.spatial_dims - - @model_validator(mode="after") - def model_validator(self): - """Checks coherence between parameters.""" - if self.padding != DefaultFromLibrary.YES: - assert self._check_dimensions( - self.padding - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for padding. You passed {self.padding}." - if isinstance(self.inter_channels, tuple) and isinstance( - self.inter_dilations, tuple - ): - assert len(self.inter_channels) == len( - self.inter_dilations - ), "inter_channels and inter_dilations muust have the same size." - elif isinstance(self.inter_dilations, tuple) and not isinstance( - self.inter_channels, tuple - ): - raise ValueError( - "You passed inter_dilations but didn't pass inter_channels." - ) - return self - -class VarAutoEncoderConfig(AutoEncoderConfig): - """Config class for variational autoencoders.""" - - in_shape: Tuple[PositiveInt, ...] - in_channels: Optional[int] = None - latent_size: PositiveInt - use_sigmoid: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES +class VAEConfig(AutoEncoderConfig): + """Config class for Variational AutoEncoder.""" @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" return ImplementedNetworks.VAE - - @model_validator(mode="after") - def model_validator_bis(self): - """Checks coherence between parameters.""" - assert ( - len(self.in_shape[1:]) == self.spatial_dims - ), f"You passed {self.spatial_dims} for spatial_dims, but in_shape suggests {len(self.in_shape[1:])} spatial dimensions." 
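The autoencoder hunk above illustrates the new shape-driven config style introduced by this patch: instead of MONAI-style `spatial_dims`/`channels`/`strides` parameters, `AutoEncoderConfig` is now built from an `in_shape`, a `latent_size` and nested `conv_args`/`mlp_args` option blocks, and the network `name` is a computed field rather than a user input. A minimal usage sketch follows; the image shape and channel counts are illustrative only, and it assumes that `ConvEncoderOptions` exposes a required `channels` field (mirroring the `ConvDecoderOptions` class shown further down) and that `get_network_from_config` takes the config object directly, as its name suggests:

from clinicadl.monai_networks import get_network_from_config
from clinicadl.monai_networks.config.autoencoder import AutoEncoderConfig, VAEConfig
from clinicadl.monai_networks.config.conv_encoder import ConvEncoderOptions

# Autoencoder for single-channel 28x28 images with a 16-dimensional latent
# space; unset fields (mlp_args, out_channels, output_act, unpooling_mode)
# fall back to the library defaults through DefaultFromLibrary.YES.
ae_config = AutoEncoderConfig(
    in_shape=(1, 28, 28),                           # illustrative shape
    latent_size=16,
    conv_args=ConvEncoderOptions(channels=[8, 16, 32]),  # assumed field
)
assert ae_config.name == "AutoEncoder"  # computed, not passed by the user

# The variational variant shares every field; only the computed name
# (and hence the network the factory builds) differs.
vae_config = VAEConfig(
    in_shape=(1, 28, 28),
    latent_size=16,
    conv_args=ConvEncoderOptions(channels=[8, 16, 32]),
)
network = get_network_from_config(vae_config)  # assumed call signature

Note also that the old `spatial_dims`-vs-`in_shape` consistency validator disappears: since `spatial_dims` is no longer a user-facing field, the spatial dimensionality is presumably inferred from `in_shape` itself, leaving nothing to cross-check.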
diff --git a/clinicadl/monai_networks/config/base.py b/clinicadl/monai_networks/config/base.py index 6e0ff1b6b..d5c0a6f9b 100644 --- a/clinicadl/monai_networks/config/base.py +++ b/clinicadl/monai_networks/config/base.py @@ -1,168 +1,98 @@ -from __future__ import annotations - from abc import ABC, abstractmethod from enum import Enum -from typing import Any, Dict, Optional, Tuple, Union - -from pydantic import ( - BaseModel, - ConfigDict, - NonNegativeFloat, - NonNegativeInt, - PositiveInt, - computed_field, - field_validator, - model_validator, -) +from typing import Optional, Union + +from pydantic import BaseModel, ConfigDict, PositiveInt, computed_field +from clinicadl.monai_networks.nn.layers.utils import ActivationParameters from clinicadl.utils.factories import DefaultFromLibrary -from .utils.enum import ( - ImplementedActFunctions, - ImplementedNetworks, - ImplementedNormLayers, -) + +class ImplementedNetworks(str, Enum): + """Implemented neural networks in ClinicaDL.""" + + MLP = "MLP" + CONV_ENCODER = "ConvEncoder" + CONV_DECODER = "ConvDecoder" + CNN = "CNN" + GENERATOR = "Generator" + AE = "AutoEncoder" + VAE = "VAE" + DENSENET = "DenseNet" + DENSENET_121 = "DenseNet-121" + DENSENET_161 = "DenseNet-161" + DENSENET_169 = "DenseNet-169" + DENSENET_201 = "DenseNet-201" + RESNET = "ResNet" + RESNET_18 = "ResNet-18" + RESNET_34 = "ResNet-34" + RESNET_50 = "ResNet-50" + RESNET_101 = "ResNet-101" + RESNET_152 = "ResNet-152" + SE_RESNET = "SEResNet" + SE_RESNET_50 = "SEResNet-50" + SE_RESNET_101 = "SEResNet-101" + SE_RESNET_152 = "SEResNet-152" + UNET = "UNet" + ATT_UNET = "AttentionUNet" + VIT = "ViT" + VIT_B_16 = "ViT-B/16" + VIT_B_32 = "ViT-B/32" + VIT_L_16 = "ViT-L/16" + VIT_L_32 = "ViT-L/32" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. Implemented neural networks are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +class NetworkType(str, Enum): + """ + Useful to know where to look for the network. + See :py:func:`clinicadl.monai_networks.factory.get_network` + """ + + CUSTOM = "custom" # our own networks + RESNET = "sota-ResNet" + DENSENET = "sota-DenseNet" + SE_RESNET = "sota-SEResNet" + VIT = "sota-ViT" class NetworkConfig(BaseModel, ABC): """Base config class to configure neural networks.""" - kernel_size: Union[ - PositiveInt, Tuple[PositiveInt, ...], DefaultFromLibrary - ] = DefaultFromLibrary.YES - up_kernel_size: Union[ - PositiveInt, Tuple[PositiveInt, ...], DefaultFromLibrary - ] = DefaultFromLibrary.YES - num_res_units: Union[NonNegativeInt, DefaultFromLibrary] = DefaultFromLibrary.YES - act: Union[ - ImplementedActFunctions, - Tuple[ImplementedActFunctions, Dict[str, Any]], - DefaultFromLibrary, - ] = DefaultFromLibrary.YES - norm: Union[ - ImplementedNormLayers, - Tuple[ImplementedNormLayers, Dict[str, Any]], - DefaultFromLibrary, - ] = DefaultFromLibrary.YES - bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - adn_ordering: Union[Optional[str], DefaultFromLibrary] = DefaultFromLibrary.YES # pydantic config model_config = ConfigDict( validate_assignment=True, use_enum_values=True, validate_default=True, - protected_namespaces=(), ) @computed_field @property @abstractmethod - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" @computed_field @property - @abstractmethod - def dim(self) -> int: - """Dimension of the images.""" + def _type(self) -> NetworkType: + """ + To know where to look for the network. + Default to 'custom'. 
+ """ + return NetworkType.CUSTOM - @classmethod - def base_validator_dropout(cls, v): - """Checks that dropout is between 0 and 1.""" - if isinstance(v, float): - assert ( - 0 <= v <= 1 - ), f"dropout must be between 0 and 1 but it has been set to {v}." - return v - - @field_validator("kernel_size", "up_kernel_size") - @classmethod - def base_is_odd(cls, value, field): - """Checks if a field is odd.""" - if value != DefaultFromLibrary.YES: - if isinstance(value, int): - value_ = (value,) - else: - value_ = value - for v in value_: - assert v % 2 == 1, f"{field.field_name} must be odd." - return value - - @field_validator("adn_ordering", mode="after") - @classmethod - def base_adn_validator(cls, v): - """Checks ADN sequence.""" - if v != DefaultFromLibrary.YES: - for letter in v: - assert ( - letter in {"A", "D", "N"} - ), f"adn_ordering must be composed by 'A', 'D' or/and 'N'. You passed {letter}." - assert len(v) == len( - set(v) - ), "adn_ordering cannot contain duplicated letter." - - return v - @classmethod - def base_at_least_2d(cls, v, ctx): - """Checks that a tuple has at least a length of two.""" - if isinstance(v, tuple): - assert ( - len(v) >= 2 - ), f"{ctx.field_name} should have at least two dimensions (with the first one for the channel)." - return v - - @model_validator(mode="after") - def base_model_validator(self): - """Checks coherence between parameters.""" - if self.kernel_size != DefaultFromLibrary.YES: - assert self._check_dimensions( - self.kernel_size - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for kernel_size. You passed {self.kernel_size}." - if self.up_kernel_size != DefaultFromLibrary.YES: - assert self._check_dimensions( - self.up_kernel_size - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for up_kernel_size. You passed {self.up_kernel_size}." - return self - - def _check_dimensions( - self, - value: Union[float, Tuple[float, ...]], - ) -> bool: - """Checks if a tuple has the right dimension.""" - if isinstance(value, tuple): - return len(value) == self.dim - return True - - -class VaryingDepthNetworkConfig(NetworkConfig, ABC): - """ - Base config class to configure neural networks. - More precisely, we refer to MONAI's networks with 'channels' and 'strides' parameters. - """ +class PreTrainedConfig(NetworkConfig): + """Base config class for SOTA networks.""" - channels: Tuple[PositiveInt, ...] - strides: Tuple[Union[PositiveInt, Tuple[PositiveInt, ...]], ...] - dropout: Union[ - Optional[NonNegativeFloat], DefaultFromLibrary + num_outputs: Optional[PositiveInt] + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary ] = DefaultFromLibrary.YES - - @field_validator("dropout") - @classmethod - def validator_dropout(cls, v): - """Checks that dropout is between 0 and 1.""" - return cls.base_validator_dropout(v) - - @model_validator(mode="after") - def channels_strides_validator(self): - """Checks coherence between parameters.""" - n_layers = len(self.channels) - assert ( - len(self.strides) == n_layers - ), f"There are {n_layers} layers but you passed {len(self.strides)} strides." - for s in self.strides: - assert self._check_dimensions( - s - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for strides. You passed {s}." 
- - return self + pretrained: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES diff --git a/clinicadl/monai_networks/config/classifier.py b/clinicadl/monai_networks/config/classifier.py deleted file mode 100644 index a01bd0efc..000000000 --- a/clinicadl/monai_networks/config/classifier.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import annotations - -from typing import Any, Dict, Optional, Tuple, Union - -from pydantic import PositiveInt, computed_field - -from clinicadl.utils.factories import DefaultFromLibrary - -from .regressor import RegressorConfig -from .utils.enum import ImplementedActFunctions, ImplementedNetworks - -__all__ = ["ClassifierConfig", "DiscriminatorConfig", "CriticConfig"] - - -class ClassifierConfig(RegressorConfig): - """Config class for classifiers.""" - - classes: PositiveInt - out_shape: Optional[Tuple[PositiveInt, ...]] = None - last_act: Optional[ - Union[ - ImplementedActFunctions, - Tuple[ImplementedActFunctions, Dict[str, Any]], - DefaultFromLibrary, - ] - ] = DefaultFromLibrary.YES - - @computed_field - @property - def network(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.CLASSIFIER - - -class DiscriminatorConfig(ClassifierConfig): - """Config class for discriminators.""" - - classes: Optional[PositiveInt] = None - - @computed_field - @property - def network(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.DISCRIMINATOR - - -class CriticConfig(ClassifierConfig): - """Config class for discriminators.""" - - classes: Optional[PositiveInt] = None - last_act: Optional[ - Union[ - ImplementedActFunctions, - Tuple[ImplementedActFunctions, Dict[str, Any]], - DefaultFromLibrary, - ] - ] = None - - @computed_field - @property - def network(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.CRITIC diff --git a/clinicadl/monai_networks/config/cnn.py b/clinicadl/monai_networks/config/cnn.py new file mode 100644 index 000000000..a7d2043db --- /dev/null +++ b/clinicadl/monai_networks/config/cnn.py @@ -0,0 +1,24 @@ +from typing import Optional, Sequence, Union + +from pydantic import PositiveInt, computed_field + +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig +from .conv_encoder import ConvEncoderOptions +from .mlp import MLPOptions + + +class CNNConfig(NetworkConfig): + """Config class for CNN.""" + + in_shape: Sequence[PositiveInt] + num_outputs: PositiveInt + conv_args: ConvEncoderOptions + mlp_args: Union[Optional[MLPOptions], DefaultFromLibrary] = DefaultFromLibrary.YES + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.CNN diff --git a/clinicadl/monai_networks/config/conv_decoder.py b/clinicadl/monai_networks/config/conv_decoder.py new file mode 100644 index 000000000..5dc78dfec --- /dev/null +++ b/clinicadl/monai_networks/config/conv_decoder.py @@ -0,0 +1,65 @@ +from typing import Optional, Sequence, Union + +from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field + +from clinicadl.monai_networks.nn.layers.utils import ( + ActivationParameters, + ConvNormalizationParameters, + ConvParameters, + UnpoolingParameters, +) +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig + + +class ConvDecoderOptions(BaseModel): + """ + Config class for ConvDecoder when it is a submodule. 
+ See for example: :py:class:`clinicadl.monai_networks.nn.generator.Generator` + """ + + channels: Sequence[PositiveInt] + kernel_size: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + stride: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + output_padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + dilation: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + unpooling: Union[ + Optional[UnpoolingParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + unpooling_indices: Union[ + Optional[Sequence[int]], DefaultFromLibrary + ] = DefaultFromLibrary.YES + act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + norm: Union[ + Optional[ConvNormalizationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES + bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES + adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES + + # pydantic config + model_config = ConfigDict( + validate_assignment=True, + use_enum_values=True, + validate_default=True, + ) + + +class ConvDecoderConfig(NetworkConfig, ConvDecoderOptions): + """Config class for ConvDecoder.""" + + spatial_dims: PositiveInt + in_channels: PositiveInt + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.CONV_DECODER diff --git a/clinicadl/monai_networks/config/conv_encoder.py b/clinicadl/monai_networks/config/conv_encoder.py new file mode 100644 index 000000000..499f69b19 --- /dev/null +++ b/clinicadl/monai_networks/config/conv_encoder.py @@ -0,0 +1,64 @@ +from typing import Optional, Sequence, Union + +from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field + +from clinicadl.monai_networks.nn.layers.utils import ( + ActivationParameters, + ConvNormalizationParameters, + ConvParameters, + PoolingParameters, +) +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig + + +class ConvEncoderOptions(BaseModel): + """ + Config class for ConvEncoder when it is a submodule. 
+ See for example: :py:class:`clinicadl.monai_networks.nn.cnn.CNN` + """ + + channels: Sequence[PositiveInt] + kernel_size: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + stride: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + dilation: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + pooling: Union[ + Optional[PoolingParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + pooling_indices: Union[ + Optional[Sequence[int]], DefaultFromLibrary + ] = DefaultFromLibrary.YES + act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + norm: Union[ + Optional[ConvNormalizationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES + bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES + adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES + + # pydantic config + model_config = ConfigDict( + validate_assignment=True, + use_enum_values=True, + validate_default=True, + ) + + +class ConvEncoderConfig(NetworkConfig, ConvEncoderOptions): + """Config class for ConvEncoder.""" + + spatial_dims: PositiveInt + in_channels: PositiveInt + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.CONV_ENCODER diff --git a/clinicadl/monai_networks/config/densenet.py b/clinicadl/monai_networks/config/densenet.py index 796d82203..4984d010b 100644 --- a/clinicadl/monai_networks/config/densenet.py +++ b/clinicadl/monai_networks/config/densenet.py @@ -1,20 +1,11 @@ -from __future__ import annotations +from typing import Optional, Sequence, Union -from typing import Tuple, Union - -from pydantic import ( - NonNegativeFloat, - PositiveInt, - computed_field, - field_validator, -) +from pydantic import PositiveFloat, PositiveInt, computed_field +from clinicadl.monai_networks.nn.layers.utils import ActivationParameters from clinicadl.utils.factories import DefaultFromLibrary -from .base import NetworkConfig -from .utils.enum import ImplementedNetworks - -__all__ = ["DenseNetConfig"] +from .base import ImplementedNetworks, NetworkConfig, NetworkType, PreTrainedConfig class DenseNetConfig(NetworkConfig): @@ -22,29 +13,71 @@ class DenseNetConfig(NetworkConfig): spatial_dims: PositiveInt in_channels: PositiveInt - out_channels: PositiveInt + num_outputs: Optional[PositiveInt] + n_dense_layers: Union[ + Sequence[PositiveInt], DefaultFromLibrary + ] = DefaultFromLibrary.YES init_features: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES growth_rate: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - block_config: Union[ - Tuple[PositiveInt, ...], DefaultFromLibrary + bottleneck_factor: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES + act: Union[ActivationParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary ] = DefaultFromLibrary.YES - bn_size: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - dropout_prob: Union[NonNegativeFloat, DefaultFromLibrary] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES @computed_field @property - def network(self) -> 
ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" - return ImplementedNetworks.DENSE_NET + return ImplementedNetworks.DENSENET + + +class PreTrainedDenseNetConfig(PreTrainedConfig): + """Base config class for SOTA DenseNets.""" @computed_field @property - def dim(self) -> int: - """Dimension of the images.""" - return self.spatial_dims - - @field_validator("dropout_prob") - @classmethod - def validator_dropout(cls, v): - """Checks that dropout is between 0 and 1.""" - return cls.base_validator_dropout(v) + def _type(self) -> NetworkType: + """To know where to look for the network.""" + return NetworkType.DENSENET + + +class DenseNet121Config(PreTrainedDenseNetConfig): + """Config class for DenseNet-121.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.DENSENET_121 + + +class DenseNet161Config(PreTrainedDenseNetConfig): + """Config class for DenseNet-161.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.DENSENET_161 + + +class DenseNet169Config(PreTrainedDenseNetConfig): + """Config class for DenseNet-169.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.DENSENET_169 + + +class DenseNet201Config(PreTrainedDenseNetConfig): + """Config class for DenseNet-201.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.DENSENET_201 diff --git a/clinicadl/monai_networks/config/factory.py b/clinicadl/monai_networks/config/factory.py index 55e0fad39..2b7e5bdc1 100644 --- a/clinicadl/monai_networks/config/factory.py +++ b/clinicadl/monai_networks/config/factory.py @@ -1,16 +1,36 @@ from typing import Type, Union -from .autoencoder import * -from .base import NetworkConfig -from .classifier import * -from .densenet import * -from .fcn import * -from .generator import * -from .regressor import * -from .resnet import * -from .unet import * -from .utils.enum import ImplementedNetworks -from .vit import * +# pylint: disable=unused-import +from .autoencoder import AutoEncoderConfig, VAEConfig +from .base import ImplementedNetworks, NetworkConfig +from .cnn import CNNConfig +from .conv_decoder import ConvDecoderConfig +from .conv_encoder import ConvEncoderConfig +from .densenet import ( + DenseNet121Config, + DenseNet161Config, + DenseNet169Config, + DenseNet201Config, + DenseNetConfig, +) +from .generator import GeneratorConfig +from .mlp import MLPConfig +from .resnet import ( + ResNet18Config, + ResNet34Config, + ResNet50Config, + ResNet101Config, + ResNet152Config, + ResNetConfig, +) +from .senet import ( + SEResNet50Config, + SEResNet101Config, + SEResNet152Config, + SEResNetConfig, +) +from .unet import AttentionUNetConfig, UNetConfig +from .vit import ViTB16Config, ViTB32Config, ViTConfig, ViTL16Config, ViTL32Config def create_network_config( @@ -29,7 +49,7 @@ def create_network_config( Type[NetworkConfig] The config class. 
""" - network = ImplementedNetworks(network) + network = ImplementedNetworks(network).value.replace("-", "").replace("/", "") config_name = "".join([network, "Config"]) config = globals()[config_name] diff --git a/clinicadl/monai_networks/config/fcn.py b/clinicadl/monai_networks/config/fcn.py deleted file mode 100644 index 3bb23d6cb..000000000 --- a/clinicadl/monai_networks/config/fcn.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import annotations - -from typing import Optional, Tuple, Union - -from pydantic import ( - NonNegativeFloat, - PositiveInt, - computed_field, - field_validator, -) - -from clinicadl.utils.factories import DefaultFromLibrary - -from .base import NetworkConfig -from .utils.enum import ImplementedNetworks - -__all__ = ["FullyConnectedNetConfig", "VarFullyConnectedNetConfig"] - - -class FullyConnectedNetConfig(NetworkConfig): - """Config class for fully connected networks.""" - - in_channels: PositiveInt - out_channels: PositiveInt - hidden_channels: Tuple[PositiveInt, ...] - - dropout: Union[ - Optional[NonNegativeFloat], DefaultFromLibrary - ] = DefaultFromLibrary.YES - - @computed_field - @property - def network(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.FCN - - @computed_field - @property - def dim(self) -> Optional[int]: - """Dimension of the images.""" - return None - - @field_validator("dropout") - @classmethod - def validator_dropout(cls, v): - """Checks that dropout is between 0 and 1.""" - return cls.base_validator_dropout(v) - - -class VarFullyConnectedNetConfig(NetworkConfig): - """Config class for fully connected networks.""" - - in_channels: PositiveInt - out_channels: PositiveInt - latent_size: PositiveInt - encode_channels: Tuple[PositiveInt, ...] - decode_channels: Tuple[PositiveInt, ...] - - dropout: Union[ - Optional[NonNegativeFloat], DefaultFromLibrary - ] = DefaultFromLibrary.YES - - @computed_field - @property - def network(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.VAR_FCN - - @computed_field - @property - def dim(self) -> Optional[int]: - """Dimension of the images.""" - return None - - @field_validator("dropout") - @classmethod - def validator_dropout(cls, v): - """Checks that dropout is between 0 and 1.""" - return cls.base_validator_dropout(v) diff --git a/clinicadl/monai_networks/config/generator.py b/clinicadl/monai_networks/config/generator.py index b864d371d..6c7836474 100644 --- a/clinicadl/monai_networks/config/generator.py +++ b/clinicadl/monai_networks/config/generator.py @@ -1,38 +1,24 @@ -from __future__ import annotations +from typing import Optional, Sequence, Union -from typing import Tuple +from pydantic import PositiveInt, computed_field -from pydantic import ( - PositiveInt, - computed_field, - field_validator, -) +from clinicadl.utils.factories import DefaultFromLibrary -from .base import VaryingDepthNetworkConfig -from .utils.enum import ImplementedNetworks +from .base import ImplementedNetworks, NetworkConfig +from .conv_decoder import ConvDecoderOptions +from .mlp import MLPOptions -__all__ = ["GeneratorConfig"] +class GeneratorConfig(NetworkConfig): + """Config class for Generator.""" -class GeneratorConfig(VaryingDepthNetworkConfig): - """Config class for generators.""" - - latent_shape: Tuple[PositiveInt, ...] - start_shape: Tuple[PositiveInt, ...] 
+ latent_size: PositiveInt + start_shape: Sequence[PositiveInt] + conv_args: ConvDecoderOptions + mlp_args: Union[Optional[MLPOptions], DefaultFromLibrary] = DefaultFromLibrary.YES @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" return ImplementedNetworks.GENERATOR - - @computed_field - @property - def dim(self) -> int: - """Dimension of the images.""" - return len(self.start_shape[1:]) - - @field_validator("start_shape") - def at_least_2d(cls, v, field): - """Checks that a tuple has at least a length of two.""" - return cls.base_at_least_2d(v, field) diff --git a/clinicadl/monai_networks/config/mlp.py b/clinicadl/monai_networks/config/mlp.py new file mode 100644 index 000000000..5d12f303f --- /dev/null +++ b/clinicadl/monai_networks/config/mlp.py @@ -0,0 +1,52 @@ +from typing import Optional, Sequence, Union + +from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field + +from clinicadl.monai_networks.nn.layers.utils import ( + ActivationParameters, + NormalizationParameters, +) +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig + + +class MLPOptions(BaseModel): + """ + Config class for MLP when it is a submodule. + See for example: :py:class:`clinicadl.monai_networks.nn.cnn.CNN` + """ + + hidden_channels: Sequence[PositiveInt] + act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + norm: Union[ + Optional[NormalizationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES + bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES + adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES + + # pydantic config + model_config = ConfigDict( + validate_assignment=True, + use_enum_values=True, + validate_default=True, + ) + + +class MLPConfig(NetworkConfig, MLPOptions): + """Config class for Multi Layer Perceptron.""" + + in_channels: PositiveInt + out_channels: PositiveInt + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.MLP diff --git a/clinicadl/monai_networks/config/regressor.py b/clinicadl/monai_networks/config/regressor.py deleted file mode 100644 index 5410e31fa..000000000 --- a/clinicadl/monai_networks/config/regressor.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import annotations - -from typing import Tuple - -from pydantic import PositiveInt, computed_field, field_validator - -from .base import VaryingDepthNetworkConfig -from .utils.enum import ImplementedNetworks - -__all__ = ["RegressorConfig"] - - -class RegressorConfig(VaryingDepthNetworkConfig): - """Config class for regressors.""" - - in_shape: Tuple[PositiveInt, ...] - out_shape: Tuple[PositiveInt, ...] 
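A minimal sketch of how these option classes compose (channel numbers and shapes are arbitrary): `CNNConfig` and `GeneratorConfig` embed `ConvEncoderOptions`/`ConvDecoderOptions` and, optionally, `MLPOptions`; pydantic also coerces plain dicts into these sub-models.

from clinicadl.monai_networks.config.cnn import CNNConfig
from clinicadl.monai_networks.config.conv_encoder import ConvEncoderOptions
from clinicadl.monai_networks.config.mlp import MLPOptions

config = CNNConfig(
    in_shape=(1, 64, 64),  # (channels, *spatial dims)
    num_outputs=2,
    conv_args=ConvEncoderOptions(channels=[8, 16]),
    mlp_args=MLPOptions(hidden_channels=[32], dropout=0.1),
)
config.name  # computed field -> "CNN"
# any option left unset stays at DefaultFromLibrary.YES until the factory
# fills it with the network's actual default value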
- - @computed_field - @property - def network(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.REGRESSOR - - @computed_field - @property - def dim(self) -> int: - """Dimension of the images.""" - return len(self.in_shape[1:]) - - @field_validator("in_shape") - def at_least_2d(cls, v, ctx): - """Checks that a tuple has at least a length of two.""" - return cls.base_at_least_2d(v, ctx) diff --git a/clinicadl/monai_networks/config/resnet.py b/clinicadl/monai_networks/config/resnet.py index 96bb6e193..0f3141dd8 100644 --- a/clinicadl/monai_networks/config/resnet.py +++ b/clinicadl/monai_networks/config/resnet.py @@ -1,148 +1,103 @@ -from __future__ import annotations +from typing import Optional, Sequence, Union -from enum import Enum -from typing import Optional, Tuple, Union - -from pydantic import ( - NonNegativeFloat, - PositiveFloat, - PositiveInt, - computed_field, - field_validator, - model_validator, -) +from pydantic import PositiveInt, computed_field +from clinicadl.monai_networks.nn.layers.utils import ActivationParameters +from clinicadl.monai_networks.nn.resnet import ResNetBlockType from clinicadl.utils.factories import DefaultFromLibrary -from .base import NetworkConfig -from .utils.enum import ( - ImplementedNetworks, - ResNetBlocks, - ResNets, - ShortcutTypes, - UpsampleModes, -) - -__all__ = ["ResNetConfig", "ResNetFeaturesConfig", "SegResNetConfig"] +from .base import ImplementedNetworks, NetworkConfig, NetworkType, PreTrainedConfig class ResNetConfig(NetworkConfig): """Config class for ResNet.""" - block: ResNetBlocks - layers: Tuple[PositiveInt, PositiveInt, PositiveInt, PositiveInt] - block_inplanes: Tuple[PositiveInt, PositiveInt, PositiveInt, PositiveInt] - - spatial_dims: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - n_input_channels: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - conv1_t_size: Union[ - PositiveInt, Tuple[PositiveInt, ...], DefaultFromLibrary + spatial_dims: PositiveInt + in_channels: PositiveInt + num_outputs: Optional[PositiveInt] + block_type: Union[str, ResNetBlockType, DefaultFromLibrary] = DefaultFromLibrary.YES + n_res_blocks: Union[ + Sequence[PositiveInt], DefaultFromLibrary + ] = DefaultFromLibrary.YES + n_features: Union[ + Sequence[PositiveInt], DefaultFromLibrary + ] = DefaultFromLibrary.YES + init_conv_size: Union[ + Sequence[PositiveInt], PositiveInt, DefaultFromLibrary + ] = DefaultFromLibrary.YES + init_conv_stride: Union[ + Sequence[PositiveInt], PositiveInt, DefaultFromLibrary ] = DefaultFromLibrary.YES - conv1_t_stride: Union[ - PositiveInt, Tuple[PositiveInt, ...], DefaultFromLibrary + bottleneck_reduction: Union[ + PositiveInt, DefaultFromLibrary + ] = DefaultFromLibrary.YES + act: Union[ActivationParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary ] = DefaultFromLibrary.YES - no_max_pool: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - shortcut_type: Union[ShortcutTypes, DefaultFromLibrary] = DefaultFromLibrary.YES - widen_factor: Union[PositiveFloat, DefaultFromLibrary] = DefaultFromLibrary.YES - num_classes: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - feed_forward: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - bias_downsample: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" - 
return ImplementedNetworks.RES_NET + return ImplementedNetworks.RESNET + + +class PreTrainedResNetConfig(PreTrainedConfig): + """Base config class for SOTA ResNets.""" @computed_field @property - def dim(self) -> int: - """Dimension of the images.""" - return self.spatial_dims if self.spatial_dims != DefaultFromLibrary.YES else 3 + def _type(self) -> NetworkType: + """To know where to look for the network.""" + return NetworkType.RESNET - @model_validator(mode="after") - def model_validator(self): - """Checks coherence between parameters.""" - if self.conv1_t_size != DefaultFromLibrary.YES: - assert self._check_dimensions( - self.conv1_t_size - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for conv1_t_size. You passed {self.conv1_t_size}." - if self.conv1_t_stride != DefaultFromLibrary.YES: - assert self._check_dimensions( - self.conv1_t_stride - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for conv1_t_stride. You passed {self.conv1_t_stride}." - return self +class ResNet18Config(PreTrainedResNetConfig): + """Config class for ResNet-18.""" + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.RESNET_18 -class ResNetFeaturesConfig(NetworkConfig): - """Config class for ResNet backbones.""" - - model_name: ResNets - pretrained: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - spatial_dims: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - in_channels: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES +class ResNet34Config(PreTrainedResNetConfig): + """Config class for ResNet-34.""" @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" - return ImplementedNetworks.RES_NET_FEATURES + return ImplementedNetworks.RESNET_34 + + +class ResNet50Config(PreTrainedResNetConfig): + """Config class for ResNet-50.""" @computed_field @property - def dim(self) -> int: - """Dimension of the images.""" - return self.spatial_dims if self.spatial_dims != DefaultFromLibrary.YES else 3 - - @model_validator(mode="after") - def model_validator(self): - """Checks coherence between parameters.""" - if self.pretrained == DefaultFromLibrary.YES or self.pretrained: - assert ( - self.spatial_dims == DefaultFromLibrary.YES or self.spatial_dims == 3 - ), "Pretrained weights are only available with spatial_dims=3. Otherwise, set pretrained to False." - assert ( - self.in_channels == DefaultFromLibrary.YES or self.in_channels == 1 - ), "Pretrained weights are only available with in_channels=1. Otherwise, set pretrained to False." 
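By contrast with the fully parametrized `ResNetConfig`, the pre-trained variants only expose the head and the `pretrained` flag inherited from `PreTrainedConfig`; their `_type` is what routes them to the SOTA getters in the factory. A short sketch with illustrative values:

from clinicadl.monai_networks.config.resnet import ResNet18Config, ResNetConfig

custom = ResNetConfig(spatial_dims=3, in_channels=1, num_outputs=2)
sota = ResNet18Config(num_outputs=2, pretrained=True)

custom._type  # NetworkType.CUSTOM -> built from clinicadl's own ResNet
sota._type    # NetworkType.RESNET ("sota-ResNet") -> built via get_resnet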
- - return self - - -class SegResNetConfig(NetworkConfig): - """Config class for SegResNet.""" - - spatial_dims: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - init_filters: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - in_channels: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - out_channels: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - dropout_prob: Union[ - Optional[NonNegativeFloat], DefaultFromLibrary - ] = DefaultFromLibrary.YES - use_conv_final: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - blocks_down: Union[ - Tuple[PositiveInt, ...], DefaultFromLibrary - ] = DefaultFromLibrary.YES - blocks_up: Union[ - Tuple[PositiveInt, ...], DefaultFromLibrary - ] = DefaultFromLibrary.YES - upsample_mode: Union[UpsampleModes, DefaultFromLibrary] = DefaultFromLibrary.YES + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.RESNET_50 + + +class ResNet101Config(PreTrainedResNetConfig): + """Config class for ResNet-101.""" @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" - return ImplementedNetworks.SEG_RES_NET + return ImplementedNetworks.RESNET_101 + + +class ResNet152Config(PreTrainedResNetConfig): + """Config class for ResNet-152.""" @computed_field @property - def dim(self) -> int: - """Dimension of the images.""" - return self.spatial_dims if self.spatial_dims != DefaultFromLibrary.YES else 3 - - @field_validator("dropout_prob") - @classmethod - def validator_dropout(cls, v): - """Checks that dropout is between 0 and 1.""" - return cls.base_validator_dropout(v) + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.RESNET_152 diff --git a/clinicadl/monai_networks/config/senet.py b/clinicadl/monai_networks/config/senet.py new file mode 100644 index 000000000..79a356726 --- /dev/null +++ b/clinicadl/monai_networks/config/senet.py @@ -0,0 +1,60 @@ +from typing import Union + +from pydantic import PositiveInt, computed_field + +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkType, PreTrainedConfig +from .resnet import ResNetConfig + + +class SEResNetConfig(ResNetConfig): + """Config class for Squeeze-and-Excitation ResNet.""" + + se_reduction: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.SE_RESNET + + +class PreTrainedSEResNetConfig(PreTrainedConfig): + """Base config class for SOTA SE-ResNets.""" + + @computed_field + @property + def _type(self) -> NetworkType: + """To know where to look for the network.""" + return NetworkType.SE_RESNET + + +class SEResNet50Config(PreTrainedSEResNetConfig): + """Config class for SE-ResNet-50.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.SE_RESNET_50 + + +class SEResNet101Config(PreTrainedSEResNetConfig): + """Config class for SE-ResNet-101.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.SE_RESNET_101 + + +class SEResNet152Config(PreTrainedSEResNetConfig): + """Config class for SE-ResNet-152.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the 
network.""" + return ImplementedNetworks.SE_RESNET_152 diff --git a/clinicadl/monai_networks/config/unet.py b/clinicadl/monai_networks/config/unet.py index e7fd3498b..abd87817e 100644 --- a/clinicadl/monai_networks/config/unet.py +++ b/clinicadl/monai_networks/config/unet.py @@ -1,64 +1,38 @@ -from __future__ import annotations +from typing import Optional, Sequence, Union -from typing import Union - -from pydantic import ( - PositiveInt, - computed_field, - model_validator, -) +from pydantic import PositiveFloat, PositiveInt, computed_field +from clinicadl.monai_networks.nn.layers.utils import ActivationParameters from clinicadl.utils.factories import DefaultFromLibrary -from .base import VaryingDepthNetworkConfig -from .utils.enum import ImplementedNetworks - -__all__ = ["UNetConfig", "AttentionUnetConfig"] +from .base import ImplementedNetworks, NetworkConfig -class UNetConfig(VaryingDepthNetworkConfig): +class UNetConfig(NetworkConfig): """Config class for UNet.""" spatial_dims: PositiveInt in_channels: PositiveInt out_channels: PositiveInt - adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES + channels: Union[Sequence[PositiveInt], DefaultFromLibrary] = DefaultFromLibrary.YES + act: Union[ActivationParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" return ImplementedNetworks.UNET - @computed_field - @property - def dim(self) -> int: - """Dimension of the images.""" - return self.spatial_dims - - @model_validator(mode="after") - def channels_strides_validator(self): - """Checks coherence between parameters.""" - n_layers = len(self.channels) - assert ( - n_layers >= 2 - ), f"Channels must be at least of length 2. You passed {self.channels}." - assert ( - len(self.strides) == n_layers - 1 - ), f"Length of strides must be equal to len(channels)-1. You passed channels={self.channels} and strides={self.strides}." - for s in self.strides: - assert self._check_dimensions( - s - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for strides. You passed {s}." 
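With the old `channels`/`strides` validators gone, `UNetConfig` is now purely declarative (a sketch with arbitrary values); coherence between the parameters is presumably left to the network itself at build time.

from clinicadl.monai_networks.config.unet import UNetConfig

cfg = UNetConfig(spatial_dims=2, in_channels=1, out_channels=2, channels=(4, 8, 16))
cfg.name  # computed field -> "UNet"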
- - return self - -class AttentionUnetConfig(UNetConfig): - """Config class for Attention UNet.""" +class AttentionUNetConfig(UNetConfig): + """Config class for AttentionUNet.""" @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" return ImplementedNetworks.ATT_UNET diff --git a/clinicadl/monai_networks/config/utils/enum.py b/clinicadl/monai_networks/config/utils/enum.py deleted file mode 100644 index 941e34972..000000000 --- a/clinicadl/monai_networks/config/utils/enum.py +++ /dev/null @@ -1,129 +0,0 @@ -from enum import Enum - - -class ImplementedNetworks(str, Enum): - """Implemented neural networks in ClinicaDL.""" - - REGRESSOR = "Regressor" - CLASSIFIER = "Classifier" - DISCRIMINATOR = "Discriminator" - CRITIC = "Critic" - AE = "AutoEncoder" - VAE = "VarAutoEncoder" - DENSE_NET = "DenseNet" - FCN = "FullyConnectedNet" - VAR_FCN = "VarFullyConnectedNet" - GENERATOR = "Generator" - RES_NET = "ResNet" - RES_NET_FEATURES = "ResNetFeatures" - SEG_RES_NET = "SegResNet" - UNET = "UNet" - ATT_UNET = "AttentionUnet" - VIT = "ViT" - VIT_AE = "ViTAutoEnc" - - @classmethod - def _missing_(cls, value): - raise ValueError( - f"{value} is not implemented. Implemented neural networks are: " - + ", ".join([repr(m.value) for m in cls]) - ) - - -class CaseInsensitiveEnum(str, Enum): - @classmethod - def _missing_(cls, value): - if isinstance(value, str): - value = value.lower() - for member in cls: - if member.lower() == value: - return member - return None - - -class ImplementedActFunctions(CaseInsensitiveEnum): - """Supported activation functions in ClinicaDL.""" - - ELU = "elu" - RELU = "relu" - LEAKY_RELU = "leakyrelu" - PRELU = "prelu" - RELU6 = "relu6" - SELU = "selu" - CELU = "celu" - GELU = "gelu" - SIGMOID = "sigmoid" - TANH = "tanh" - SOFTMAX = "softmax" - LOGSOFTMAX = "logsoftmax" - SWISH = "swish" - MEMSWISH = "memswish" - MISH = "mish" - GEGLU = "geglu" - - -class ImplementedNormLayers(CaseInsensitiveEnum): - """Supported normalization layers in ClinicaDL.""" - - GROUP = "group" - LAYER = "layer" - LOCAL_RESPONSE = "localresponse" - SYNCBATCH = "syncbatch" - INSTANCE_NVFUSER = "instance_nvfuser" - BATCH = "batch" - INSTANCE = "instance" - - -class ResNetBlocks(str, Enum): - """Supported ResNet blocks.""" - - BASIC = "basic" - BOTTLENECK = "bottleneck" - - -class ShortcutTypes(str, Enum): - """Supported shortcut types for ResNets.""" - - A = "A" - B = "B" - - -class ResNets(str, Enum): - """Supported ResNet networks.""" - - RESNET_10 = "resnet10" - RESNET_18 = "resnet18" - RESNET_34 = "resnet34" - RESNET_50 = "resnet50" - RESNET_101 = "resnet101" - RESNET_152 = "resnet152" - RESNET_200 = "resnet200" - - -class UpsampleModes(str, Enum): - """Supported upsampling modes for ResNets.""" - - DECONV = "deconv" - NON_TRAINABLE = "nontrainable" - PIXEL_SHUFFLE = "pixelshuffle" - - -class PatchEmbeddingTypes(str, Enum): - """Supported patch embedding types for VITs.""" - - CONV = "conv" - PERCEPTRON = "perceptron" - - -class PosEmbeddingTypes(str, Enum): - """Supported positional embedding types for VITs.""" - - NONE = "none" - LEARNABLE = "learnable" - SINCOS = "sincos" - - -class ClassificationActivation(str, Enum): - """Supported activation layer for classification in ViT.""" - - TANH = "Tanh" diff --git a/clinicadl/monai_networks/config/vit.py b/clinicadl/monai_networks/config/vit.py index 206d0d881..5059df790 100644 --- a/clinicadl/monai_networks/config/vit.py +++ b/clinicadl/monai_networks/config/vit.py @@ 
-1,154 +1,84 @@
-from enum import Enum
-from typing import Optional, Tuple, Union
+from typing import Optional, Sequence, Union
 
-from pydantic import (
-    NonNegativeFloat,
-    PositiveInt,
-    computed_field,
-    field_validator,
-    model_validator,
-)
+from pydantic import PositiveFloat, PositiveInt, computed_field
 
+from clinicadl.monai_networks.nn.layers.utils import ActivationParameters
+from clinicadl.monai_networks.nn.vit import PosEmbedType
 from clinicadl.utils.factories import DefaultFromLibrary
 
-from .base import NetworkConfig
-from .utils.enum import (
-    ClassificationActivation,
-    ImplementedNetworks,
-    PatchEmbeddingTypes,
-    PosEmbeddingTypes,
-)
-
-__all__ = ["ViTConfig", "ViTAutoEncConfig"]
+from .base import ImplementedNetworks, NetworkConfig, NetworkType, PreTrainedConfig
 
 
 class ViTConfig(NetworkConfig):
     """Config class for ViT networks."""
 
-    in_channels: PositiveInt
-    img_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
-    patch_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
-
-    hidden_size: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
-    mlp_dim: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
+    in_shape: Sequence[PositiveInt]
+    patch_size: Union[Sequence[PositiveInt], PositiveInt]
+    num_outputs: Optional[PositiveInt]
+    embedding_dim: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
     num_layers: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
     num_heads: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
-    proj_type: Union[PatchEmbeddingTypes, DefaultFromLibrary] = DefaultFromLibrary.YES
+    mlp_dim: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
     pos_embed_type: Union[
-        PosEmbeddingTypes, DefaultFromLibrary
+        Optional[Union[str, PosEmbedType]], DefaultFromLibrary
     ] = DefaultFromLibrary.YES
-    classification: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES
-    num_classes: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
-    dropout_rate: Union[NonNegativeFloat, DefaultFromLibrary] = DefaultFromLibrary.YES
-    spatial_dims: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES
-    post_activation: Union[
-        Optional[ClassificationActivation], DefaultFromLibrary
+    output_act: Union[
+        Optional[ActivationParameters], DefaultFromLibrary
     ] = DefaultFromLibrary.YES
-    qkv_bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES
-    save_attn: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES
+    dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES
 
     @computed_field
     @property
-    def network(self) -> ImplementedNetworks:
+    def name(self) -> ImplementedNetworks:
         """The name of the network."""
         return ImplementedNetworks.VIT
 
+
+class PreTrainedViTConfig(PreTrainedConfig):
+    """Base config class for SOTA ViTs."""
+
+    @computed_field
+    @property
+    def _type(self) -> NetworkType:
+        """To know where to look for the network."""
+        return NetworkType.VIT
+
+
+class ViTB16Config(PreTrainedViTConfig):
+    """Config class for ViT-B/16."""
+
+    @computed_field
+    @property
+    def name(self) -> ImplementedNetworks:
+        """The name of the network."""
+        return ImplementedNetworks.VIT_B_16
+
+
+class ViTB32Config(PreTrainedViTConfig):
+    """Config class for ViT-B/32."""
+
     @computed_field
     @property
-    def dim(self) -> int:
-        """Dimension of the images."""
-        return self.spatial_dims if self.spatial_dims != DefaultFromLibrary.YES else 3
-
-    @field_validator("dropout_rate")
-    def validator_dropout(cls, v):
-        """Checks that dropout is between 0 and 1."""
-        return 
cls.base_validator_dropout(v) - - @model_validator(mode="before") - def check_einops(self): - """Checks if the library einops is installed.""" - from importlib import util - - spec = util.find_spec("einops") - if spec is None: - raise ModuleNotFoundError("einops is not installed") - return self - - @model_validator(mode="after") - def model_validator(self): - """Checks coherence between parameters.""" - assert self._check_dimensions( - self.img_size - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for img_size. You passed {self.img_size}." - assert self._check_dimensions( - self.patch_size - ), f"You must passed an int or a sequence of {self.dim} ints (the dimensionality of your images) for patch_size. You passed {self.patch_size}." - - if ( - self.hidden_size != DefaultFromLibrary.YES - and self.num_heads != DefaultFromLibrary.YES - ): - assert self._divide( - self.hidden_size, self.num_heads - ), f"hidden_size must be divisible by num_heads. You passed hidden_size={self.hidden_size} and num_heads={self.num_heads}." - elif ( - self.hidden_size != DefaultFromLibrary.YES - and self.num_heads == DefaultFromLibrary.YES - ): - raise ValueError("If you pass hidden_size, please also pass num_heads.") - elif ( - self.hidden_size == DefaultFromLibrary.YES - and self.num_heads != DefaultFromLibrary.YES - ): - raise ValueError("If you pass num_head, please also pass hidden_size.") - - return self - - def _divide( - self, - numerator: Union[int, Tuple[int, ...]], - denominator: Union[int, Tuple[int, ...]], - ) -> bool: - print(self.dim) - """Checks if numerator is divisible by denominator.""" - if isinstance(numerator, int): - numerator = (numerator,) * self.dim - if isinstance(denominator, int): - denominator = (denominator,) * self.dim - for n, d in zip(numerator, denominator): - if n % d != 0: - return False - return True - - -class ViTAutoEncConfig(ViTConfig): - """Config class for ViT autoencoders.""" - - out_channels: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - deconv_chns: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.VIT_B_32 + + +class ViTL16Config(PreTrainedViTConfig): + """Config class for ViT-L/16.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.VIT_L_16 + + +class ViTL32Config(PreTrainedViTConfig): + """Config class for ViT-L/32.""" @computed_field @property - def network(self) -> ImplementedNetworks: + def name(self) -> ImplementedNetworks: """The name of the network.""" - return ImplementedNetworks.VIT_AE - - @model_validator(mode="after") - def model_validator_bis(self): - """Checks coherence between parameters.""" - assert self._divide( - self.img_size, self.patch_size - ), f"img_size must be divisible by patch_size. You passed hidden_size={self.img_size} and num_heads={self.patch_size}." - assert self._is_sqrt( - self.patch_size - ), f"patch_size must be square number(s). You passed {self.patch_size}." 
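A sketch of the reworked ViT configuration (shapes are illustrative): MONAI's `img_size`/`hidden_size` pair is replaced by `in_shape`/`embedding_dim`, and the pre-trained variants follow the same pattern as the ResNets and DenseNets above.

from clinicadl.monai_networks.config.vit import ViTB16Config, ViTConfig

cfg = ViTConfig(in_shape=(1, 64, 64), patch_size=16, num_outputs=2)
sota = ViTB16Config(num_outputs=2, pretrained=True)
cfg.name   # -> "ViT"
sota.name  # -> "ViT-B/16"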
-
-        return self
-
-    def _is_sqrt(self, value: Union[int, Tuple[int, ...]]) -> bool:
-        """Checks if value is a square number."""
-        import math
-
-        if isinstance(value, int):
-            value = (value,) * self.dim
-        return all([int(math.sqrt(v)) == math.sqrt(v) for v in value])
+        return ImplementedNetworks.VIT_L_32
diff --git a/clinicadl/monai_networks/factory.py b/clinicadl/monai_networks/factory.py
index 1e509f3d1..36a4d1d46 100644
--- a/clinicadl/monai_networks/factory.py
+++ b/clinicadl/monai_networks/factory.py
@@ -1,38 +1,123 @@
-from typing import Tuple
+from copy import deepcopy
+from typing import Any, Callable, Tuple, Union
 
-import monai.networks.nets as networks
 import torch.nn as nn
+from pydantic import BaseModel
 
+import clinicadl.monai_networks.nn as nets
 from clinicadl.utils.factories import DefaultFromLibrary, get_args_and_defaults
 
-from .config.base import NetworkConfig
+from .config import (
+    ImplementedNetworks,
+    NetworkConfig,
+    NetworkType,
+    create_network_config,
+)
+from .config.conv_decoder import ConvDecoderOptions
+from .config.conv_encoder import ConvEncoderOptions
+from .config.mlp import MLPOptions
+from .nn import MLP, ConvDecoder, ConvEncoder
 
 
-def get_network(config: NetworkConfig) -> Tuple[nn.Module, NetworkConfig]:
+def get_network(
+    name: Union[str, ImplementedNetworks], return_config: bool = False, **kwargs: Any
+) -> Union[nn.Module, Tuple[nn.Module, NetworkConfig]]:
     """
-    Factory function to get a Neural Network from MONAI.
+    Factory function to get a neural network from its name and parameters.
+
+    Parameters
+    ----------
+    name : Union[str, ImplementedNetworks]
+        the name of the neural network. Check our documentation to see the
+        available networks.
+    return_config : bool (optional, default=False)
+        whether to also return the config class regrouping the parameters of the
+        neural network. Useful to keep track of the hyperparameters.
+    kwargs : Any
+        the parameters of the neural network. Check our documentation on networks to
+        know these parameters.
+
+    Returns
+    -------
+    nn.Module
+        the neural network.
+    NetworkConfig
+        the associated config class. Only returned if `return_config` is True.
+    """
+    config = create_network_config(name)(**kwargs)
+    network, updated_config = get_network_from_config(config)
+
+    return network if not return_config else (network, updated_config)
+
+
+def get_network_from_config(config: NetworkConfig) -> Tuple[nn.Module, NetworkConfig]:
+    """
+    Factory function to get a neural network from a NetworkConfig instance.
 
     Parameters
    ----------
     config : NetworkConfig
-        The config class with the parameters of the network.
+        the configuration object.
 
     Returns
     -------
     nn.Module
-        The neural network.
+        the neural network.
     NetworkConfig
-        The updated config class: the arguments set to default will be updated
-        with their effective values (the default values from the library).
+        the updated config class: the arguments set to default will be updated
+        with their effective values (the default values from the network).
        Useful for reproducibility.
""" - network_class = getattr(networks, config.network) - expected_args, config_dict = get_args_and_defaults(network_class.__init__) - for arg, value in config.model_dump().items(): - if arg in expected_args and value != DefaultFromLibrary.YES: - config_dict[arg] = value + config = deepcopy(config) + network_type = config._type # pylint: disable=protected-access + + if network_type == NetworkType.CUSTOM: + network_class: type[nn.Module] = getattr(nets, config.name) + if config.name == ImplementedNetworks.SE_RESNET: + _update_config_with_defaults( + config, getattr(nets, ImplementedNetworks.RESNET.value).__init__ + ) # SEResNet has some default values in ResNet + elif config.name == ImplementedNetworks.ATT_UNET: + _update_config_with_defaults( + config, getattr(nets, ImplementedNetworks.UNET.value).__init__ + ) + _update_config_with_defaults(config, network_class.__init__) + + config_dict = config.model_dump(exclude={"name", "_type"}) + network = network_class(**config_dict) + + else: # sota networks + if network_type == NetworkType.RESNET: + getter: Callable[..., nn.Module] = nets.get_resnet + elif network_type == NetworkType.DENSENET: + getter: Callable[..., nn.Module] = nets.get_densenet + elif network_type == NetworkType.SE_RESNET: + getter: Callable[..., nn.Module] = nets.get_seresnet + elif network_type == NetworkType.VIT: + getter: Callable[..., nn.Module] = nets.get_vit + _update_config_with_defaults(config, getter) # pylint: disable=possibly-used-before-assignment - network = network_class(**config_dict) - updated_config = config.model_copy(update=config_dict) + config_dict = config.model_dump(exclude={"_type"}) + network = getter(**config_dict) + + return network, config + + +def _update_config_with_defaults(config: BaseModel, function: Callable) -> BaseModel: + """ + Updates a config object by setting the parameters left to 'default' to their actual + default values, extracted from 'function'. 
+ """ + _, defaults = get_args_and_defaults(function) - return network, updated_config + for arg, value in config: + if isinstance(value, MLPOptions): + _update_config_with_defaults( + value, MLP.__init__ + ) # we need to update the sub config object + elif isinstance(value, ConvEncoderOptions): + _update_config_with_defaults(value, ConvEncoder.__init__) + elif isinstance(value, ConvDecoderOptions): + _update_config_with_defaults(value, ConvDecoder.__init__) + elif value == DefaultFromLibrary.YES and arg in defaults: + setattr(config, arg, defaults[arg]) diff --git a/clinicadl/monai_networks/nn/__init__.py b/clinicadl/monai_networks/nn/__init__.py new file mode 100644 index 000000000..0e1c7054a --- /dev/null +++ b/clinicadl/monai_networks/nn/__init__.py @@ -0,0 +1,13 @@ +from .att_unet import AttentionUNet +from .autoencoder import AutoEncoder +from .cnn import CNN +from .conv_decoder import ConvDecoder +from .conv_encoder import ConvEncoder +from .densenet import DenseNet, get_densenet +from .generator import Generator +from .mlp import MLP +from .resnet import ResNet, get_resnet +from .senet import SEResNet, get_seresnet +from .unet import UNet +from .vae import VAE +from .vit import ViT, get_vit diff --git a/clinicadl/monai_networks/nn/att_unet.py b/clinicadl/monai_networks/nn/att_unet.py new file mode 100644 index 000000000..77ef02081 --- /dev/null +++ b/clinicadl/monai_networks/nn/att_unet.py @@ -0,0 +1,207 @@ +from typing import Any + +import torch +from monai.networks.nets.attentionunet import AttentionBlock + +from .layers.unet import ConvBlock, UpSample +from .unet import BaseUNet + + +class AttentionUNet(BaseUNet): + """ + Attention-UNet based on [Attention U-Net: Learning Where to Look for the Pancreas](https://arxiv.org/pdf/1804.03999). + + The user can customize the number of encoding blocks, the number of channels in each block, as well as other parameters + like the activation function. + + .. warning:: AttentionUNet works only with images whose dimensions are high enough powers of 2. More precisely, if n is the + number of max pooling operation in your AttentionUNet (which is equal to `len(channels)-1`), the image must have :math:`2^{k}` + pixels in each dimension, with :math:`k \\geq n` (e.g. shape (:math:`2^{n}`, :math:`2^{n+3}`) for a 2D image). + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. + out_channels : int + number of output channels. + kwargs : Any + any optional argument accepted by (:py:class:`clinicadl.monai_networks.nn.unet.UNet`). 
+ + Examples + -------- + >>> AttentionUNet( + spatial_dims=2, + in_channels=1, + out_channels=2, + channels=(4, 8), + act="elu", + output_act=("softmax", {"dim": 1}), + dropout=0.1, + ) + AttentionUNet( + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(1, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (down1): DownBlock( + (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(4, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + ) + (upsample1): UpSample( + (0): Upsample(scale_factor=2.0, mode='nearest') + (1): Convolution( + (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (attention1): AttentionBlock( + (W_g): Sequential( + (0): Convolution( + (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) + ) + (1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + (W_x): Sequential( + (0): Convolution( + (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) + ) + (1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + (psi): Sequential( + (0): Convolution( + (conv): Conv2d(2, 1, kernel_size=(1, 1), stride=(1, 1)) + ) + (1): BatchNorm2d(1, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (2): Sigmoid() + ) + (relu): ReLU() + ) + (doubleconv1): ConvBlock( + (0): Convolution( + (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (reduce_channels): Convolution( + (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) + ) + (output_act): Softmax(dim=1) + ) + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + **kwargs: Any, + ): + super().__init__( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + **kwargs, + ) + + def _build_decoder(self): + for i in range(len(self.channels) - 1, 0, -1): + self.add_module( + f"upsample{i}", + UpSample( + spatial_dims=self.spatial_dims, + in_channels=self.channels[i], + 
out_channels=self.channels[i - 1],
+                act=self.act,
+                dropout=self.dropout,
+            ),
+        )
+        self.add_module(
+            f"attention{i}",
+            AttentionBlock(
+                spatial_dims=self.spatial_dims,
+                f_l=self.channels[i - 1],
+                f_g=self.channels[i - 1],
+                f_int=self.channels[i - 1] // 2,
+                dropout=self.dropout,
+            ),
+        )
+        self.add_module(
+            f"doubleconv{i}",
+            ConvBlock(
+                spatial_dims=self.spatial_dims,
+                in_channels=self.channels[i - 1] * 2,
+                out_channels=self.channels[i - 1],
+                act=self.act,
+                dropout=self.dropout,
+            ),
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x_history = [self.doubleconv(x)]
+
+        for i in range(1, len(self.channels)):
+            x = self.get_submodule(f"down{i}")(x_history[-1])
+            x_history.append(x)
+
+        x_history.pop()  # the output of the bottleneck is not used as a gating signal
+        for i in range(len(self.channels) - 1, 0, -1):
+            up = self.get_submodule(f"upsample{i}")(x)
+            att_res = self.get_submodule(f"attention{i}")(g=x_history.pop(), x=up)
+            merged = torch.cat((att_res, up), dim=1)
+            x = self.get_submodule(f"doubleconv{i}")(merged)
+
+        out = self.reduce_channels(x)
+
+        if self.output_act is not None:
+            out = self.output_act(out)
+
+        return out
diff --git a/clinicadl/monai_networks/nn/autoencoder.py b/clinicadl/monai_networks/nn/autoencoder.py
new file mode 100644
index 000000000..5cf823eeb
--- /dev/null
+++ b/clinicadl/monai_networks/nn/autoencoder.py
@@ -0,0 +1,416 @@
+from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import torch.nn as nn
+
+from .cnn import CNN
+from .conv_encoder import ConvEncoder
+from .generator import Generator
+from .layers.utils import (
+    ActivationParameters,
+    PoolingLayer,
+    SingleLayerPoolingParameters,
+    SingleLayerUnpoolingParameters,
+    UnpoolingLayer,
+    UnpoolingMode,
+)
+from .mlp import MLP
+from .utils import (
+    calculate_conv_out_shape,
+    calculate_convtranspose_out_shape,
+    calculate_pool_out_shape,
+)
+
+
+class AutoEncoder(nn.Sequential):
+    """
+    An autoencoder with convolutional and fully connected layers.
+
+    The user must pass the arguments to build an encoder, from its convolutional and
+    fully connected parts, and the decoder will be automatically built by taking the
+    symmetrical network.
+
+    More precisely, to build the decoder, the order of the encoding layers is reverted, convolutions are
+    replaced by transposed convolutions and pooling layers are replaced by either upsampling or transposed
+    convolution layers.
+    Please note that the order of `Activation`, `Dropout` and `Normalization`, defined with the
+    argument `adn_ordering` in `conv_args`, is the same for the encoder and the decoder.
+
+    Note that an `AutoEncoder` is an aggregation of a `CNN` (:py:class:`clinicadl.monai_networks.nn.
+    cnn.CNN`) and a `Generator` (:py:class:`clinicadl.monai_networks.nn.generator.Generator`).
+
+    Parameters
+    ----------
+    in_shape : Sequence[int]
+        sequence of integers stating the dimension of the input tensor (minus batch dimension).
+    latent_size : int
+        size of the latent vector.
+    conv_args : Dict[str, Any]
+        the arguments for the convolutional part of the encoder. The arguments are those accepted
+        by :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape`,
+        which is specified here. So, the only mandatory argument is `channels`.
+    mlp_args : Optional[Dict[str, Any]] (optional, default=None)
+        the arguments for the MLP part of the encoder. The arguments are those accepted by
+        :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels`, which is inferred
+        from the output of the convolutional part, and `out_channels`, which is set to `latent_size`.
+        So, the only mandatory argument is `hidden_channels`.\n
+        If None, the MLP part will be reduced to a single linear layer.
+    out_channels : Optional[int] (optional, default=None)
+        number of output channels. If None, the output will have the same number of channels as the
+        input.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.
+    unpooling_mode : Union[str, UnpoolingMode] (optional, default=UnpoolingMode.NEAREST)
+        type of unpooling. Can be either `"nearest"`, `"linear"`, `"bilinear"`, `"bicubic"`, `"trilinear"` or
+        `"convtranspose"`.\n
+        - `nearest`: unpooling is performed by upsampling with the :italic:`nearest` algorithm (see [PyTorch's Upsample layer]
+        (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html)).
+        - `linear`: unpooling is performed by upsampling with the :italic:`linear` algorithm. Only works with 1D images (excluding the
+        channel dimension).
+        - `bilinear`: unpooling is performed by upsampling with the :italic:`bilinear` algorithm. Only works with 2D images.
+        - `bicubic`: unpooling is performed by upsampling with the :italic:`bicubic` algorithm. Only works with 2D images.
+        - `trilinear`: unpooling is performed by upsampling with the :italic:`trilinear` algorithm. Only works with 3D images.
+        - `convtranspose`: unpooling is performed with a transposed convolution, whose parameters (kernel size, stride, etc.) are
+        computed to reverse the pooling operation.
+ + Examples + -------- + >>> AutoEncoder( + in_shape=(1, 16, 16), + latent_size=8, + conv_args={ + "channels": [2, 4], + "pooling_indices": [0], + "pooling": ("avg", {"kernel_size": 2}), + }, + mlp_args={"hidden_channels": [32], "output_act": "relu"}, + out_channels=2, + output_act="sigmoid", + unpooling_mode="bilinear", + ) + AutoEncoder( + (encoder): CNN( + (convolutions): ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) + (adn): ADN( + (N): InstanceNorm2d(2, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False) + (A): PReLU(num_parameters=1) + ) + ) + (pool0): AvgPool2d(kernel_size=2, stride=2, padding=0) + (layer1): Convolution( + (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=100, out_features=32, bias=True) + (adn): ADN( + (N): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (A): PReLU(num_parameters=1) + ) + ) + (output): Sequential( + (linear): Linear(in_features=32, out_features=8, bias=True) + (output_act): ReLU() + ) + ) + ) + (decoder): Generator( + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=8, out_features=32, bias=True) + (adn): ADN( + (N): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (A): PReLU(num_parameters=1) + ) + ) + (output): Sequential( + (linear): Linear(in_features=32, out_features=100, bias=True) + (output_act): ReLU() + ) + ) + (reshape): Reshape() + (convolutions): ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(4, 4, kernel_size=(3, 3), stride=(1, 1)) + (adn): ADN( + (N): InstanceNorm2d(4, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False) + (A): PReLU(num_parameters=1) + ) + ) + (unpool0): Upsample(size=(14, 14), mode='bilinear') + (layer1): Convolution( + (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (output_act): Sigmoid() + ) + ) + ) + + """ + + def __init__( + self, + in_shape: Sequence[int], + latent_size: int, + conv_args: Dict[str, Any], + mlp_args: Optional[Dict[str, Any]] = None, + out_channels: Optional[int] = None, + output_act: Optional[ActivationParameters] = None, + unpooling_mode: Union[str, UnpoolingMode] = UnpoolingMode.NEAREST, + ) -> None: + super().__init__() + self.in_shape = in_shape + self.latent_size = latent_size + self.out_channels = out_channels if out_channels else self.in_shape[0] + self._output_act = output_act + self.unpooling_mode = self._check_unpooling_mode(unpooling_mode) + self.spatial_dims = len(in_shape[1:]) + + self.encoder = CNN( + in_shape=self.in_shape, + num_outputs=latent_size, + conv_args=conv_args, + mlp_args=mlp_args, + ) + inter_channels = ( + conv_args["channels"][-1] if len(conv_args["channels"]) > 0 else in_shape[0] + ) + inter_shape = (inter_channels, *self.encoder.convolutions.final_size) + self.decoder = Generator( + latent_size=latent_size, + start_shape=inter_shape, + conv_args=self._invert_conv_args(conv_args, self.encoder.convolutions), + mlp_args=self._invert_mlp_args(mlp_args, self.encoder.mlp), + ) + + @classmethod + def _invert_mlp_args(cls, args: Dict[str, Any], mlp: MLP) -> Dict[str, Any]: + """ + Inverts arguments passed for the MLP part of the encoder, to get the MLP part of + the decoder.
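+        For example, encoder `hidden_channels` of [32, 16] would become [16, 32] in
+        the decoder (illustrative values).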
+ """ + if args is None: + args = {} + args["hidden_channels"] = cls._invert_list_arg(mlp.hidden_channels) + + return args + + def _invert_conv_args( + self, args: Dict[str, Any], conv: ConvEncoder + ) -> Dict[str, Any]: + """ + Inverts arguments passed for the convolutional part of the encoder, to get the convolutional + part of the decoder. + """ + if len(args["channels"]) == 0: + args["channels"] = [] + else: + args["channels"] = self._invert_list_arg(conv.channels[:-1]) + [ + self.out_channels + ] + args["kernel_size"] = self._invert_list_arg(conv.kernel_size) + args["stride"] = self._invert_list_arg(conv.stride) + args["dilation"] = self._invert_list_arg(conv.dilation) + args["padding"], args["output_padding"] = self._get_paddings_list(conv) + + args["unpooling_indices"] = ( + conv.n_layers - np.array(conv.pooling_indices) - 2 + ).astype(int) + args["unpooling"] = [] + sizes_before_pooling = [ + size + for size, (layer_name, _) in zip(conv.size_details, conv.named_children()) + if "pool" in layer_name + ] + for size, pooling in zip(sizes_before_pooling[::-1], conv.pooling[::-1]): + args["unpooling"].append(self._invert_pooling_layer(size, pooling)) + + if "pooling" in args: + del args["pooling"] + if "pooling_indices" in args: + del args["pooling_indices"] + + args["output_act"] = self._output_act if self._output_act else None + + return args + + @classmethod + def _invert_list_arg(cls, arg: Union[Any, List[Any]]) -> Union[Any, List[Any]]: + """ + Reverses lists. + """ + return list(arg[::-1]) if isinstance(arg, Sequence) else arg + + def _invert_pooling_layer( + self, + size_before_pool: Sequence[int], + pooling: SingleLayerPoolingParameters, + ) -> SingleLayerUnpoolingParameters: + """ + Gets the unpooling layer. + """ + if self.unpooling_mode == UnpoolingMode.CONV_TRANS: + return ( + UnpoolingLayer.CONV_TRANS, + self._invert_pooling_with_convtranspose(size_before_pool, pooling), + ) + else: + return ( + UnpoolingLayer.UPSAMPLE, + {"size": size_before_pool, "mode": self.unpooling_mode}, + ) + + @classmethod + def _invert_pooling_with_convtranspose( + cls, + size_before_pool: Sequence[int], + pooling: SingleLayerPoolingParameters, + ) -> Dict[str, Any]: + """ + Computes the arguments of the transposed convolution, based on the pooling layer. 
+ """ + pooling_mode, pooling_args = pooling + if ( + pooling_mode == PoolingLayer.ADAPT_AVG + or pooling_mode == PoolingLayer.ADAPT_MAX + ): + input_size_np = np.array(size_before_pool) + output_size_np = np.array(pooling_args["output_size"]) + stride_np = input_size_np // output_size_np # adaptive pooling formulas + kernel_size_np = ( + input_size_np - (output_size_np - 1) * stride_np + ) # adaptive pooling formulas + args = { + "kernel_size": tuple(int(k) for k in kernel_size_np), + "stride": tuple(int(s) for s in stride_np), + } + padding, output_padding = cls._find_convtranspose_paddings( + pooling_mode, + size_before_pool, + output_size=pooling_args["output_size"], + **args, + ) + + elif pooling_mode == PoolingLayer.MAX or pooling_mode == PoolingLayer.AVG: + if "stride" not in pooling_args: + pooling_args["stride"] = pooling_args["kernel_size"] + args = { + arg: value + for arg, value in pooling_args.items() + if arg in ["kernel_size", "stride", "padding", "dilation"] + } + padding, output_padding = cls._find_convtranspose_paddings( + pooling_mode, + size_before_pool, + **pooling_args, + ) + + args["padding"] = padding # pylint: disable=possibly-used-before-assignment + args["output_padding"] = output_padding # pylint: disable=possibly-used-before-assignment + + return args + + @classmethod + def _get_paddings_list(cls, conv: ConvEncoder) -> List[Tuple[int, ...]]: + """ + Finds output padding list. + """ + padding = [] + output_padding = [] + size_before_convs = [ + size + for size, (layer_name, _) in zip(conv.size_details, conv.named_children()) + if "layer" in layer_name + ] + for size, k, s, p, d in zip( + size_before_convs, + conv.kernel_size, + conv.stride, + conv.padding, + conv.dilation, + ): + p, out_p = cls._find_convtranspose_paddings( + "conv", size, kernel_size=k, stride=s, padding=p, dilation=d + ) + padding.append(p) + output_padding.append(out_p) + + return cls._invert_list_arg(padding), cls._invert_list_arg(output_padding) + + @classmethod + def _find_convtranspose_paddings( + cls, + layer_type: Union[Literal["conv"], PoolingLayer], + in_shape: Union[Sequence[int], int], + padding: Union[Sequence[int], int] = 0, + **kwargs, + ) -> Tuple[Tuple[int, ...], Tuple[int, ...]]: + """ + Finds padding and output padding necessary to recover the right image size after + a transposed convolution. + """ + if layer_type == "conv": + layer_out_shape = calculate_conv_out_shape(in_shape, **kwargs) + elif layer_type in list(PoolingLayer): + layer_out_shape = calculate_pool_out_shape(layer_type, in_shape, **kwargs) + + convt_out_shape = calculate_convtranspose_out_shape(layer_out_shape, **kwargs) # pylint: disable=possibly-used-before-assignment + output_padding = np.atleast_1d(in_shape) - np.atleast_1d(convt_out_shape) + + if ( + output_padding < 0 + ).any(): # can happen with ceil_mode=True for maxpool. 
Then, add some padding + padding = np.atleast_1d(padding) * np.ones_like( + output_padding + ) # to have the same shape as output_padding + padding[output_padding < 0] += np.maximum(np.abs(output_padding) // 2, 1)[ + output_padding < 0 + ] # //2 because 2*padding pixels are removed + + convt_out_shape = calculate_convtranspose_out_shape( + layer_out_shape, padding=padding, **kwargs + ) + output_padding = np.atleast_1d(in_shape) - np.atleast_1d(convt_out_shape) + padding = tuple(int(s) for s in padding) + + return padding, tuple(int(s) for s in output_padding) + + def _check_unpooling_mode( + self, unpooling_mode: Union[str, UnpoolingMode] + ) -> UnpoolingMode: + """ + Checks consistency between data shape and unpooling mode. + """ + unpooling_mode = UnpoolingMode(unpooling_mode) + if unpooling_mode == UnpoolingMode.LINEAR and len(self.in_shape) != 2: + raise ValueError( + f"unpooling mode `linear` only works with 2D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + elif unpooling_mode == UnpoolingMode.BILINEAR and len(self.in_shape) != 3: + raise ValueError( + f"unpooling mode `bilinear` only works with 3D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + elif unpooling_mode == UnpoolingMode.BICUBIC and len(self.in_shape) != 3: + raise ValueError( + f"unpooling mode `bicubic` only works with 3D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + elif unpooling_mode == UnpoolingMode.TRILINEAR and len(self.in_shape) != 4: + raise ValueError( + f"unpooling mode `trilinear` only works with 4D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + + return unpooling_mode diff --git a/clinicadl/monai_networks/nn/cnn.py b/clinicadl/monai_networks/nn/cnn.py new file mode 100644 index 000000000..1479ecaea --- /dev/null +++ b/clinicadl/monai_networks/nn/cnn.py @@ -0,0 +1,124 @@ +from typing import Any, Dict, Optional, Sequence + +import numpy as np +import torch.nn as nn + +from .conv_encoder import ConvEncoder +from .mlp import MLP +from .utils import check_conv_args, check_mlp_args + + +class CNN(nn.Sequential): + """ + A regressor/classifier with first convolutional layers and then fully connected layers. + + This network is a simple aggregation of a Fully Convolutional Network (:py:class:`clinicadl. + monai_networks.nn.conv_encoder.ConvEncoder`) and a Multi Layer Perceptron (:py:class:`clinicadl. + monai_networks.nn.mlp.MLP`). + + Parameters + ---------- + in_shape : Sequence[int] + sequence of integers stating the dimension of the input tensor (minus batch dimension). + num_outputs : int + number of variables to predict. + conv_args : Dict[str, Any] + the arguments for the convolutional part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape` + that is specified here. So, the only mandatory argument is `channels`. + mlp_args : Optional[Dict[str, Any]] (optional, default=None) + the arguments for the MLP part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is inferred + from the output of the convolutional part, and `out_channels` that is set to `num_outputs`. 
+ So, the only mandatory argument is `hidden_channels`.\n + If None, the MLP part will be reduced to a single linear layer. + + Examples + -------- + # a classifier + >>> CNN( + in_shape=(1, 10, 10), + num_outputs=2, + conv_args={"channels": [2, 4], "norm": None, "act": None}, + mlp_args={"hidden_channels": [5], "act": "elu", "norm": None, "output_act": "softmax"}, + ) + CNN( + (convolutions): ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=144, out_features=5, bias=True) + (adn): ADN( + (A): ELU(alpha=1.0) + ) + ) + (output): Sequential( + (linear): Linear(in_features=5, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + ) + + # a regressor + >>> CNN( + in_shape=(1, 10, 10), + num_outputs=2, + conv_args={"channels": [2, 4], "norm": None, "act": None}, + ) + CNN( + (convolutions): ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (output): Linear(in_features=144, out_features=2, bias=True) + ) + ) + """ + + def __init__( + self, + in_shape: Sequence[int], + num_outputs: int, + conv_args: Dict[str, Any], + mlp_args: Optional[Dict[str, Any]] = None, + ) -> None: + super().__init__() + check_conv_args(conv_args) + check_mlp_args(mlp_args) + self.in_shape = in_shape + self.num_outputs = num_outputs + + in_channels, *input_size = in_shape + spatial_dims = len(input_size) + + self.convolutions = ConvEncoder( + in_channels=in_channels, + spatial_dims=spatial_dims, + _input_size=tuple(input_size), + **conv_args, + ) + + n_channels = ( + conv_args["channels"][-1] if len(conv_args["channels"]) > 0 else in_shape[0] + ) + flatten_shape = int(np.prod(self.convolutions.final_size) * n_channels) + if mlp_args is None: + mlp_args = {"hidden_channels": []} + self.mlp = MLP( + in_channels=flatten_shape, + out_channels=num_outputs, + **mlp_args, + ) diff --git a/clinicadl/monai_networks/nn/conv_decoder.py b/clinicadl/monai_networks/nn/conv_decoder.py new file mode 100644 index 000000000..28c9be96f --- /dev/null +++ b/clinicadl/monai_networks/nn/conv_decoder.py @@ -0,0 +1,388 @@ +from typing import Callable, Optional, Sequence, Tuple + +import torch.nn as nn +from monai.networks.blocks import Convolution +from monai.networks.layers.utils import get_act_layer +from monai.utils.misc import ensure_tuple + +from .layers.unpool import get_unpool_layer +from .layers.utils import ( + ActFunction, + ActivationParameters, + ConvNormalizationParameters, + ConvNormLayer, + ConvParameters, + NormLayer, + SingleLayerUnpoolingParameters, + UnpoolingLayer, + UnpoolingParameters, +) +from .utils import ( + calculate_convtranspose_out_shape, + calculate_unpool_out_shape, + check_adn_ordering, + check_norm_layer, + check_pool_indices, + ensure_list_of_tuples, +) + + +class ConvDecoder(nn.Sequential): + """ + Fully convolutional decoder network with transposed convolutions, unpooling, normalization, activation + and dropout layers. + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. 
+ channels : Sequence[int] + sequence of integers stating the output channels of each transposed convolution. Thus, this + parameter also controls the number of transposed convolutions. + kernel_size : ConvParameters (optional, default=3) + the kernel size of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the kernel sizes for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + stride : ConvParameters (optional, default=1) + the stride of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the strides for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + padding : ConvParameters (optional, default=0) + the padding of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the paddings for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + output_padding : ConvParameters (optional, default=0) + the output padding of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the output paddings for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + dilation : ConvParameters (optional, default=1) + the dilation factor of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the dilations for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + unpooling : Optional[UnpoolingParameters] (optional, default=(UnpoolingLayer.UPSAMPLE, {"scale_factor": 2})) + the unpooling mode and the arguments of the unpooling layer, passed as `(unpooling_mode, arguments)`. + If None, no unpooling will be performed in the network.\n + `unpooling_mode` can be either `upsample` or `convtranspose`. 
Please refer to PyTorch's [Upsample] + (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html) or [ConvTranspose](https:// + pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html) to know the mandatory and optional + arguments.\n + If a list is passed, it will be understood as `(unpooling_mode, arguments)` for each unpooling layer.\n + Note: no need to pass `in_channels` and `out_channels` for `convtranspose` because the unpooling + layers are not intended to modify the number of channels. + unpooling_indices : Optional[Sequence[int]] (optional, default=None) + indices of the transposed convolution layers after which unpooling should be performed. + If None, no unpooling will be performed. An index equal to -1 will be understood as an unpooling layer before + the first transposed convolution. + act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU) + the activation function used after a transposed convolution layer, and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + output_act : Optional[ActivationParameters] (optional, default=None) + a potential activation layer applied to the output of the network. Should be passed in the same way as `act`. + If None, no last activation will be applied. + norm : Optional[ConvNormalizationParameters] (optional, default=NormLayer.INSTANCE) + the normalization type used after a transposed convolution layer, and optionally the arguments of the normalization + layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be + performed.\n + `norm_type` can be any value in {`batch`, `group`, `instance`, `syncbatch`}. Please refer to PyTorch's + [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and + optional arguments for each of them.\n + Please note that arguments `num_channels`, `num_features` of the normalization layer + should not be passed, as they are automatically inferred from the output of the previous layer in the network. + dropout : Optional[float] (optional, default=None) + dropout ratio. If None, no dropout. + bias : bool (optional, default=True) + whether to have a bias term in transposed convolutions. + adn_ordering : str (optional, default="NDA") + order of operations `Activation`, `Dropout` and `Normalization` after a transposed convolutional layer (except the + last one).\n + For example, if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n + Note: ADN will not be applied after the last convolution.
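+
+    A minimal sketch (illustrative values; with the default kernel size of 3, stride 1
+    and no unpooling, each transposed convolution grows the spatial size by 2):
+
+    >>> import torch
+    >>> decoder = ConvDecoder(spatial_dims=2, in_channels=8, channels=[4, 1], unpooling=None)
+    >>> decoder(torch.randn(2, 8, 8, 8)).shape
+    torch.Size([2, 1, 12, 12])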
+ + Examples + -------- + >>> ConvDecoder( + in_channels=16, + spatial_dims=2, + channels=[8, 4, 1], + kernel_size=(3, 5), + stride=2, + padding=[1, 0, 0], + output_padding=[0, 0, (1, 2)], + dilation=1, + unpooling=[("upsample", {"scale_factor": 2}), ("upsample", {"size": (32, 32)})], + unpooling_indices=[0, 1], + act="elu", + output_act="relu", + norm=("batch", {"eps": 1e-05}), + dropout=0.1, + bias=True, + adn_ordering="NDA", + ) + ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(16, 8, kernel_size=(3, 5), stride=(2, 2), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (unpool0): Upsample(scale_factor=2.0, mode='nearest') + (layer1): Convolution( + (conv): ConvTranspose2d(8, 4, kernel_size=(3, 5), stride=(2, 2)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (unpool1): Upsample(size=(32, 32), mode='nearest') + (layer2): Convolution( + (conv): ConvTranspose2d(4, 1, kernel_size=(3, 5), stride=(2, 2), output_padding=(1, 2)) + ) + (output_act): ReLU() + ) + + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + channels: Sequence[int], + kernel_size: ConvParameters = 3, + stride: ConvParameters = 1, + padding: ConvParameters = 0, + output_padding: ConvParameters = 0, + dilation: ConvParameters = 1, + unpooling: Optional[UnpoolingParameters] = ( + UnpoolingLayer.UPSAMPLE, + {"scale_factor": 2}, + ), + unpooling_indices: Optional[Sequence[int]] = None, + act: Optional[ActivationParameters] = ActFunction.PRELU, + output_act: Optional[ActivationParameters] = None, + norm: Optional[ConvNormalizationParameters] = ConvNormLayer.INSTANCE, + dropout: Optional[float] = None, + bias: bool = True, + adn_ordering: str = "NDA", + _input_size: Optional[Sequence[int]] = None, + ) -> None: + super().__init__() + + self._current_size = _input_size if _input_size else None + + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.channels = ensure_tuple(channels) + self.n_layers = len(self.channels) + + self.kernel_size = ensure_list_of_tuples( + kernel_size, self.spatial_dims, self.n_layers, "kernel_size" + ) + self.stride = ensure_list_of_tuples( + stride, self.spatial_dims, self.n_layers, "stride" + ) + self.padding = ensure_list_of_tuples( + padding, self.spatial_dims, self.n_layers, "padding" + ) + self.output_padding = ensure_list_of_tuples( + output_padding, self.spatial_dims, self.n_layers, "output_padding" + ) + self.dilation = ensure_list_of_tuples( + dilation, self.spatial_dims, self.n_layers, "dilation" + ) + + self.unpooling_indices = check_pool_indices(unpooling_indices, self.n_layers) + self.unpooling = self._check_unpool_layers(unpooling) + self.act = act + self.norm = check_norm_layer(norm) + if self.norm == NormLayer.LAYER: + raise ValueError("Layer normalization not implemented in ConvDecoder.") + self.dropout = dropout + self.bias = bias + self.adn_ordering = check_adn_ordering(adn_ordering) + + n_unpoolings = 0 + if self.unpooling and -1 in self.unpooling_indices: + unpooling_layer = self._get_unpool_layer( + self.unpooling[n_unpoolings], n_channels=self.in_channels + ) + self.add_module("init_unpool", unpooling_layer) + n_unpoolings += 1 + + echannel = self.in_channels + for i, (c, k, s, p, o_p, d) in enumerate( + zip( + self.channels, + self.kernel_size, + self.stride, + 
self.padding, + self.output_padding, + self.dilation, + ) + ): + conv_layer = self._get_convtranspose_layer( + in_channels=echannel, + out_channels=c, + kernel_size=k, + stride=s, + padding=p, + output_padding=o_p, + dilation=d, + is_last=(i == len(channels) - 1), + ) + self.add_module(f"layer{i}", conv_layer) + echannel = c # use the output channel number as the input for the next loop + if self.unpooling and i in self.unpooling_indices: + unpooling_layer = self._get_unpool_layer( + self.unpooling[n_unpoolings], n_channels=c + ) + self.add_module(f"unpool{i}", unpooling_layer) + n_unpoolings += 1 + + self.output_act = get_act_layer(output_act) if output_act else None + + @property + def final_size(self): + """ + To know the size of an image at the end of the network. + """ + return self._current_size + + @final_size.setter + def final_size(self, fct: Callable[[Tuple[int, ...]], Tuple[int, ...]]): + """ + Takes as input the function used to update the current image size. + """ + if self._current_size is not None: + self._current_size = fct(self._current_size) + + def _get_convtranspose_layer( + self, + in_channels: int, + out_channels: int, + kernel_size: Tuple[int, ...], + stride: Tuple[int, ...], + padding: Tuple[int, ...], + output_padding: Tuple[int, ...], + dilation: Tuple[int, ...], + is_last: bool, + ) -> Convolution: + """ + Gets the parametrized TransposedConvolution-ADN block and updates the current output size. + """ + self.final_size = lambda size: calculate_convtranspose_out_shape( + size, kernel_size, stride, padding, output_padding, dilation + ) + + return Convolution( + is_transposed=True, + conv_only=is_last, + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=stride, + kernel_size=kernel_size, + padding=padding, + output_padding=output_padding, + dilation=dilation, + act=self.act, + norm=self.norm, + dropout=self.dropout, + bias=self.bias, + adn_ordering=self.adn_ordering, + ) + + def _get_unpool_layer( + self, unpooling: SingleLayerUnpoolingParameters, n_channels: int + ) -> nn.Module: + """ + Gets the parametrized unpooling layer and updates the current output size. + """ + unpool_layer = get_unpool_layer( + unpooling, + spatial_dims=self.spatial_dims, + in_channels=n_channels, + out_channels=n_channels, + ) + self.final_size = lambda size: calculate_unpool_out_shape( + unpool_mode=unpooling[0], + in_shape=size, + **unpool_layer.__dict__, + ) + return unpool_layer + + @classmethod + def _check_single_unpool_layer( + cls, unpooling: SingleLayerUnpoolingParameters + ) -> SingleLayerUnpoolingParameters: + """ + Checks unpooling arguments for a single pooling layer. + """ + if not isinstance(unpooling, tuple) or len(unpooling) != 2: + raise ValueError( + "unpooling must be double (or a list of doubles) with first the type of unpooling and then the parameters of " + f"the unpooling layer in a dict. Got {unpooling}" + ) + _ = UnpoolingLayer(unpooling[0]) # check unpooling mode + args = unpooling[1] + if not isinstance(args, dict): + raise ValueError( + f"The arguments of the unpooling layer must be passed in a dict. Got {args}" + ) + + return unpooling + + def _check_unpool_layers( + self, unpooling: UnpoolingParameters + ) -> UnpoolingParameters: + """ + Checks argument unpooling. 
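+        For example, a single `("upsample", {"scale_factor": 2})` tuple would be
+        broadcast to every index in `unpooling_indices` (illustrative values).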
+ """ + if unpooling is None: + return unpooling + if isinstance(unpooling, list): + for unpool_layer in unpooling: + self._check_single_unpool_layer(unpool_layer) + if len(unpooling) != len(self.unpooling_indices): + raise ValueError( + "If you pass a list for unpooling, the size of that list must match " + f"the size of unpooling_indices. Got: unpooling={unpooling} and " + f"unpooling_indices={self.unpooling_indices}" + ) + elif isinstance(unpooling, tuple): + self._check_single_unpool_layer(unpooling) + unpooling = (unpooling,) * len(self.unpooling_indices) + else: + raise ValueError( + f"unpooling can be either None, a double (string, dictionary) or a list of such doubles. Got {unpooling}" + ) + + return unpooling diff --git a/clinicadl/monai_networks/nn/conv_encoder.py b/clinicadl/monai_networks/nn/conv_encoder.py new file mode 100644 index 000000000..f3ec66484 --- /dev/null +++ b/clinicadl/monai_networks/nn/conv_encoder.py @@ -0,0 +1,392 @@ +from typing import Callable, List, Optional, Sequence, Tuple + +import numpy as np +import torch.nn as nn +from monai.networks.blocks import Convolution +from monai.networks.layers.utils import get_act_layer, get_pool_layer +from monai.utils.misc import ensure_tuple + +from .layers.utils import ( + ActFunction, + ActivationParameters, + ConvNormalizationParameters, + ConvNormLayer, + ConvParameters, + NormLayer, + PoolingLayer, + PoolingParameters, + SingleLayerPoolingParameters, +) +from .utils import ( + calculate_conv_out_shape, + calculate_pool_out_shape, + check_adn_ordering, + check_norm_layer, + check_pool_indices, + ensure_list_of_tuples, +) + + +class ConvEncoder(nn.Sequential): + """ + Fully convolutional encoder network with convolutional, pooling, normalization, activation + and dropout layers. + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. + channels : Sequence[int] + sequence of integers stating the output channels of each convolutional layer. Thus, this + parameter also controls the number of convolutional layers. + kernel_size : ConvParameters (optional, default=3) + the kernel size of the convolutional layers. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the kernel sizes for each layer. + The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). + stride : ConvParameters (optional, default=1) + the stride of the convolutional layers. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the strides for each layer. + The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). + padding : ConvParameters (optional, default=0) + the padding of the convolutional layers. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. 
These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the paddings for each layer. + The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). + dilation : ConvParameters (optional, default=1) + the dilation factor of the convolutional layers. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the dilations for each layer. + The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). + pooling : Optional[PoolingParameters] (optional, default=(PoolingLayer.MAX, {"kernel_size": 2})) + the pooling mode and the arguments of the pooling layer, passed as `(pooling_mode, arguments)`. + If None, no pooling will be performed in the network.\n + `pooling_mode` can be either `max`, `avg`, `adaptivemax` or `adaptiveavg`. Please refer to PyTorch's [documentation] + (https://pytorch.org/docs/stable/nn.html#pooling-layers) to know the mandatory and optional arguments.\n + If a list is passed, it will be understood as `(pooling_mode, arguments)` for each pooling layer. + pooling_indices : Optional[Sequence[int]] (optional, default=None) + indices of the convolutional layers after which pooling should be performed. + If None, no pooling will be performed. An index equal to -1 will be understood as a pooling layer before + the first convolution. + act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU) + the activation function used after a convolutional layer, and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + output_act : Optional[ActivationParameters] (optional, default=None) + a potential activation layer applied to the output of the network. Should be passed in the same way as `act`. + If None, no last activation will be applied. + norm : Optional[ConvNormalizationParameters] (optional, default=NormLayer.INSTANCE) + the normalization type used after a convolutional layer, and optionally the arguments of the normalization + layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be + performed.\n + `norm_type` can be any value in {`batch`, `group`, `instance`, `syncbatch`}. Please refer to PyTorch's + [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and + optional arguments for each of them.\n + Please note that arguments `num_channels`, `num_features` of the normalization layer + should not be passed, as they are automatically inferred from the output of the previous layer in the network. + dropout : Optional[float] (optional, default=None) + dropout ratio. If None, no dropout. + bias : bool (optional, default=True) + whether to have a bias term in convolutions.
+ adn_ordering : str (optional, default="NDA") + order of operations `Activation`, `Dropout` and `Normalization` after a convolutional layer (except the last + one). + For example if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n + Note: ADN will not be applied after the last convolution. + + Examples + -------- + >>> ConvEncoder( + spatial_dims=2, + in_channels=1, + channels=[2, 4, 8], + kernel_size=(3, 5), + stride=1, + padding=[1, (0, 1), 0], + dilation=1, + pooling=[("max", {"kernel_size": 2}), ("avg", {"kernel_size": 2})], + pooling_indices=[0, 1], + act="elu", + output_act="relu", + norm=("batch", {"eps": 1e-05}), + dropout=0.1, + bias=True, + adn_ordering="NDA", + ) + ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 5), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (pool0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) + (layer1): Convolution( + (conv): Conv2d(2, 4, kernel_size=(3, 5), stride=(1, 1), padding=(0, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (pool1): AvgPool2d(kernel_size=2, stride=2, padding=0) + (layer2): Convolution( + (conv): Conv2d(4, 8, kernel_size=(3, 5), stride=(1, 1)) + ) + (output_act): ReLU() + ) + + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + channels: Sequence[int], + kernel_size: ConvParameters = 3, + stride: ConvParameters = 1, + padding: ConvParameters = 0, + dilation: ConvParameters = 1, + pooling: Optional[PoolingParameters] = ( + PoolingLayer.MAX, + {"kernel_size": 2}, + ), + pooling_indices: Optional[Sequence[int]] = None, + act: Optional[ActivationParameters] = ActFunction.PRELU, + output_act: Optional[ActivationParameters] = None, + norm: Optional[ConvNormalizationParameters] = ConvNormLayer.INSTANCE, + dropout: Optional[float] = None, + bias: bool = True, + adn_ordering: str = "NDA", + _input_size: Optional[Sequence[int]] = None, + ) -> None: + super().__init__() + + self._current_size = _input_size if _input_size else None + self._size_details = [self._current_size] if _input_size else None + + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.channels = ensure_tuple(channels) + self.n_layers = len(self.channels) + + self.kernel_size = ensure_list_of_tuples( + kernel_size, self.spatial_dims, self.n_layers, "kernel_size" + ) + self.stride = ensure_list_of_tuples( + stride, self.spatial_dims, self.n_layers, "stride" + ) + self.padding = ensure_list_of_tuples( + padding, self.spatial_dims, self.n_layers, "padding" + ) + self.dilation = ensure_list_of_tuples( + dilation, self.spatial_dims, self.n_layers, "dilation" + ) + + self.pooling_indices = check_pool_indices(pooling_indices, self.n_layers) + self.pooling = self._check_pool_layers(pooling) + self.act = act + self.norm = check_norm_layer(norm) + if self.norm == NormLayer.LAYER: + raise ValueError("Layer normalization not implemented in ConvEncoder.") + self.dropout = dropout + self.bias = bias + self.adn_ordering = check_adn_ordering(adn_ordering) + + n_poolings = 0 + if self.pooling and -1 in self.pooling_indices: + pooling_layer = self._get_pool_layer(self.pooling[n_poolings]) + self.add_module("init_pool", pooling_layer) + n_poolings += 1 + + echannel = 
self.in_channels + for i, (c, k, s, p, d) in enumerate( + zip( + self.channels, + self.kernel_size, + self.stride, + self.padding, + self.dilation, + ) + ): + conv_layer = self._get_conv_layer( + in_channels=echannel, + out_channels=c, + kernel_size=k, + stride=s, + padding=p, + dilation=d, + is_last=(i == len(channels) - 1), + ) + self.add_module(f"layer{i}", conv_layer) + echannel = c # use the output channel number as the input for the next loop + if self.pooling and i in self.pooling_indices: + pooling_layer = self._get_pool_layer(self.pooling[n_poolings]) + self.add_module(f"pool{i}", pooling_layer) + n_poolings += 1 + + self.output_act = get_act_layer(output_act) if output_act else None + + @property + def final_size(self): + """ + To know the size of an image at the end of the network. + """ + return self._current_size + + @property + def size_details(self): + """ + To know the sizes of intermediate images. + """ + return self._size_details + + @final_size.setter + def final_size(self, fct: Callable[[Tuple[int, ...]], Tuple[int, ...]]): + """ + Takes as input the function used to update the current image size. + """ + if self._current_size is not None: + self._current_size = fct(self._current_size) + self._size_details.append(self._current_size) + self._check_size() + + def _get_conv_layer( + self, + in_channels: int, + out_channels: int, + kernel_size: Tuple[int, ...], + stride: Tuple[int, ...], + padding: Tuple[int, ...], + dilation: Tuple[int, ...], + is_last: bool, + ) -> Convolution: + """ + Gets the parametrized Convolution-ADN block and updates the current output size. + """ + self.final_size = lambda size: calculate_conv_out_shape( + size, kernel_size, stride, padding, dilation + ) + + return Convolution( + conv_only=is_last, + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=stride, + kernel_size=kernel_size, + padding=padding, + dilation=dilation, + act=self.act, + norm=self.norm, + dropout=self.dropout, + bias=self.bias, + adn_ordering=self.adn_ordering, + ) + + def _get_pool_layer(self, pooling: SingleLayerPoolingParameters) -> nn.Module: + """ + Gets the parametrized pooling layer and updates the current output size. + """ + pool_layer = get_pool_layer(pooling, spatial_dims=self.spatial_dims) + old_size = self.final_size + self.final_size = lambda size: calculate_pool_out_shape( + pool_mode=pooling[0], in_shape=size, **pool_layer.__dict__ + ) + + if ( + self.final_size is not None + and (np.array(old_size) < np.array(self.final_size)).any() + ): + raise ValueError( + f"You passed {pooling} as a pooling layer. But before this layer, the size of the image " + f"was {old_size}. So, pooling can't be performed." + ) + + return pool_layer + + def _check_size(self) -> None: + """ + Checks that image size never reaches 0. + """ + if self._current_size is not None and (np.array(self._current_size) <= 0).any(): + raise ValueError( + f"Failed to build the network. An image of size 0 or less has been reached. Stopped at:\n {self}" + ) + + @classmethod + def _check_single_pool_layer( + cls, pooling: SingleLayerPoolingParameters + ) -> SingleLayerPoolingParameters: + """ + Checks pooling arguments for a single pooling layer. + """ + if not isinstance(pooling, tuple) or len(pooling) != 2: + raise ValueError( + "pooling must be a double (or a list of doubles) with first the type of pooling and then the parameters " + f"of the pooling layer in a dict. 
Got {pooling}" + ) + pooling_type = PoolingLayer(pooling[0]) + args = pooling[1] + if not isinstance(args, dict): + raise ValueError( + f"The arguments of the pooling layer must be passed in a dict. Got {args}" + ) + if ( + pooling_type == PoolingLayer.MAX or pooling_type == PoolingLayer.AVG + ) and "kernel_size" not in args: + raise ValueError( + f"For {pooling_type} pooling mode, `kernel_size` argument must be passed. " + f"Got {args}" + ) + elif ( + pooling_type == PoolingLayer.ADAPT_AVG + or pooling_type == PoolingLayer.ADAPT_MAX + ) and "output_size" not in args: + raise ValueError( + f"For {pooling_type} pooling mode, `output_size` argument must be passed. " + f"Got {args}" + ) + + def _check_pool_layers( + self, pooling: PoolingParameters + ) -> List[SingleLayerPoolingParameters]: + """ + Check argument pooling. + """ + if pooling is None: + return pooling + if isinstance(pooling, list): + for pool_layer in pooling: + self._check_single_pool_layer(pool_layer) + if len(pooling) != len(self.pooling_indices): + raise ValueError( + "If you pass a list for pooling, the size of that list must match " + f"the size of pooling_indices. Got: pooling={pooling} and " + f"pooling_indices={self.pooling_indices}" + ) + elif isinstance(pooling, tuple): + self._check_single_pool_layer(pooling) + pooling = [pooling] * len(self.pooling_indices) + else: + raise ValueError( + f"pooling can be either None, a double (string, dictionary) or a list of such doubles. Got {pooling}" + ) + + return pooling diff --git a/clinicadl/monai_networks/nn/densenet.py b/clinicadl/monai_networks/nn/densenet.py new file mode 100644 index 000000000..45d99cc71 --- /dev/null +++ b/clinicadl/monai_networks/nn/densenet.py @@ -0,0 +1,312 @@ +import re +from collections import OrderedDict +from enum import Enum +from typing import Any, Mapping, Optional, Sequence, Union + +import torch.nn as nn +from monai.networks.layers.utils import get_act_layer +from monai.networks.nets import DenseNet as BaseDenseNet +from torch.hub import load_state_dict_from_url +from torchvision.models.densenet import ( + DenseNet121_Weights, + DenseNet161_Weights, + DenseNet169_Weights, + DenseNet201_Weights, +) + +from .layers.utils import ActivationParameters + + +class DenseNet(nn.Sequential): + """ + DenseNet based on the [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993) paper. + Adapted from [MONAI's implementation](https://docs.monai.io/en/stable/networks.html#densenet). + + The user can customize the number of dense blocks, the number of dense layers in each block, as well as + other parameters like the growth rate. + + DenseNet is a fully convolutional network that can work with input of any size, provided that is it large + enough not to be reduced to a 1-pixel image (before the adaptative average pooling). + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + n_dense_layers : Sequence[int] (optional, default=(6, 12, 24, 16)) + number of dense layers in each dense block. Thus, this parameter also defines the number of dense blocks. + Default is set to DenseNet-121 parameter. + init_features : int (optional, default=64) + number of feature maps after the initial convolution. Default is set to 64, as in the original paper. 
growth_rate : int (optional, default=32) + how many feature maps to add at each dense layer. Default is set to 32, as in the original paper. + bottleneck_factor : int (optional, default=4) + multiplicative factor for bottleneck layers (1x1 convolutions). The output of these bottleneck layers will + have `bottleneck_factor * growth_rate` feature maps. Default is 4, as in the original paper. + act : ActivationParameters (optional, default=("relu", {"inplace": True})) + the activation function used in the convolutional part, and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them.\n + Default is "relu", as in the original paper. + output_act : Optional[ActivationParameters] (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network. + Should be passed in the same way as `act`. + If None, no last activation will be applied. + dropout : Optional[float] (optional, default=None) + dropout ratio. If None, no dropout. + + Examples + -------- + >>> DenseNet(spatial_dims=2, in_channels=1, num_outputs=2, output_act="softmax", n_dense_layers=(2, 2)) + DenseNet( + (features): Sequential( + (conv0): Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) + (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act0): ReLU(inplace=True) + (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + (denseblock1): _DenseBlock( + (denselayer1): _DenseLayer( + (layers): Sequential( + (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + ) + ) + (denselayer2): _DenseLayer( + (layers): Sequential( + (norm1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv1): Conv2d(96, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + ) + ) + ) + (transition1): _Transition( + (norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act): ReLU(inplace=True) + (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) + (pool): AvgPool2d(kernel_size=2, stride=2, padding=0) + ) + (denseblock2): _DenseBlock( + (denselayer1): _DenseLayer( + (layers): Sequential( + (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv2): Conv2d(128, 32, kernel_size=(3, 3),
stride=(1, 1), padding=(1, 1), bias=False) + ) + ) + (denselayer2): _DenseLayer( + (layers): Sequential( + (norm1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv1): Conv2d(96, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + ) + ) + ) + (norm5): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + (fc): Sequential( + (act): ReLU(inplace=True) + (pool): AdaptiveAvgPool2d(output_size=1) + (flatten): Flatten(start_dim=1, end_dim=-1) + (out): Linear(in_features=128, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + num_outputs: Optional[int], + n_dense_layers: Sequence[int] = (6, 12, 24, 16), + init_features: int = 64, + growth_rate: int = 32, + bottleneck_factor: int = 4, + act: ActivationParameters = ("relu", {"inplace": True}), + output_act: Optional[ActivationParameters] = None, + dropout: Optional[float] = None, + ) -> None: + super().__init__() + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.num_outputs = num_outputs + self.n_dense_layers = n_dense_layers + self.init_features = init_features + self.growth_rate = growth_rate + self.bottleneck_factor = bottleneck_factor + self.act = act + self.dropout = dropout + + base_densenet = BaseDenseNet( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=num_outputs if num_outputs else 1, + init_features=init_features, + growth_rate=growth_rate, + block_config=n_dense_layers, + bn_size=bottleneck_factor, + act=act, + dropout_prob=dropout if dropout else 0.0, + ) + self.features = base_densenet.features + self.fc = base_densenet.class_layers if num_outputs else None + if self.fc: + self.fc.output_act = get_act_layer(output_act) if output_act else None + + self._rename_act(self) + + @classmethod + def _rename_act(cls, module: nn.Module) -> None: + """ + Renames activation layers from 'relu' to 'act'. + """ + for name, layer in list(module.named_children()): + if "relu" in name: + module._modules = OrderedDict( # pylint: disable=protected-access + [ + (key.replace("relu", "act"), sub_m) + for key, sub_m in module._modules.items() # pylint: disable=protected-access + ] + ) + else: + cls._rename_act(layer) + + +class SOTADenseNet(str, Enum): + """Supported DenseNet networks.""" + + DENSENET_121 = "DenseNet-121" + DENSENET_161 = "DenseNet-161" + DENSENET_169 = "DenseNet-169" + DENSENET_201 = "DenseNet-201" + + +def get_densenet( + name: Union[str, SOTADenseNet], + num_outputs: Optional[int], + output_act: ActivationParameters = None, + pretrained: bool = False, +) -> DenseNet: + """ + To get a DenseNet implemented in the [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993) + paper. + + Only the last fully connected layer will be changed to match `num_outputs`. + + The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not + use pretrained weights, as it is task specific. + + .. warning:: `DenseNet-121`, `DenseNet-161`, `DenseNet-169` and `DenseNet-201` only work with 2D images with 3 channels.
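+
+    A hypothetical call (argument values are illustrative only):
+
+    >>> densenet = get_densenet("DenseNet-121", num_outputs=2, output_act="softmax")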
+ + Notes: `torchvision` does not provide an implementation for `DenseNet-264` but provides a `DenseNet-161` that is not + mentioned in the paper. + + Parameters + ---------- + name : Union[str, SOTADenseNet] + The name of the DenseNet. Available networks are `DenseNet-121`, `DenseNet-161`, `DenseNet-169` and `DenseNet-201`. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + output_act : ActivationParameters (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + pretrained : bool (optional, default=False) + whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// + pytorch.org/vision/main/models/densenet.html). + + Returns + ------- + DenseNet + The network, with potentially pretrained weights. + """ + name = SOTADenseNet(name) + if name == SOTADenseNet.DENSENET_121: + n_dense_layers = (6, 12, 24, 16) + growth_rate = 32 + init_features = 64 + model_url = DenseNet121_Weights.DEFAULT.url + elif name == SOTADenseNet.DENSENET_161: + n_dense_layers = (6, 12, 36, 24) + growth_rate = 48 + init_features = 96 + model_url = DenseNet161_Weights.DEFAULT.url + elif name == SOTADenseNet.DENSENET_169: + n_dense_layers = (6, 12, 32, 32) + growth_rate = 32 + init_features = 64 + model_url = DenseNet169_Weights.DEFAULT.url + elif name == SOTADenseNet.DENSENET_201: + n_dense_layers = (6, 12, 48, 32) + growth_rate = 32 + init_features = 64 + model_url = DenseNet201_Weights.DEFAULT.url + + # pylint: disable=possibly-used-before-assignment + densenet = DenseNet( + spatial_dims=2, + in_channels=3, + num_outputs=num_outputs, + n_dense_layers=n_dense_layers, + growth_rate=growth_rate, + init_features=init_features, + output_act=output_act, + ) + if not pretrained: + return densenet + + pretrained_dict = load_state_dict_from_url(model_url, progress=True) + features_state_dict = { + k.replace("features.", ""): v + for k, v in pretrained_dict.items() + if "classifier" not in k + } + densenet.features.load_state_dict(_state_dict_adapter(features_state_dict)) + + return densenet + + +def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: + """ + To update the old nomenclature in the pretrained state dict. + Adapted from `_load_state_dict` in [torchvision.models.densenet](https://pytorch.org/vision/main + /_modules/torchvision/models/densenet.html).
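+    For example, an old key like `denseblock1.denselayer1.norm.1.weight` would be
+    renamed to `denseblock1.denselayer1.layers.norm1.weight` (illustrative key).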
+ """ + pattern = re.compile( + r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" + ) + + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + new_key = re.sub(r"^(.*denselayer\d+)\.", r"\1.layers.", new_key) + state_dict[new_key] = state_dict[key] + del state_dict[key] + + return state_dict diff --git a/clinicadl/monai_networks/nn/generator.py b/clinicadl/monai_networks/nn/generator.py new file mode 100644 index 000000000..5f68a2e58 --- /dev/null +++ b/clinicadl/monai_networks/nn/generator.py @@ -0,0 +1,131 @@ +from typing import Any, Dict, Optional, Sequence + +import numpy as np +import torch.nn as nn +from monai.networks.layers.simplelayers import Reshape + +from .conv_decoder import ConvDecoder +from .mlp import MLP +from .utils import check_conv_args, check_mlp_args + + +class Generator(nn.Sequential): + """ + A generator with first fully connected layers and then convolutional layers. + + This network is a simple aggregation of a Multi Layer Perceptron (:py:class: + `clinicadl.monai_networks.nn.mlp.MLP`) and a Fully Convolutional Network + (:py:class:`clinicadl.monai_networks.nn.conv_decoder.ConvDecoder`). + + Parameters + ---------- + latent_size : int + size of the latent vector. + start_shape : Sequence[int] + sequence of integers stating the initial shape of the image, i.e. the shape at the + beginning of the convolutional part (minus batch dimension, but including the number + of channels).\n + Thus, `start_shape` determines the dimension of the output of the generator (the exact + shape depends on the convolutional part and can be accessed via the class attribute + `output_shape`). + conv_args : Dict[str, Any] + the arguments for the convolutional part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.conv_decoder.ConvDecoder`, except `in_shape` that + is specified here via `start_shape`. So, the only mandatory argument is `channels`. + mlp_args : Optional[Dict[str, Any]] (optional, default=None) + the arguments for the MLP part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is specified + here via `latent_size`, and `out_channels` that is inferred from `start_shape`. + So, the only mandatory argument is `hidden_channels`.\n + If None, the MLP part will be reduced to a single linear layer. 
+ + Examples + -------- + >>> Generator( + latent_size=8, + start_shape=(8, 2, 2), + conv_args={"channels": [4, 2], "norm": None, "act": None}, + mlp_args={"hidden_channels": [16], "act": "elu", "norm": None}, + ) + Generator( + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=8, out_features=16, bias=True) + (adn): ADN( + (A): ELU(alpha=1.0) + ) + ) + (output): Linear(in_features=16, out_features=32, bias=True) + ) + (reshape): Reshape() + (convolutions): ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(8, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + ) + + >>> Generator( + latent_size=8, + start_shape=(8, 2, 2), + conv_args={"channels": [4, 2], "norm": None, "act": None, "output_act": "relu"}, + ) + Generator( + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (output): Linear(in_features=8, out_features=32, bias=True) + ) + (reshape): Reshape() + (convolutions): ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(8, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (output_act): ReLU() + ) + ) + """ + + def __init__( + self, + latent_size: int, + start_shape: Sequence[int], + conv_args: Dict[str, Any], + mlp_args: Optional[Dict[str, Any]] = None, + ) -> None: + super().__init__() + check_conv_args(conv_args) + check_mlp_args(mlp_args) + self.latent_size = latent_size + self.start_shape = start_shape + + flatten_shape = int(np.prod(start_shape)) + if mlp_args is None: + mlp_args = {"hidden_channels": []} + self.mlp = MLP( + in_channels=latent_size, + out_channels=flatten_shape, + **mlp_args, + ) + + self.reshape = Reshape(*start_shape) + inter_channels, *inter_size = start_shape + self.convolutions = ConvDecoder( + in_channels=inter_channels, + spatial_dims=len(inter_size), + _input_size=inter_size, + **conv_args, + ) + + n_channels = ( + conv_args["channels"][-1] + if len(conv_args["channels"]) > 0 + else start_shape[0] + ) + self.output_shape = (n_channels, *self.convolutions.final_size) diff --git a/clinicadl/monai_networks/nn/layers/__init__.py b/clinicadl/monai_networks/nn/layers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/monai_networks/nn/layers/resnet.py b/clinicadl/monai_networks/nn/layers/resnet.py new file mode 100644 index 000000000..c115da512 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/resnet.py @@ -0,0 +1,124 @@ +from collections.abc import Callable +from typing import Optional + +import torch +import torch.nn as nn +from monai.networks.layers.factories import Conv, Norm +from monai.networks.layers.utils import get_act_layer + +from .utils import ActivationParameters + + +class ResNetBlock(nn.Module): + """ + ResNet basic block. 
Adapted from MONAI's implementation: + https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ + monai/networks/nets/resnet.py#L71 + """ + + expansion = 1 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + act: ActivationParameters = ("relu", {"inplace": True}), + ) -> None: + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type( # pylint: disable=not-callable + in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False + ) + self.norm1 = norm_type(planes) # pylint: disable=not-callable + self.act1 = get_act_layer(name=act) + self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False) # pylint: disable=not-callable + self.norm2 = norm_type(planes) # pylint: disable=not-callable + self.downsample = downsample + self.act2 = get_act_layer(name=act) + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out: torch.Tensor = self.conv1(x) + out = self.norm1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.act2(out) + + return out + + +class ResNetBottleneck(nn.Module): + """ + ResNet bottleneck block. Adapted from MONAI's implementation: + https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ + monai/networks/nets/resnet.py#L124 + """ + + expansion = 4 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + act: ActivationParameters = ("relu", {"inplace": True}), + ) -> None: + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False) # pylint: disable=not-callable + self.norm1 = norm_type(planes) # pylint: disable=not-callable + self.act1 = get_act_layer(name=act) + self.conv2 = conv_type( # pylint: disable=not-callable + planes, planes, kernel_size=3, stride=stride, padding=1, bias=False + ) + self.norm2 = norm_type(planes) # pylint: disable=not-callable + self.act2 = get_act_layer(name=act) + self.conv3 = conv_type( # pylint: disable=not-callable + planes, planes * self.expansion, kernel_size=1, bias=False + ) + self.norm3 = norm_type(planes * self.expansion) # pylint: disable=not-callable + self.downsample = downsample + self.act3 = get_act_layer(name=act) + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out: torch.Tensor = self.conv1(x) + out = self.norm1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.act2(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.act3(out) + + return out diff --git a/clinicadl/monai_networks/nn/layers/senet.py b/clinicadl/monai_networks/nn/layers/senet.py new file mode 100644 index 000000000..8847ef577 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/senet.py @@ -0,0 +1,142 @@ +from typing import Callable, Optional + +import torch +import torch.nn as nn +from monai.networks.blocks.squeeze_and_excitation import ChannelSELayer +from monai.networks.layers.factories import Conv, Norm +from 
monai.networks.layers.utils import get_act_layer + +from .utils import ActivationParameters + + +class SEResNetBlock(nn.Module): + """ + ResNet basic block. Adapted from MONAI's ResNetBlock: + https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ + monai/networks/nets/resnet.py#L71 + """ + + expansion = 1 + reduction = 16 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + act: ActivationParameters = ("relu", {"inplace": True}), + ) -> None: + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type( # pylint: disable=not-callable + in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False + ) + self.norm1 = norm_type(planes) # pylint: disable=not-callable + self.act1 = get_act_layer(name=act) + self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False) # pylint: disable=not-callable + self.norm2 = norm_type(planes) # pylint: disable=not-callable + self.se_layer = ChannelSELayer( + spatial_dims=spatial_dims, + in_channels=planes, + r=self.reduction, + acti_type_1=("relu", {"inplace": True}), + acti_type_2="sigmoid", + ) + self.downsample = downsample + self.act2 = get_act_layer(name=act) + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out = self.se_layer(out) + out += residual + out = self.act2(out) + + return out + + +class SEResNetBottleneck(nn.Module): + """ + ResNet bottleneck block. 
Adapted from MONAI's ResNetBottleneck:
+    https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/
+    monai/networks/nets/resnet.py#L124
+    """
+
+    expansion = 4
+    reduction = 16
+
+    def __init__(
+        self,
+        in_planes: int,
+        planes: int,
+        spatial_dims: int,
+        stride: int = 1,
+        downsample: Optional[nn.Module] = None,
+        act: ActivationParameters = ("relu", {"inplace": True}),
+    ) -> None:
+        super().__init__()
+
+        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
+        norm_type: Callable = Norm[Norm.BATCH, spatial_dims]
+
+        self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False)  # pylint: disable=not-callable
+        self.norm1 = norm_type(planes)  # pylint: disable=not-callable
+        self.act1 = get_act_layer(name=act)
+        self.conv2 = conv_type(  # pylint: disable=not-callable
+            planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
+        )
+        self.norm2 = norm_type(planes)  # pylint: disable=not-callable
+        self.act2 = get_act_layer(name=act)
+        self.conv3 = conv_type(  # pylint: disable=not-callable
+            planes, planes * self.expansion, kernel_size=1, bias=False
+        )
+        self.norm3 = norm_type(planes * self.expansion)  # pylint: disable=not-callable
+        self.se_layer = ChannelSELayer(
+            spatial_dims=spatial_dims,
+            in_channels=planes * self.expansion,
+            r=self.reduction,
+            acti_type_1=("relu", {"inplace": True}),
+            acti_type_2="sigmoid",
+        )
+        self.downsample = downsample
+        self.act3 = get_act_layer(name=act)
+        self.stride = stride
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        residual = x
+
+        out = self.conv1(x)
+        out = self.norm1(out)
+        out = self.act1(out)
+
+        out = self.conv2(out)
+        out = self.norm2(out)
+        out = self.act2(out)
+
+        out = self.conv3(out)
+        out = self.norm3(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out = self.se_layer(out)
+        out += residual
+        out = self.act3(out)
+
+        return out
diff --git a/clinicadl/monai_networks/nn/layers/unet.py b/clinicadl/monai_networks/nn/layers/unet.py
new file mode 100644
index 000000000..2186425be
--- /dev/null
+++ b/clinicadl/monai_networks/nn/layers/unet.py
@@ -0,0 +1,102 @@
+from typing import Optional
+
+import torch.nn as nn
+from monai.networks.blocks.convolutions import Convolution
+from monai.networks.layers.utils import get_pool_layer
+
+from .utils import ActFunction, ActivationParameters, NormLayer
+
+
+class ConvBlock(nn.Sequential):
+    """UNet double convolution block."""
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        out_channels: int,
+        act: ActivationParameters = ActFunction.RELU,
+        dropout: Optional[float] = None,
+    ):
+        super().__init__()
+        self.add_module(
+            "0",
+            Convolution(
+                spatial_dims=spatial_dims,
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=3,
+                strides=1,
+                padding=None,
+                adn_ordering="NDA",
+                act=act,
+                norm=NormLayer.BATCH,
+                dropout=dropout,
+            ),
+        )
+        self.add_module(
+            "1",
+            Convolution(
+                spatial_dims=spatial_dims,
+                in_channels=out_channels,
+                out_channels=out_channels,
+                kernel_size=3,
+                strides=1,
+                padding=None,
+                adn_ordering="NDA",
+                act=act,
+                norm=NormLayer.BATCH,
+                dropout=dropout,
+            ),
+        )
+
+
+class UpSample(nn.Sequential):
+    """UNet up-conv block with first upsampling and then a convolution."""
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        out_channels: int,
+        act: ActivationParameters = ActFunction.RELU,
+        dropout: Optional[float] = None,
+    ):
+        super().__init__()
+        self.add_module("0", nn.Upsample(scale_factor=2))
+        self.add_module(
+            "1",
+            Convolution(
spatial_dims,
+                in_channels,
+                out_channels,
+                strides=1,
+                kernel_size=3,
+                act=act,
+                adn_ordering="NDA",
+                norm=NormLayer.BATCH,
+                dropout=dropout,
+            ),
+        )
+
+
+class DownBlock(nn.Sequential):
+    """UNet down block with first max pooling and then two convolutions."""
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        out_channels: int,
+        act: ActivationParameters = ActFunction.RELU,
+        dropout: Optional[float] = None,
+    ):
+        super().__init__()
+        self.pool = get_pool_layer(("max", {"kernel_size": 2}), spatial_dims)
+        self.doubleconv = ConvBlock(
+            spatial_dims=spatial_dims,
+            in_channels=in_channels,
+            out_channels=out_channels,
+            act=act,
+            dropout=dropout,
+        )
diff --git a/clinicadl/monai_networks/nn/layers/unpool.py b/clinicadl/monai_networks/nn/layers/unpool.py
new file mode 100644
index 000000000..1c90fde90
--- /dev/null
+++ b/clinicadl/monai_networks/nn/layers/unpool.py
@@ -0,0 +1,87 @@
+from typing import Any, Dict, Optional, Tuple, Type, Union
+
+import torch.nn as nn
+from monai.networks.layers.factories import LayerFactory, split_args
+from monai.utils import has_option
+
+from .utils import UnpoolingLayer
+
+Unpool = LayerFactory(
+    name="Unpooling layers", description="Factory for creating unpooling layers."
+)
+
+
+@Unpool.factory_function("upsample")
+def upsample_factory(dim: int) -> Type[nn.Upsample]:
+    """
+    Upsample layer.
+    """
+    return nn.Upsample
+
+
+@Unpool.factory_function("convtranspose")
+def convtranspose_factory(
+    dim: int,
+) -> Type[Union[nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]]:
+    """
+    Transposed convolutional layers in 1,2,3 dimensions.
+
+    Parameters
+    ----------
+    dim : int
+        desired dimension of the transposed convolutional layer.
+
+    Returns
+    -------
+    type[Union[nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]]
+        ConvTranspose[dim]d
+    """
+    types = (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d)
+    return types[dim - 1]
+
+
+def get_unpool_layer(
+    name: Union[UnpoolingLayer, Tuple[UnpoolingLayer, Dict[str, Any]]],
+    spatial_dims: int,
+    in_channels: Optional[int] = None,
+    out_channels: Optional[int] = None,
+) -> nn.Module:
+    """
+    Creates an unpooling layer instance.
+
+    Parameters
+    ----------
+    name : Union[UnpoolingLayer, Tuple[UnpoolingLayer, Dict[str, Any]]]
+        the unpooling type, potentially with arguments in a dict.
+    spatial_dims : int
+        number of spatial dimensions of the input.
+    in_channels : Optional[int] (optional, default=None)
+        number of input channels if the unpool layer requires this parameter.
+    out_channels : Optional[int] (optional, default=None)
+        number of output channels if the unpool layer requires this parameter.
+
+    Returns
+    -------
+    nn.Module
+        the parametrized unpooling layer.
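+
+    Examples
+    --------
+    A minimal sketch of the expected usage:
+
+    >>> get_unpool_layer(
+    ...     ("convtranspose", {"kernel_size": 2, "stride": 2}),
+    ...     spatial_dims=2,
+    ...     in_channels=8,
+    ...     out_channels=4,
+    ... )
+    ConvTranspose2d(8, 4, kernel_size=(2, 2), stride=(2, 2))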
+ """ + unpool_name, unpool_args = split_args(name) + unpool_name = UnpoolingLayer(unpool_name) + unpool_type = Unpool[unpool_name, spatial_dims] + kw_args = dict(unpool_args) + if has_option(unpool_type, "in_channels") and "in_channels" not in kw_args: + kw_args["in_channels"] = in_channels + if has_option(unpool_type, "out_channels") and "out_channels" not in kw_args: + kw_args["out_channels"] = out_channels + + return unpool_type(**kw_args) # pylint: disable=not-callable diff --git a/clinicadl/monai_networks/nn/layers/utils/__init__.py b/clinicadl/monai_networks/nn/layers/utils/__init__.py new file mode 100644 index 000000000..5c080fffd --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/utils/__init__.py @@ -0,0 +1,19 @@ +from .enum import ( + ActFunction, + ConvNormLayer, + NormLayer, + PoolingLayer, + UnpoolingLayer, + UnpoolingMode, +) +from .types import ( + ActivationParameters, + ConvNormalizationParameters, + ConvParameters, + NormalizationParameters, + PoolingParameters, + SingleLayerConvParameter, + SingleLayerPoolingParameters, + SingleLayerUnpoolingParameters, + UnpoolingParameters, +) diff --git a/clinicadl/monai_networks/nn/layers/utils/enum.py b/clinicadl/monai_networks/nn/layers/utils/enum.py new file mode 100644 index 000000000..695776551 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/utils/enum.py @@ -0,0 +1,65 @@ +from clinicadl.utils.enum import CaseInsensitiveEnum + + +class UnpoolingLayer(CaseInsensitiveEnum): + """Supported unpooling layers in ClinicaDL.""" + + CONV_TRANS = "convtranspose" + UPSAMPLE = "upsample" + + +class ActFunction(CaseInsensitiveEnum): + """Supported activation functions in ClinicaDL.""" + + ELU = "elu" + RELU = "relu" + LEAKY_RELU = "leakyrelu" + PRELU = "prelu" + RELU6 = "relu6" + SELU = "selu" + CELU = "celu" + GELU = "gelu" + SIGMOID = "sigmoid" + TANH = "tanh" + SOFTMAX = "softmax" + LOGSOFTMAX = "logsoftmax" + MISH = "mish" + + +class PoolingLayer(CaseInsensitiveEnum): + """Supported pooling layers in ClinicaDL.""" + + MAX = "max" + AVG = "avg" + ADAPT_AVG = "adaptiveavg" + ADAPT_MAX = "adaptivemax" + + +class NormLayer(CaseInsensitiveEnum): + """Supported normalization layers in ClinicaDL.""" + + GROUP = "group" + LAYER = "layer" + SYNCBATCH = "syncbatch" + BATCH = "batch" + INSTANCE = "instance" + + +class ConvNormLayer(CaseInsensitiveEnum): + """Supported normalization layers with convolutions in ClinicaDL.""" + + GROUP = "group" + SYNCBATCH = "syncbatch" + BATCH = "batch" + INSTANCE = "instance" + + +class UnpoolingMode(CaseInsensitiveEnum): + """Supported unpooling mode for AutoEncoders in ClinicaDL.""" + + NEAREST = "nearest" + LINEAR = "linear" + BILINEAR = "bilinear" + BICUBIC = "bicubic" + TRILINEAR = "trilinear" + CONV_TRANS = "convtranspose" diff --git a/clinicadl/monai_networks/nn/layers/utils/types.py b/clinicadl/monai_networks/nn/layers/utils/types.py new file mode 100644 index 000000000..f5ef18847 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/utils/types.py @@ -0,0 +1,37 @@ +from typing import Any, Dict, List, Tuple, Union + +from .enum import ( + ActFunction, + ConvNormLayer, + NormLayer, + PoolingLayer, + UnpoolingLayer, +) + +SingleLayerConvParameter = Union[int, Tuple[int, ...]] +ConvParameters = Union[SingleLayerConvParameter, List[SingleLayerConvParameter]] + +PoolingType = Union[str, PoolingLayer] +SingleLayerPoolingParameters = Tuple[PoolingType, Dict[str, Any]] +PoolingParameters = Union[ + SingleLayerPoolingParameters, List[SingleLayerPoolingParameters] +] + +UnpoolingType = Union[str, 
UnpoolingLayer] +SingleLayerUnpoolingParameters = Tuple[UnpoolingType, Dict[str, Any]] +UnpoolingParameters = Union[ + SingleLayerUnpoolingParameters, List[SingleLayerUnpoolingParameters] +] + +NormalizationType = Union[str, NormLayer] +NormalizationParameters = Union[ + NormalizationType, Tuple[NormalizationType, Dict[str, Any]] +] + +ConvNormalizationType = Union[str, ConvNormLayer] +ConvNormalizationParameters = Union[ + ConvNormalizationType, Tuple[ConvNormalizationType, Dict[str, Any]] +] + +ActivationType = Union[str, ActFunction] +ActivationParameters = Union[ActivationType, Tuple[ActivationType, Dict[str, Any]]] diff --git a/clinicadl/monai_networks/nn/layers/vit.py b/clinicadl/monai_networks/nn/layers/vit.py new file mode 100644 index 000000000..e485d6c6b --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/vit.py @@ -0,0 +1,94 @@ +from functools import partial +from typing import Callable, Optional + +import torch +import torch.nn as nn +from torchvision.models.vision_transformer import MLPBlock + + +class EncoderBlock(nn.Module): + """Transformer encoder block.""" + + def __init__( + self, + num_heads: int, + hidden_dim: int, + mlp_dim: int, + dropout: float, + attention_dropout: float, + norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6), + ) -> None: + super().__init__() + self.num_heads = num_heads + + # Attention block + self.norm1 = norm_layer(hidden_dim) + self.self_attention = nn.MultiheadAttention( + hidden_dim, num_heads, dropout=attention_dropout, batch_first=True + ) + self.dropout = nn.Dropout(dropout) + + # MLP block + self.norm2 = norm_layer(hidden_dim) + self.mlp = MLPBlock(hidden_dim, mlp_dim, dropout) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + x = self.norm1(x) + x, _ = self.self_attention(x, x, x, need_weights=False) + x = self.dropout(x) + x += residual + + y = self.norm2(x) + y = self.mlp(y) + return x + y + + +class Encoder(nn.Module): + """Encoder with multiple transformer blocks.""" + + def __init__( + self, + seq_length: int, + num_layers: int, + num_heads: int, + hidden_dim: int, + mlp_dim: int, + dropout: float, + attention_dropout: float, + pos_embedding: Optional[nn.Parameter] = None, + norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6), + ) -> None: + super().__init__() + + if pos_embedding is not None: + self.pos_embedding = pos_embedding + else: + self.pos_embedding = nn.Parameter( + torch.empty(1, seq_length, hidden_dim).normal_(std=0.02) + ) # from BERT + self.dropout = nn.Dropout(dropout) + self.layers = nn.ModuleList( + [ + EncoderBlock( + num_heads, + hidden_dim, + mlp_dim, + dropout, + attention_dropout, + norm_layer, + ) + for _ in range(num_layers) + ] + ) + self.norm = norm_layer(hidden_dim) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x + self.pos_embedding + + x = self.dropout(x) + for layer in self.layers: + x = layer(x) + + return self.norm(x) diff --git a/clinicadl/monai_networks/nn/mlp.py b/clinicadl/monai_networks/nn/mlp.py new file mode 100644 index 000000000..a27b2ad4e --- /dev/null +++ b/clinicadl/monai_networks/nn/mlp.py @@ -0,0 +1,146 @@ +from collections import OrderedDict +from typing import Optional, Sequence + +import torch.nn as nn +from monai.networks.blocks import ADN +from monai.networks.layers.utils import get_act_layer +from monai.networks.nets import FullyConnectedNet as BaseMLP + +from .layers.utils import ( + ActFunction, + ActivationParameters, + NormalizationParameters, + NormLayer, +) +from .utils import 
check_adn_ordering, check_norm_layer


class MLP(BaseMLP):
+    """Simple fully-connected neural network (or Multi-Layer Perceptron) with linear, normalization, activation
+    and dropout layers.
+
+    Parameters
+    ----------
+    in_channels : int
+        number of input channels (i.e. number of features).
+    out_channels : int
+        number of output channels.
+    hidden_channels : Sequence[int]
+        number of output channels for each hidden layer. Thus, this parameter also controls the number of hidden layers.
+    act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU)
+        the activation function used after a linear layer, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network. Should be passed in the same way as `act`.
+        If None, no last activation will be applied.
+    norm : Optional[NormalizationParameters] (optional, default=NormLayer.BATCH)
+        the normalization type used after a linear layer, and optionally the arguments of the normalization
+        layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be
+        performed.\n
+        `norm_type` can be any value in {`batch`, `group`, `instance`, `layer`, `syncbatch`}. Please refer to PyTorch's
+        [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and
+        optional arguments for each of them.\n
+        Please note that arguments `num_channels`, `num_features` and `normalized_shape` of the normalization layer
+        should not be passed, as they are automatically inferred from the output of the previous layer in the network.
+    dropout : Optional[float] (optional, default=None)
+        dropout ratio. If None, no dropout.
+    bias : bool (optional, default=True)
+        whether to have a bias term in linear layers.
+    adn_ordering : str (optional, default="NDA")
+        order of operations `Activation`, `Dropout` and `Normalization` after a linear layer (except the last
+        one).
+        For example if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n
+        Note: ADN will not be applied after the last linear layer.
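+
+    Note: `hidden_channels=[8, 4]` gives two hidden linear layers (`in_channels` -> 8 -> 4 -> `out_channels`),
+    as illustrated in the example below.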
+ + Examples + -------- + >>> MLP(in_channels=12, out_channels=2, hidden_channels=[8, 4], dropout=0.1, act=("elu", {"alpha": 0.5}), + norm=("group", {"num_groups": 2}), bias=True, adn_ordering="ADN", output_act="softmax") + MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=12, out_features=8, bias=True) + (adn): ADN( + (A): ELU(alpha=0.5) + (D): Dropout(p=0.1, inplace=False) + (N): GroupNorm(2, 8, eps=1e-05, affine=True) + ) + ) + (hidden1): Sequential( + (linear): Linear(in_features=8, out_features=4, bias=True) + (adn): ADN( + (A): ELU(alpha=0.5) + (D): Dropout(p=0.1, inplace=False) + (N): GroupNorm(2, 4, eps=1e-05, affine=True) + ) + ) + (output): Sequential( + (linear): Linear(in_features=4, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + hidden_channels: Sequence[int], + act: Optional[ActivationParameters] = ActFunction.PRELU, + output_act: Optional[ActivationParameters] = None, + norm: Optional[NormalizationParameters] = NormLayer.BATCH, + dropout: Optional[float] = None, + bias: bool = True, + adn_ordering: str = "NDA", + ) -> None: + self.norm = check_norm_layer(norm) + super().__init__( + in_channels, + out_channels, + hidden_channels, + dropout, + act, + bias, + check_adn_ordering(adn_ordering), + ) + self.output = nn.Sequential(OrderedDict([("linear", self.output)])) + self.output.output_act = get_act_layer(output_act) if output_act else None + # renaming + self._modules = OrderedDict( + [ + (key.replace("hidden_", "hidden"), sub_m) + for key, sub_m in self._modules.items() + ] + ) + + def _get_layer(self, in_channels: int, out_channels: int, bias: bool) -> nn.Module: + """ + Gets the parametrized Linear layer + ADN block. 
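+        For `layer` normalization, `normalized_shape` is set to the layer's output size; for the other
+        normalization layers, the output size is passed to the ADN block via `in_channels`.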
+ """ + if self.norm == NormLayer.LAYER: + norm = ("layer", {"normalized_shape": out_channels}) + else: + norm = self.norm + seq = nn.Sequential( + OrderedDict( + [ + ("linear", nn.Linear(in_channels, out_channels, bias)), + ( + "adn", + ADN( + ordering=self.adn_ordering, + act=self.act, + norm=norm, + dropout=self.dropout, + dropout_dim=1, + in_channels=out_channels, + ), + ), + ] + ) + ) + return seq diff --git a/clinicadl/monai_networks/nn/resnet.py b/clinicadl/monai_networks/nn/resnet.py new file mode 100644 index 000000000..1ba90b30c --- /dev/null +++ b/clinicadl/monai_networks/nn/resnet.py @@ -0,0 +1,566 @@ +import re +from collections import OrderedDict +from copy import deepcopy +from enum import Enum +from typing import Any, Callable, Mapping, Optional, Sequence, Type, Union + +import torch +import torch.nn as nn +from monai.networks.layers.factories import Conv, Norm, Pool +from monai.networks.layers.utils import get_act_layer +from monai.utils import ensure_tuple_rep +from torch.hub import load_state_dict_from_url +from torchvision.models.resnet import ( + ResNet18_Weights, + ResNet34_Weights, + ResNet50_Weights, + ResNet101_Weights, + ResNet152_Weights, +) + +from .layers.resnet import ResNetBlock, ResNetBottleneck +from .layers.senet import SEResNetBlock, SEResNetBottleneck +from .layers.utils import ActivationParameters + + +class ResNetBlockType(str, Enum): + """Supported ResNet blocks.""" + + BASIC = "basic" + BOTTLENECK = "bottleneck" + + +class GeneralResNet(nn.Module): + """Common base class for ResNet and SEResNet.""" + + def __init__( + self, + spatial_dims: int, + in_channels: int, + num_outputs: Optional[int], + block_type: Union[str, ResNetBlockType], + n_res_blocks: Sequence[int], + n_features: Sequence[int], + init_conv_size: Union[Sequence[int], int], + init_conv_stride: Union[Sequence[int], int], + bottleneck_reduction: int, + se_reduction: Optional[int], + act: ActivationParameters, + output_act: ActivationParameters, + ) -> None: + super().__init__() + + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.num_outputs = num_outputs + self.block_type = block_type + self._check_args_consistency(n_res_blocks, n_features) + self.n_res_blocks = n_res_blocks + self.n_features = n_features + self.bottleneck_reduction = bottleneck_reduction + self.se_reduction = se_reduction + self.act = act + self.squeeze_excitation = True if se_reduction else False + + self.init_conv_size = ensure_tuple_rep(init_conv_size, spatial_dims) + self.init_conv_stride = ensure_tuple_rep(init_conv_stride, spatial_dims) + + block, in_planes = self._get_block(block_type) + + conv_type, norm_type, pool_type, avgp_type = self._get_layers() + + block_avgpool = [0, 1, (1, 1), (1, 1, 1)] + + self.in_planes = in_planes[0] + self.n_layers = len(in_planes) + self.bias_downsample = False + + self.conv0 = conv_type( # pylint: disable=not-callable + in_channels, + self.in_planes, + kernel_size=self.init_conv_size, + stride=self.init_conv_stride, + padding=tuple(k // 2 for k in self.init_conv_size), + bias=False, + ) + self.norm0 = norm_type(self.in_planes) # pylint: disable=not-callable + self.act0 = get_act_layer(name=act) + self.pool0 = pool_type(kernel_size=3, stride=2, padding=1) # pylint: disable=not-callable + self.layer1 = self._make_resnet_layer( + block, in_planes[0], n_res_blocks[0], spatial_dims, act + ) + for i, (n_blocks, n_feats) in enumerate( + zip(n_res_blocks[1:], in_planes[1:]), start=2 + ): + self.add_module( + f"layer{i}", + self._make_resnet_layer( + block, + 
planes=n_feats, + blocks=n_blocks, + spatial_dims=spatial_dims, + stride=2, + act=act, + ), + ) + self.fc = ( + nn.Sequential( + OrderedDict( + [ + ("pool", avgp_type(block_avgpool[spatial_dims])), # pylint: disable=not-callable + ("flatten", nn.Flatten(1)), + ("out", nn.Linear(n_features[-1], num_outputs)), + ] + ) + ) + if num_outputs + else None + ) + if self.fc: + self.fc.output_act = get_act_layer(output_act) if output_act else None + + self._init_module(conv_type, norm_type) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv0(x) + x = self.norm0(x) + x = self.act0(x) + x = self.pool0(x) + + for i in range(1, self.n_layers + 1): + x = self.get_submodule(f"layer{i}")(x) + + if self.fc is not None: + x = self.fc(x) + + return x + + def _get_block(self, block_type: Union[str, ResNetBlockType]) -> nn.Module: + """ + Gets the residual block, depending on the block choice made by the user and depending + on whether squeeze-excitation mode or not. + """ + block_type = ResNetBlockType(block_type) + if block_type == ResNetBlockType.BASIC: + in_planes = self.n_features + if self.squeeze_excitation: + block = SEResNetBlock + block.reduction = self.se_reduction + else: + block = ResNetBlock + elif block_type == ResNetBlockType.BOTTLENECK: + in_planes = self._bottleneck_reduce( + self.n_features, self.bottleneck_reduction + ) + if self.squeeze_excitation: + block = SEResNetBottleneck + block.reduction = self.se_reduction + else: + block = ResNetBottleneck + block.expansion = self.bottleneck_reduction + + return block, in_planes + + def _get_layers(self): + """ + Gets convolution, normalization, pooling and adaptative average pooling layers. + """ + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[ + Conv.CONV, self.spatial_dims + ] + norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[ + Norm.BATCH, self.spatial_dims + ] + pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[ + Pool.MAX, self.spatial_dims + ] + avgp_type: Type[ + Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d] + ] = Pool[Pool.ADAPTIVEAVG, self.spatial_dims] + + return conv_type, norm_type, pool_type, avgp_type + + def _make_resnet_layer( + self, + block: Type[Union[ResNetBlock, ResNetBottleneck]], + planes: int, + blocks: int, + spatial_dims: int, + act: ActivationParameters, + stride: int = 1, + ) -> nn.Sequential: + """ + Builds a ResNet layer. + """ + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + downsample = None + if stride != 1 or self.in_planes != planes * block.expansion: + downsample = nn.Sequential( + conv_type( # pylint: disable=not-callable + self.in_planes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=self.bias_downsample, + ), + norm_type(planes * block.expansion), # pylint: disable=not-callable + ) + + layers = [ + block( + in_planes=self.in_planes, + planes=planes, + spatial_dims=spatial_dims, + stride=stride, + downsample=downsample, + act=act, + ) + ] + + self.in_planes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block(self.in_planes, planes, spatial_dims=spatial_dims, act=act) + ) + + return nn.Sequential(*layers) + + def _init_module( + self, conv_type: Type[nn.Module], norm_type: Type[nn.Module] + ) -> None: + """ + Initializes the parameters. 
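+        Convolution weights use Kaiming (He) initialization with `fan_out`; normalization layers are
+        initialized with weight 1 and bias 0, and linear biases are set to 0.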
+        """
+        for m in self.modules():
+            if isinstance(m, conv_type):
+                nn.init.kaiming_normal_(
+                    torch.as_tensor(m.weight), mode="fan_out", nonlinearity="relu"
+                )
+            elif isinstance(m, norm_type):
+                nn.init.constant_(torch.as_tensor(m.weight), 1)
+                nn.init.constant_(torch.as_tensor(m.bias), 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.constant_(torch.as_tensor(m.bias), 0)
+
+    @classmethod
+    def _bottleneck_reduce(
+        cls, n_features: Sequence[int], bottleneck_reduction: int
+    ) -> Sequence[int]:
+        """
+        Finds number of feature maps for the bottleneck layers.
+        """
+        reduced_features = []
+        for n in n_features:
+            if n % bottleneck_reduction != 0:
+                raise ValueError(
+                    "All elements of n_features must be divisible by bottleneck_reduction. "
+                    f"Got {n} in n_features and bottleneck_reduction={bottleneck_reduction}"
+                )
+            reduced_features.append(n // bottleneck_reduction)
+
+        return reduced_features
+
+    @classmethod
+    def _check_args_consistency(
+        cls, n_res_blocks: Sequence[int], n_features: Sequence[int]
+    ) -> None:
+        """
+        Checks consistency between `n_res_blocks` and `n_features`.
+        """
+        if not isinstance(n_res_blocks, Sequence):
+            raise ValueError(f"n_res_blocks must be a sequence, got {n_res_blocks}")
+        if not isinstance(n_features, Sequence):
+            raise ValueError(f"n_features must be a sequence, got {n_features}")
+        if len(n_features) != len(n_res_blocks):
+            raise ValueError(
+                f"n_features and n_res_blocks must have the same length, got n_features={n_features} "
+                f"and n_res_blocks={n_res_blocks}"
+            )
+
+
+class ResNet(GeneralResNet):
+    """
+    ResNet based on the [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385) paper.
+    Adapted from [MONAI's implementation](https://docs.monai.io/en/stable/networks.html#resnet).
+
+    The user can customize the number of residual blocks, the number of downsampling blocks, the number of channels
+    in each block, as well as other parameters like the type of residual block used.
+
+    ResNet is a fully convolutional network that can work with input of any size, provided that it is large
+    enough not to be reduced to a 1-pixel image (before the adaptive average pooling).
+
+    Parameters
+    ----------
+    spatial_dims : int
+        number of spatial dimensions of the input image.
+    in_channels : int
+        number of channels in the input image.
+    num_outputs : Optional[int]
+        number of output variables after the last linear layer.\n
+        If None, the features before the last fully connected layer (including average pooling) will be returned.
+    block_type : Union[str, ResNetBlockType] (optional, default=ResNetBlockType.BASIC)
+        type of residual block. Either `basic` or `bottleneck`. Default to `basic`, as in `ResNet-18`.
+    n_res_blocks : Sequence[int] (optional, default=(2, 2, 2, 2))
+        number of residual blocks in each ResNet layer. A ResNet layer refers here to the set of residual blocks
+        between two downsamplings. The length of `n_res_blocks` thus determines the number of ResNet layers.
+        Default to `(2, 2, 2, 2)`, as in `ResNet-18`.
+    n_features : Sequence[int] (optional, default=(64, 128, 256, 512))
+        number of output feature maps for each ResNet layer. The length of `n_features` must be equal to the length
+        of `n_res_blocks`. Default to `(64, 128, 256, 512)`, as in `ResNet-18`.
+    init_conv_size : Union[Sequence[int], int] (optional, default=7)
+        kernel_size for the first convolution.
+        If tuple, it will be understood as the values for each dimension.
+        Default to 7, as in the original paper.
+ init_conv_stride : Union[Sequence[int], int] (optional, default=2) + stride for the first convolution. + If tuple, it will be understood as the values for each dimension. + Default to 2, as in the original paper. + bottleneck_reduction : int (optional, default=4) + if `block_type='bottleneck'`, `bottleneck_reduction` determines the reduction factor for the number + of feature maps in bottleneck layers (1x1 convolutions). Default to 4, as in the original paper. + act : ActivationParameters (optional, default=("relu", {"inplace": True})) + the activation function used in the convolutional part, and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them.\n + Default is "relu", as in the original paper. + output_act : Optional[ActivationParameters] (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network. + Should be pass in the same way as `act`. + If None, no last activation will be applied. + + Examples + -------- + >>> ResNet( + spatial_dims=2, + in_channels=1, + num_outputs=2, + block_type="bottleneck", + bottleneck_reduction=4, + n_features=(8, 16), + n_res_blocks=(2, 2), + output_act="softmax", + init_conv_size=5, + ) + ResNet( + (conv0): Conv2d(1, 2, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), bias=False) + (norm0): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act0): ReLU(inplace=True) + (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + (layer1): Sequential( + (0): ResNetBottleneck( + (conv1): Conv2d(2, 2, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv3): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm3): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (downsample): Sequential( + (0): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + (act3): ReLU(inplace=True) + ) + (1): ResNetBottleneck( + (conv1): Conv2d(8, 2, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv3): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm3): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act3): ReLU(inplace=True) + ) + ) + (layer2): Sequential( + (0): ResNetBottleneck( + (conv1): Conv2d(8, 4, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm1): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)
+          (act1): ReLU(inplace=True)
+          (conv2): Conv2d(4, 4, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
+          (norm2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): ReLU(inplace=True)
+          (conv3): Conv2d(4, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
+          (norm3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (downsample): Sequential(
+            (0): Conv2d(8, 16, kernel_size=(1, 1), stride=(2, 2), bias=False)
+            (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          )
+          (act3): ReLU(inplace=True)
+        )
+        (1): ResNetBottleneck(
+          (conv1): Conv2d(16, 4, kernel_size=(1, 1), stride=(1, 1), bias=False)
+          (norm1): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act1): ReLU(inplace=True)
+          (conv2): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+          (norm2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act2): ReLU(inplace=True)
+          (conv3): Conv2d(4, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
+          (norm3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+          (act3): ReLU(inplace=True)
+        )
+      )
+      (fc): Sequential(
+        (pool): AdaptiveAvgPool2d(output_size=(1, 1))
+        (flatten): Flatten(start_dim=1, end_dim=-1)
+        (out): Linear(in_features=16, out_features=2, bias=True)
+        (output_act): Softmax(dim=None)
+      )
+    )
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        num_outputs: Optional[int],
+        block_type: Union[str, ResNetBlockType] = ResNetBlockType.BASIC,
+        n_res_blocks: Sequence[int] = (2, 2, 2, 2),
+        n_features: Sequence[int] = (64, 128, 256, 512),
+        init_conv_size: Union[Sequence[int], int] = 7,
+        init_conv_stride: Union[Sequence[int], int] = 2,
+        bottleneck_reduction: int = 4,
+        act: ActivationParameters = ("relu", {"inplace": True}),
+        output_act: Optional[ActivationParameters] = None,
+    ) -> None:
+        super().__init__(
+            spatial_dims=spatial_dims,
+            in_channels=in_channels,
+            num_outputs=num_outputs,
+            block_type=block_type,
+            n_res_blocks=n_res_blocks,
+            n_features=n_features,
+            init_conv_size=init_conv_size,
+            init_conv_stride=init_conv_stride,
+            bottleneck_reduction=bottleneck_reduction,
+            se_reduction=None,
+            act=act,
+            output_act=output_act,
+        )
+
+
+class SOTAResNet(str, Enum):
+    """Supported ResNet networks."""
+
+    RESNET_18 = "ResNet-18"
+    RESNET_34 = "ResNet-34"
+    RESNET_50 = "ResNet-50"
+    RESNET_101 = "ResNet-101"
+    RESNET_152 = "ResNet-152"
+
+
+def get_resnet(
+    name: Union[str, SOTAResNet],
+    num_outputs: Optional[int],
+    output_act: ActivationParameters = None,
+    pretrained: bool = False,
+) -> ResNet:
+    """
+    To get a ResNet implemented in the [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385)
+    paper.
+
+    Only the last fully connected layer will be changed to match `num_outputs`.
+
+    The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not
+    use pretrained weights, as it is task specific.
+
+    .. warning:: `ResNet-18`, `ResNet-34`, `ResNet-50`, `ResNet-101` and `ResNet-152` only work with 2D images with 3
+        channels.
+
+    Parameters
+    ----------
+    name : Union[str, SOTAResNet]
+        The name of the ResNet. Available networks are `ResNet-18`, `ResNet-34`, `ResNet-50`, `ResNet-101` and `ResNet-152`.
+ num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + output_act : ActivationParameters (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + pretrained : bool (optional, default=False) + whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// + pytorch.org/vision/main/models/resnet.html). + + Returns + ------- + ResNet + The network, with potentially pretrained weights. + """ + name = SOTAResNet(name) + if name == SOTAResNet.RESNET_18: + block_type = ResNetBlockType.BASIC + n_res_blocks = (2, 2, 2, 2) + n_features = (64, 128, 256, 512) + model_url = ResNet18_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_34: + block_type = ResNetBlockType.BASIC + n_res_blocks = (3, 4, 6, 3) + n_features = (64, 128, 256, 512) + model_url = ResNet34_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_50: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 6, 3) + n_features = (256, 512, 1024, 2048) + model_url = ResNet50_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_101: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 23, 3) + n_features = (256, 512, 1024, 2048) + model_url = ResNet101_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_152: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 8, 36, 3) + n_features = (256, 512, 1024, 2048) + model_url = ResNet152_Weights.DEFAULT.url + + # pylint: disable=possibly-used-before-assignment + resnet = ResNet( + spatial_dims=2, + in_channels=3, + num_outputs=num_outputs, + n_res_blocks=n_res_blocks, + block_type=block_type, + n_features=n_features, + output_act=output_act, + ) + if pretrained: + fc_layers = deepcopy(resnet.fc) + resnet.fc = None + pretrained_dict = load_state_dict_from_url(model_url, progress=True) + resnet.load_state_dict(_state_dict_adapter(pretrained_dict)) + resnet.fc = fc_layers + + return resnet + + +def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: + """ + A mapping between torchvision's layer names and ours. 
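+    For instance, torchvision's stem layers `conv1`/`bn1` are expected to map to our `conv0`/`norm0`.
+    Keys of the last fully connected layer (`fc`) are dropped, since this layer is task specific and
+    never loaded from the pretrained weights.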
+ """ + state_dict = {k: v for k, v in state_dict.items() if "fc" not in k} + + mappings = [ + (r"(?>> SEResNet( + spatial_dims=2, + in_channels=1, + num_outputs=2, + block_type="basic", + se_reduction=2, + n_features=(8,), + n_res_blocks=(2,), + output_act="softmax", + init_conv_size=5, + ) + SEResNet( + (conv0): Conv2d(1, 8, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), bias=False) + (norm0): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act0): ReLU(inplace=True) + (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + (layer1): Sequential( + (0): SEResNetBlock( + (conv1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (se_layer): ChannelSELayer( + (avg_pool): AdaptiveAvgPool2d(output_size=1) + (fc): Sequential( + (0): Linear(in_features=8, out_features=4, bias=True) + (1): ReLU(inplace=True) + (2): Linear(in_features=4, out_features=8, bias=True) + (3): Sigmoid() + ) + ) + (act2): ReLU(inplace=True) + ) + (1): SEResNetBlock( + (conv1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (se_layer): ChannelSELayer( + (avg_pool): AdaptiveAvgPool2d(output_size=1) + (fc): Sequential( + (0): Linear(in_features=8, out_features=4, bias=True) + (1): ReLU(inplace=True) + (2): Linear(in_features=4, out_features=8, bias=True) + (3): Sigmoid() + ) + ) + (act2): ReLU(inplace=True) + ) + ) + (fc): Sequential( + (pool): AdaptiveAvgPool2d(output_size=(1, 1)) + (flatten): Flatten(start_dim=1, end_dim=-1) + (out): Linear(in_features=8, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + num_outputs: Optional[int], + se_reduction: int = 16, + **kwargs: Any, + ) -> None: + # get defaults from resnet + _, default_resnet_args = get_args_and_defaults(ResNet.__init__) + for arg, value in default_resnet_args.items(): + if arg not in kwargs: + kwargs[arg] = value + + self._check_se_channels(kwargs["n_features"], se_reduction) + + super().__init__( + spatial_dims=spatial_dims, + in_channels=in_channels, + num_outputs=num_outputs, + se_reduction=se_reduction, + **kwargs, + ) + + @classmethod + def _check_se_channels(cls, n_features: Sequence[int], se_reduction: int) -> None: + """ + Checks that the output of residual blocks always have a number of channels greater + than squeeze-excitation bottleneck reduction factor. + """ + if not isinstance(n_features, Sequence): + raise ValueError(f"n_features must be a sequence. Got {n_features}") + for n in n_features: + if n < se_reduction: + raise ValueError( + f"elements of n_features must be greater or equal to se_reduction. 
Got {n} in n_features "
+                    f"and se_reduction={se_reduction}"
+                )
+
+
+class SOTAResNet(str, Enum):
+    """Supported SEResNet networks."""
+
+    SE_RESNET_50 = "SEResNet-50"
+    SE_RESNET_101 = "SEResNet-101"
+    SE_RESNET_152 = "SEResNet-152"
+
+
+def get_seresnet(
+    name: Union[str, SOTAResNet],
+    num_outputs: Optional[int],
+    output_act: ActivationParameters = None,
+    pretrained: bool = False,
+) -> SEResNet:
+    """
+    To get a Squeeze-and-Excitation ResNet implemented in the [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/
+    1709.01507) paper.
+
+    Only the last fully connected layer will be changed to match `num_outputs`.
+
+    .. warning:: `SEResNet-50`, `SEResNet-101` and `SEResNet-152` only work with 2D images with 3 channels.
+
+    Note: pretrained weights are not yet available for these networks.
+
+    Parameters
+    ----------
+    name : Union[str, SOTAResNet]
+        the name of the SEResNet. Available networks are `SEResNet-50`, `SEResNet-101` and `SEResNet-152`.
+    num_outputs : Optional[int]
+        number of output variables after the last linear layer.\n
+        If None, the features before the last fully connected layer will be returned.
+    output_act : ActivationParameters (optional, default=None)
+        if `num_outputs` is not None, a potential activation layer applied to the outputs of the network,
+        and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.
+    pretrained : bool (optional, default=False)
+        pretrained networks are not yet available for SE-ResNets. Leave this argument as False.
+
+    Returns
+    -------
+    SEResNet
+        the network.
+    """
+    if pretrained is not False:
+        raise ValueError(
+            "Pretrained networks are not yet available for SE-ResNets. Please leave "
+            "'pretrained' to False."
+ ) + + name = SOTAResNet(name) + if name == SOTAResNet.SE_RESNET_50: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 6, 3) + n_features = (256, 512, 1024, 2048) + elif name == SOTAResNet.SE_RESNET_101: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 23, 3) + n_features = (256, 512, 1024, 2048) + elif name == SOTAResNet.SE_RESNET_152: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 8, 36, 3) + n_features = (256, 512, 1024, 2048) + + # pylint: disable=possibly-used-before-assignment + resnet = SEResNet( + spatial_dims=2, + in_channels=3, + num_outputs=num_outputs, + n_res_blocks=n_res_blocks, + block_type=block_type, + n_features=n_features, + output_act=output_act, + ) + + return resnet diff --git a/clinicadl/monai_networks/nn/unet.py b/clinicadl/monai_networks/nn/unet.py new file mode 100644 index 000000000..dd1e59141 --- /dev/null +++ b/clinicadl/monai_networks/nn/unet.py @@ -0,0 +1,250 @@ +from abc import ABC, abstractmethod +from typing import Optional, Sequence + +import torch +import torch.nn as nn +from monai.networks.blocks.convolutions import Convolution +from monai.networks.layers.utils import get_act_layer + +from .layers.unet import ConvBlock, DownBlock, UpSample +from .layers.utils import ActFunction, ActivationParameters + + +class BaseUNet(nn.Module, ABC): + """Base class for UNet and AttentionUNet.""" + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + channels: Sequence[int] = (64, 128, 256, 512, 1024), + act: ActivationParameters = ActFunction.RELU, + output_act: Optional[ActivationParameters] = None, + dropout: Optional[float] = None, + ): + super().__init__() + if not isinstance(channels, Sequence) or len(channels) < 2: + raise ValueError( + f"channels should be a sequence, whose length is no less than 2. Got {channels}" + ) + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.out_channels = out_channels + self.channels = channels + self.act = act + self.dropout = dropout + + self.doubleconv = ConvBlock( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=channels[0], + act=act, + dropout=dropout, + ) + self._build_encoder() + self._build_decoder() + self.reduce_channels = Convolution( + spatial_dims=spatial_dims, + in_channels=channels[0], + out_channels=out_channels, + kernel_size=1, + strides=1, + padding=0, + conv_only=True, + ) + self.output_act = get_act_layer(output_act) if output_act else None + + @abstractmethod + def forward(self, x: torch.Tensor) -> torch.Tensor: + pass + + def _build_encoder(self) -> None: + for i in range(1, len(self.channels)): + self.add_module( + f"down{i}", + DownBlock( + spatial_dims=self.spatial_dims, + in_channels=self.channels[i - 1], + out_channels=self.channels[i], + act=self.act, + dropout=self.dropout, + ), + ) + + @abstractmethod + def _build_decoder(self) -> None: + pass + + +class UNet(BaseUNet): + """ + UNet based on [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/pdf/1505.04597). + + The user can customize the number of encoding blocks, the number of channels in each block, as well as other parameters + like the activation function. + + .. warning:: UNet works only with images whose dimensions are high enough powers of 2. More precisely, if n is the number + of max pooling operation in your UNet (which is equal to `len(channels)-1`), the image must have :math:`2^{k}` + pixels in each dimension, with :math:`k \\geq n` (e.g. 
shape (:math:`2^{n}`, :math:`2^{n+3}`) for a 2D image).
+
+    Note: the implementation proposed here is not exactly the one described in the original paper. Padding is added to
+    convolutions so that the feature maps keep a constant size (except when they are passed to `max pool` or `up-sample`
+    layers), batch normalization is used, and `up-conv` layers are here made with an [Upsample](https://pytorch.org/docs/
+    stable/generated/torch.nn.Upsample.html) layer followed by a 3x3 convolution.
+
+    Parameters
+    ----------
+    spatial_dims : int
+        number of spatial dimensions of the input image.
+    in_channels : int
+        number of channels in the input image.
+    out_channels : int
+        number of output channels.
+    channels : Sequence[int] (optional, default=(64, 128, 256, 512, 1024))
+        sequence of integers stating the number of channels in each UNet block. Thus, this parameter also controls
+        the number of UNet blocks. The length of `channels` should be no less than 2.\n
+        Default to `(64, 128, 256, 512, 1024)`, as in the original paper.
+    act : ActivationParameters (optional, default=ActFunction.RELU)
+        the activation function used in the convolutional part, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`.
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.\n
+        Default is "relu", as in the original paper.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network. Should be passed in the same way as `act`.
+        If None, no last activation will be applied.
+    dropout : Optional[float] (optional, default=None)
+        dropout ratio. If None, no dropout.
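+
+    Note: with the default `channels` (length 5, hence 4 max pooling operations), each spatial dimension
+    of a valid input must be a power of 2 of at least :math:`2^{4}=16` pixels.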
+ + Examples + -------- + >>> UNet( + spatial_dims=2, + in_channels=1, + out_channels=2, + channels=(4, 8), + act="elu", + output_act=("softmax", {"dim": 1}), + dropout=0.1, + ) + UNet( + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(1, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (down1): DownBlock( + (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(4, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + ) + (upsample1): UpSample( + (0): Upsample(scale_factor=2.0, mode='nearest') + (1): Convolution( + (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (doubleconv1): ConvBlock( + (0): Convolution( + (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (reduce_channels): Convolution( + (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) + ) + (output_act): Softmax(dim=1) + ) + """ + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_history = [self.doubleconv(x)] + + for i in range(1, len(self.channels)): + x = self.get_submodule(f"down{i}")(x_history[-1]) + x_history.append(x) + + x_history.pop() # the output of bottelneck is not used as a residual + for i in range(len(self.channels) - 1, 0, -1): + up = self.get_submodule(f"upsample{i}")(x) + merged = torch.cat((x_history.pop(), up), dim=1) + x = self.get_submodule(f"doubleconv{i}")(merged) + + out = self.reduce_channels(x) + + if self.output_act is not None: + out = self.output_act(out) + + return out + + def _build_decoder(self): + for i in range(len(self.channels) - 1, 0, -1): + self.add_module( + f"upsample{i}", + UpSample( + spatial_dims=self.spatial_dims, + in_channels=self.channels[i], + out_channels=self.channels[i - 1], + act=self.act, + dropout=self.dropout, + ), + ) + self.add_module( + f"doubleconv{i}", + ConvBlock( + spatial_dims=self.spatial_dims, + in_channels=self.channels[i - 1] * 2, + out_channels=self.channels[i - 1], + act=self.act, + dropout=self.dropout, + ), + ) diff --git 
a/clinicadl/monai_networks/nn/utils/__init__.py b/clinicadl/monai_networks/nn/utils/__init__.py
new file mode 100644
index 000000000..ce603f205
--- /dev/null
+++ b/clinicadl/monai_networks/nn/utils/__init__.py
@@ -0,0 +1,14 @@
+from .checks import (
+    check_adn_ordering,
+    check_conv_args,
+    check_mlp_args,
+    check_norm_layer,
+    check_pool_indices,
+    ensure_list_of_tuples,
+)
+from .shapes import (
+    calculate_conv_out_shape,
+    calculate_convtranspose_out_shape,
+    calculate_pool_out_shape,
+    calculate_unpool_out_shape,
+)
diff --git a/clinicadl/monai_networks/nn/utils/checks.py b/clinicadl/monai_networks/nn/utils/checks.py
new file mode 100644
index 000000000..1917a2894
--- /dev/null
+++ b/clinicadl/monai_networks/nn/utils/checks.py
@@ -0,0 +1,167 @@
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
+
+from ..layers.utils import (
+    ConvParameters,
+    NormalizationParameters,
+    NormLayer,
+    PoolingLayer,
+)
+
+__all__ = [
+    "ensure_list_of_tuples",
+    "check_norm_layer",
+    "check_conv_args",
+    "check_mlp_args",
+    "check_pool_indices",
+]
+
+
+def ensure_list_of_tuples(
+    parameter: ConvParameters, dim: int, n_layers: int, name: str
+) -> List[Tuple[int, ...]]:
+    """
+    Checks spatial parameters (e.g. kernel_size) and returns a list of tuples.
+    Each element of the list corresponds to the parameters of one layer, and
+    each element of the tuple corresponds to the parameters for one dimension.
+    """
+    parameter = _check_conv_parameter(parameter, dim, n_layers, name)
+    if isinstance(parameter, tuple):
+        return [parameter] * n_layers
+    else:
+        return parameter
+
+
+def check_norm_layer(
+    norm: Optional[NormalizationParameters],
+) -> Optional[NormalizationParameters]:
+    """
+    Checks that the argument for normalization layers has the right format (i.e.
+    `norm_type` or (`norm_type`, `norm_layer_parameters`)) and checks potential
+    mandatory arguments in `norm_layer_parameters`.
+    """
+    if norm is None:
+        return norm
+
+    if not isinstance(norm, str) and not isinstance(norm, NormLayer):
+        if (
+            not isinstance(norm, tuple)
+            or len(norm) != 2
+            or not isinstance(norm[1], dict)
+        ):
+            raise ValueError(
+                "norm must be either the name of the normalization layer or a tuple with first the name and then the "
+                f"arguments of the layer in a dict. Got {norm}"
+            )
+        norm_mode = NormLayer(norm[0])
+        args = norm[1]
+    else:
+        norm_mode = NormLayer(norm)
+        args = {}
+    if norm_mode == NormLayer.GROUP and "num_groups" not in args:
+        raise ValueError(
+            f"num_groups is a mandatory argument for GroupNorm and must be passed in `norm`. Got `norm`={norm}"
+        )
+
+    return norm
+
+
+def check_adn_ordering(adn: str) -> str:
+    """
+    Checks the ADN sequence.
+    """
+    if not isinstance(adn, str):
+        raise ValueError(f"adn_ordering must be a string. Got {adn}")
+
+    for letter in adn:
+        if letter not in {
+            "A",
+            "D",
+            "N",
+        }:
+            raise ValueError(
+                f"adn_ordering must be composed of the letters 'A', 'D' and/or 'N'. Got {letter}"
+            )
+    if len(adn) != len(set(adn)):
+        raise ValueError(f"adn_ordering cannot contain duplicated letters. Got {adn}")
+
+    return adn
+
+
+def check_conv_args(conv_args: Dict[str, Any]) -> None:
+    """
+    Checks that `conv_args` is a dict with at least the mandatory argument `channels`.
+    """
+    if not isinstance(conv_args, dict):
+        raise ValueError(
+            f"conv_args must be a dict with the arguments for the convolutional part. Got: {conv_args}"
+        )
+    if "channels" not in conv_args:
+        raise ValueError(
+            "channels is a mandatory argument for the convolutional part and must therefore be "
+            f"passed in conv_args. Got conv_args={conv_args}"
+        )
+
+
+def check_mlp_args(mlp_args: Optional[Dict[str, Any]]) -> None:
+    """
+    Checks that `mlp_args` is a dict with at least the mandatory argument `hidden_channels`.
+    """
+    if mlp_args is not None:
+        if not isinstance(mlp_args, dict):
+            raise ValueError(
+                f"mlp_args must be a dict with the arguments for the MLP part. Got: {mlp_args}"
+            )
+        if "hidden_channels" not in mlp_args:
+            raise ValueError(
+                "hidden_channels is a mandatory argument for the MLP part and must therefore be "
+                f"passed in mlp_args. Got mlp_args={mlp_args}"
+            )
+
+
+def check_pool_indices(
+    pooling_indices: Optional[Sequence[int]], n_layers: int
+) -> Sequence[int]:
+    """
+    Checks that the (un)pooling indices are consistent with the number of layers.
+    """
+    if pooling_indices is not None:
+        for idx in pooling_indices:
+            if idx > n_layers - 1:
+                raise ValueError(
+                    f"indices in (un)pooling_indices must be smaller than or equal to len(channels)-1, "
+                    f"got (un)pooling_indices={pooling_indices} and len(channels)={n_layers}"
+                )
+            elif idx < -1:
+                raise ValueError(
+                    f"indices in (un)pooling_indices must be greater or equal to -1, got (un)pooling_indices={pooling_indices}"
+                )
+        return sorted(pooling_indices)
+    else:
+        return []
+
+
+def _check_conv_parameter(
+    parameter: ConvParameters, dim: int, n_layers: int, name: str
+) -> Union[Tuple[int, ...], List[Tuple[int, ...]]]:
+    """
+    Checks spatial parameters (e.g. kernel_size).
+    """
+    if isinstance(parameter, int):
+        return (parameter,) * dim
+    elif isinstance(parameter, tuple):
+        if len(parameter) != dim:
+            raise ValueError(
+                f"If a tuple is passed for {name}, its dimension must be {dim}. Got {parameter}"
+            )
+        return parameter
+    elif isinstance(parameter, list):
+        if len(parameter) != n_layers:
+            raise ValueError(
+                f"If a list is passed, {name} must contain as many elements as there are layers. "
+                f"There are {n_layers} layers, but got {parameter}"
+            )
+        checked_params = []
+        for param in parameter:
+            checked_params.append(_check_conv_parameter(param, dim, n_layers, name))
+        return checked_params
+    else:
+        raise ValueError(f"{name} must be an int, a tuple or a list. Got {parameter}")
diff --git a/clinicadl/monai_networks/nn/utils/shapes.py b/clinicadl/monai_networks/nn/utils/shapes.py
new file mode 100644
index 000000000..a649af076
--- /dev/null
+++ b/clinicadl/monai_networks/nn/utils/shapes.py
@@ -0,0 +1,203 @@
+from math import ceil
+from typing import Optional, Sequence, Tuple, Union
+
+import numpy as np
+
+from ..layers.utils import PoolingLayer, UnpoolingLayer
+
+__all__ = [
+    "calculate_conv_out_shape",
+    "calculate_convtranspose_out_shape",
+    "calculate_pool_out_shape",
+    "calculate_unpool_out_shape",
+]
+
+
+def calculate_conv_out_shape(
+    in_shape: Union[Sequence[int], int],
+    kernel_size: Union[Sequence[int], int],
+    stride: Union[Sequence[int], int] = 1,
+    padding: Union[Sequence[int], int] = 0,
+    dilation: Union[Sequence[int], int] = 1,
+    **kwargs,  # for uniformization
+) -> Tuple[int, ...]:
+    """
+    Calculates the output shape of a convolution layer. All arguments can be scalars or multiple
+    values. Always return a tuple.
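+
+    The per-dimension arithmetic below matches PyTorch's convolution documentation; the final
+    cast to int floors the division:
+
+    .. math::
+        out = \\left\\lfloor \\frac{in + 2 \\times padding - dilation \\times (kernel\\_size - 1) - 1}{stride} \\right\\rfloor + 1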
+ """ + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + dilation_np = np.atleast_1d(dilation) + + out_shape_np = ( + (in_shape_np + 2 * padding_np - dilation_np * (kernel_size_np - 1) - 1) + / stride_np + ) + 1 + + return tuple(int(s) for s in out_shape_np) + + +def calculate_convtranspose_out_shape( + in_shape: Union[Sequence[int], int], + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int] = 1, + padding: Union[Sequence[int], int] = 0, + output_padding: Union[Sequence[int], int] = 0, + dilation: Union[Sequence[int], int] = 1, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of a transposed convolution layer. All arguments can be scalars or + multiple values. Always return a tuple. + """ + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + dilation_np = np.atleast_1d(dilation) + output_padding_np = np.atleast_1d(output_padding) + + out_shape_np = ( + (in_shape_np - 1) * stride_np + - 2 * padding_np + + dilation_np * (kernel_size_np - 1) + + output_padding_np + + 1 + ) + + return tuple(int(s) for s in out_shape_np) + + +def calculate_pool_out_shape( + pool_mode: Union[str, PoolingLayer], + in_shape: Union[Sequence[int], int], + **kwargs, +) -> Tuple[int, ...]: + """ + Calculates the output shape of a pooling layer. The first argument is the type of pooling + performed (`max` or `avg`). All other arguments can be scalars or multiple values, except + `ceil_mode`. + Always return a tuple. + """ + pool_mode = PoolingLayer(pool_mode) + if pool_mode == PoolingLayer.MAX: + return _calculate_maxpool_out_shape(in_shape, **kwargs) + elif pool_mode == PoolingLayer.AVG: + return _calculate_avgpool_out_shape(in_shape, **kwargs) + elif pool_mode == PoolingLayer.ADAPT_MAX or pool_mode == PoolingLayer.ADAPT_AVG: + return _calculate_adaptivepool_out_shape(in_shape, **kwargs) + + +def calculate_unpool_out_shape( + unpool_mode: Union[str, UnpoolingLayer], + in_shape: Union[Sequence[int], int], + **kwargs, +) -> Tuple[int, ...]: + """ + Calculates the output shape of an unpooling layer. The first argument is the type of unpooling + performed (`upsample` or `convtranspose`). + Always return a tuple. + """ + unpool_mode = UnpoolingLayer(unpool_mode) + if unpool_mode == UnpoolingLayer.UPSAMPLE: + return _calculate_upsample_out_shape(in_shape, **kwargs) + elif unpool_mode == UnpoolingLayer.CONV_TRANS: + return calculate_convtranspose_out_shape(in_shape, **kwargs) + + +def _calculate_maxpool_out_shape( + in_shape: Union[Sequence[int], int], + kernel_size: Union[Sequence[int], int], + stride: Optional[Union[Sequence[int], int]] = None, + padding: Union[Sequence[int], int] = 0, + dilation: Union[Sequence[int], int] = 1, + ceil_mode: bool = False, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of a MaxPool layer. 
+ """ + if stride is None: + stride = kernel_size + + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + dilation_np = np.atleast_1d(dilation) + + out_shape_np = ( + (in_shape_np + 2 * padding_np - dilation_np * (kernel_size_np - 1) - 1) + / stride_np + ) + 1 + if ceil_mode: + out_shape = tuple(ceil(s) for s in out_shape_np) + else: + out_shape = tuple(int(s) for s in out_shape_np) + + return out_shape + + +def _calculate_avgpool_out_shape( + in_shape: Union[Sequence[int], int], + kernel_size: Union[Sequence[int], int], + stride: Optional[Union[Sequence[int], int]] = None, + padding: Union[Sequence[int], int] = 0, + ceil_mode: bool = False, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of an AvgPool layer. + """ + if stride is None: + stride = kernel_size + + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + + out_shape_np = ((in_shape_np + 2 * padding_np - kernel_size_np) / stride_np) + 1 + if ceil_mode: + out_shape_np = np.ceil(out_shape_np) + out_shape_np[(out_shape_np - 1) * stride_np >= in_shape_np + padding_np] -= 1 + + return tuple(int(s) for s in out_shape_np) + + +def _calculate_adaptivepool_out_shape( + in_shape: Union[Sequence[int], int], + output_size: Union[Sequence[int], int], + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of an AdaptiveMaxPool or AdaptiveAvgPool layer. + """ + in_shape_np = np.atleast_1d(in_shape) + out_shape_np = np.ones_like(in_shape_np) * np.atleast_1d(output_size) + + return tuple(int(s) for s in out_shape_np) + + +def _calculate_upsample_out_shape( + in_shape: Union[Sequence[int], int], + scale_factor: Optional[Union[Sequence[int], int]] = None, + size: Optional[Union[Sequence[int], int]] = None, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of an Upsample layer. + """ + in_shape_np = np.atleast_1d(in_shape) + if size and scale_factor: + raise ValueError("Pass either size or scale_factor, not both.") + elif size: + out_shape_np = np.ones_like(in_shape_np) * np.atleast_1d(size) + elif scale_factor: + out_shape_np = in_shape_np * scale_factor + else: + raise ValueError("Pass one of size or scale_factor.") + + return tuple(int(s) for s in out_shape_np) diff --git a/clinicadl/monai_networks/nn/vae.py b/clinicadl/monai_networks/nn/vae.py new file mode 100644 index 000000000..9dac6b43b --- /dev/null +++ b/clinicadl/monai_networks/nn/vae.py @@ -0,0 +1,200 @@ +from copy import deepcopy +from typing import Any, Dict, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn + +from .autoencoder import AutoEncoder +from .layers.utils import ActivationParameters, UnpoolingMode + + +class VAE(nn.Module): + """ + A Variational AutoEncoder with convolutional and fully connected layers. + + The user must pass the arguments to build an encoder, from its convolutional and + fully connected parts, and the decoder will be automatically built by taking the + symmetrical network. + + More precisely, to build the decoder, the order of the encoding layers is reverted, convolutions are + replaced by transposed convolutions and pooling layers are replaced by either upsampling or transposed + convolution layers. 
+    Please note that the order of `Activation`, `Dropout` and `Normalization`, defined with the
+    argument `adn_ordering` in `conv_args`, is the same for the encoder and the decoder.
+
+    Note that a `VAE` is an aggregation of a `CNN` (:py:class:`clinicadl.monai_networks.nn.
+    cnn.CNN`), whose last linear layer is duplicated to infer both the mean and the log variance,
+    and a `Generator` (:py:class:`clinicadl.monai_networks.nn.generator.Generator`).
+
+    Parameters
+    ----------
+    in_shape : Sequence[int]
+        sequence of integers stating the dimension of the input tensor (minus batch dimension).
+    latent_size : int
+        size of the latent vector.
+    conv_args : Dict[str, Any]
+        the arguments for the convolutional part of the encoder. The arguments are those accepted
+        by :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape` that
+        is specified here. So, the only mandatory argument is `channels`.
+    mlp_args : Optional[Dict[str, Any]] (optional, default=None)
+        the arguments for the MLP part of the encoder. The arguments are those accepted by
+        :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is inferred
+        from the output of the convolutional part, and `out_channels` that is set to `latent_size`.
+        So, the only mandatory argument is `hidden_channels`.\n
+        If None, the MLP part will be reduced to a single linear layer.\n
+        The last linear layer will be duplicated to infer both the mean and the log variance.
+    out_channels : Optional[int] (optional, default=None)
+        number of output channels. If None, the output will have the same number of channels as the
+        input.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.
+    unpooling_mode : Union[str, UnpoolingMode] (optional, default=UnpoolingMode.NEAREST)
+        type of unpooling. Can be either `"nearest"`, `"linear"`, `"bilinear"`, `"bicubic"`, `"trilinear"` or
+        `"convtranspose"`.\n
+        - `nearest`: unpooling is performed by upsampling with the :italic:`nearest` algorithm (see [PyTorch's Upsample layer]
+        (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html)).
+        - `linear`: unpooling is performed by upsampling with the :italic:`linear` algorithm. Only works with 1D images (excluding the
+        channel dimension).
+        - `bilinear`: unpooling is performed by upsampling with the :italic:`bilinear` algorithm. Only works with 2D images.
+        - `bicubic`: unpooling is performed by upsampling with the :italic:`bicubic` algorithm. Only works with 2D images.
+        - `trilinear`: unpooling is performed by upsampling with the :italic:`trilinear` algorithm. Only works with 3D images.
+        - `convtranspose`: unpooling is performed with a transposed convolution, whose parameters (kernel size, stride, etc.) are
+        computed to reverse the pooling operation.
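+
+    Since ``forward`` returns ``(reconstruction, mu, log_var)``, a typical training objective adds the
+    closed-form KL divergence against a standard normal prior to a reconstruction term. A minimal sketch,
+    given a built ``vae`` and an input batch ``x`` (``beta`` and the MSE reconstruction term are
+    illustrative choices, not part of this class):
+
+    >>> recon, mu, log_var = vae(x)
+    >>> kl = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
+    >>> loss = torch.nn.functional.mse_loss(recon, x) + beta * kl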
+
+    Examples
+    --------
+    >>> VAE(
+            in_shape=(1, 16, 16),
+            latent_size=4,
+            conv_args={"channels": [2]},
+            mlp_args={"hidden_channels": [16], "output_act": "relu"},
+            out_channels=2,
+            output_act="sigmoid",
+            unpooling_mode="bilinear",
+        )
+    VAE(
+        (encoder): CNN(
+            (convolutions): ConvEncoder(
+                (layer0): Convolution(
+                    (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1))
+                )
+            )
+            (mlp): MLP(
+                (flatten): Flatten(start_dim=1, end_dim=-1)
+                (hidden0): Sequential(
+                    (linear): Linear(in_features=392, out_features=16, bias=True)
+                    (adn): ADN(
+                        (N): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+                        (A): PReLU(num_parameters=1)
+                    )
+                )
+                (output): Identity()
+            )
+        )
+        (mu): Sequential(
+            (linear): Linear(in_features=16, out_features=4, bias=True)
+            (output_act): ReLU()
+        )
+        (log_var): Sequential(
+            (linear): Linear(in_features=16, out_features=4, bias=True)
+            (output_act): ReLU()
+        )
+        (decoder): Generator(
+            (mlp): MLP(
+                (flatten): Flatten(start_dim=1, end_dim=-1)
+                (hidden0): Sequential(
+                    (linear): Linear(in_features=4, out_features=16, bias=True)
+                    (adn): ADN(
+                        (N): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+                        (A): PReLU(num_parameters=1)
+                    )
+                )
+                (output): Sequential(
+                    (linear): Linear(in_features=16, out_features=392, bias=True)
+                    (output_act): ReLU()
+                )
+            )
+            (reshape): Reshape()
+            (convolutions): ConvDecoder(
+                (layer0): Convolution(
+                    (conv): ConvTranspose2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
+                )
+                (output_act): Sigmoid()
+            )
+        )
+    )
+    """
+
+    def __init__(
+        self,
+        in_shape: Sequence[int],
+        latent_size: int,
+        conv_args: Dict[str, Any],
+        mlp_args: Optional[Dict[str, Any]] = None,
+        out_channels: Optional[int] = None,
+        output_act: Optional[ActivationParameters] = None,
+        unpooling_mode: Union[str, UnpoolingMode] = UnpoolingMode.NEAREST,
+    ) -> None:
+        super().__init__()
+        ae = AutoEncoder(
+            in_shape,
+            latent_size,
+            conv_args,
+            mlp_args,
+            out_channels,
+            output_act,
+            unpooling_mode,
+        )
+
+        # replace the last MLP layer by two parallel layers
+        mu_layers = deepcopy(ae.encoder.mlp.output)
+        log_var_layers = deepcopy(ae.encoder.mlp.output)
+        self._reset_weights(
+            log_var_layers
+        )  # to have different initializations for the two layers
+        ae.encoder.mlp.output = nn.Identity()
+
+        self.encoder = ae.encoder
+        self.mu = mu_layers
+        self.log_var = log_var_layers
+        self.decoder = ae.decoder
+
+    def forward(
+        self, x: torch.Tensor
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Encoding, sampling and decoding.
+        """
+        feature = self.encoder(x)
+        mu = self.mu(feature)
+        log_var = self.log_var(feature)
+        z = self.reparameterize(mu, log_var)
+
+        return self.decoder(z), mu, log_var
+
+    def reparameterize(self, mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
+        """
+        Samples a random vector from a gaussian distribution, given the mean and log-variance
+        of this distribution.
+        """
+        std = torch.exp(0.5 * log_var)
+
+        if self.training:  # sample with the reparameterization trick during training
+            return torch.randn_like(std).mul(std).add_(mu)
+
+        return mu  # at inference time, return the mean of the distribution
+
+    @classmethod
+    def _reset_weights(cls, layer: Union[nn.Sequential, nn.Linear]) -> None:
+        """
+        Resets the output layer(s) of an MLP.
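+        The output may be a bare ``nn.Linear`` or, when an output activation is used, a
+        ``Sequential`` exposing a ``linear`` submodule, hence the two branches below.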
+ """ + if isinstance(layer, nn.Linear): + layer.reset_parameters() + else: + layer.linear.reset_parameters() diff --git a/clinicadl/monai_networks/nn/vit.py b/clinicadl/monai_networks/nn/vit.py new file mode 100644 index 000000000..372e1728a --- /dev/null +++ b/clinicadl/monai_networks/nn/vit.py @@ -0,0 +1,420 @@ +import math +import re +from collections import OrderedDict +from copy import deepcopy +from enum import Enum +from typing import Any, Mapping, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from monai.networks.blocks.pos_embed_utils import build_sincos_position_embedding +from monai.networks.layers import Conv +from monai.networks.layers.utils import get_act_layer +from monai.utils import ensure_tuple_rep +from torch.hub import load_state_dict_from_url +from torchvision.models.vision_transformer import ( + ViT_B_16_Weights, + ViT_B_32_Weights, + ViT_L_16_Weights, + ViT_L_32_Weights, +) + +from .layers.utils import ActFunction, ActivationParameters +from .layers.vit import Encoder + + +class PosEmbedType(str, Enum): + """Available position embedding types for ViT.""" + + LEARN = "learnable" + SINCOS = "sincos" + + +class ViT(nn.Module): + """ + Vision Transformer based on the [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale] + (https://arxiv.org/pdf/2010.11929) paper. + Adapted from [torchvision's implementation](https://pytorch.org/vision/main/models/vision_transformer.html). + + The user can customize the patch size, the embedding dimension, the number of transformer blocks, the number of + attention heads, as well as other parameters like the type of position embedding. + + Parameters + ---------- + in_shape : Sequence[int] + sequence of integers stating the dimension of the input tensor (minus batch dimension). + patch_size : Union[Sequence[int], int] + sequence of integers stating the patch size (minus batch and channel dimensions). If int, the same + patch size will be used for all dimensions. + Patch size must divide image size in all dimensions. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the patch embeddings after the last transformer block will be returned. + embedding_dim : int (optional, default=768) + size of the embedding vectors. Must be divisible by `num_heads` as each head will be responsible for + a part of the embedding vectors. Default to 768, as for 'ViT-Base' in the original paper. + num_layers : int (optional, default=12) + number of consecutive transformer blocks. Default to 12, as for 'ViT-Base' in the original paper. + num_heads : int (optional, default=12) + number of heads in the self-attention block. Must divide `embedding_size`. + Default to 12, as for 'ViT-Base' in the original paper. + mlp_dim : int (optional, default=3072) + size of the hidden layer in the MLP part of the transformer block. Default to 3072, as for 'ViT-Base' + in the original paper. + pos_embed_type : Optional[Union[str, PosEmbedType]] (optional, default="learnable") + type of position embedding. Can be either `"learnable"`, `"sincos"` or `None`.\n + - `learnable`: the position embeddings are parameters that will be learned during the training + process. + - `sincos`: the position embeddings are fixed and determined with sinus and cosinus formulas (based on Dosovitskiy et al., + 'Attention Is All You Need, https://arxiv.org/pdf/1706.03762). Only implemented for 2D and 3D images. 
With `sincos` + position embedding, `embedding_dim` must be divisible by 4 for 2D images and by 6 for 3D images. + - `None`: no position embeddings are used.\n + Default to `"learnable"`, as in the original paper. + output_act : Optional[ActivationParameters] (optional, default=ActFunction.TANH) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them.\n + Default to `"tanh"`, as in the original paper. + dropout : Optional[float] (optional, default=None) + dropout ratio. If None, no dropout. + + Examples + -------- + >>> ViT( + in_shape=(3, 60, 64), + patch_size=4, + num_outputs=2, + embedding_dim=32, + num_layers=2, + num_heads=4, + mlp_dim=128, + output_act="softmax", + ) + ViT( + (conv_proj): Conv2d(3, 32, kernel_size=(4, 4), stride=(4, 4)) + (encoder): Encoder( + (dropout): Dropout(p=0.0, inplace=False) + (layers): ModuleList( + (0-1): 2 x EncoderBlock( + (norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True) + (self_attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=32, out_features=32, bias=True) + ) + (dropout): Dropout(p=0.0, inplace=False) + (norm2): LayerNorm((32,), eps=1e-06, elementwise_affine=True) + (mlp): MLPBlock( + (0): Linear(in_features=32, out_features=128, bias=True) + (1): GELU(approximate='none') + (2): Dropout(p=0.0, inplace=False) + (3): Linear(in_features=128, out_features=32, bias=True) + (4): Dropout(p=0.0, inplace=False) + ) + ) + ) + (norm): LayerNorm((32,), eps=1e-06, elementwise_affine=True) + ) + (fc): Sequential( + (out): Linear(in_features=32, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + """ + + def __init__( + self, + in_shape: Sequence[int], + patch_size: Union[Sequence[int], int], + num_outputs: Optional[int], + embedding_dim: int = 768, + num_layers: int = 12, + num_heads: int = 12, + mlp_dim: int = 3072, + pos_embed_type: Optional[Union[str, PosEmbedType]] = PosEmbedType.LEARN, + output_act: Optional[ActivationParameters] = ActFunction.TANH, + dropout: Optional[float] = None, + ) -> None: + super().__init__() + + self.in_channels, *self.img_size = in_shape + self.spatial_dims = len(self.img_size) + self.patch_size = ensure_tuple_rep(patch_size, self.spatial_dims) + + self._check_embedding_dim(embedding_dim, num_heads) + self._check_patch_size(self.img_size, self.patch_size) + self.embedding_dim = embedding_dim + self.classification = True if num_outputs else False + dropout = dropout if dropout else 0.0 + + self.conv_proj = Conv[Conv.CONV, self.spatial_dims]( # pylint: disable=not-callable + in_channels=self.in_channels, + out_channels=self.embedding_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + ) + self.seq_length = int( + np.prod(np.array(self.img_size) // np.array(self.patch_size)) + ) + + # Add a class token + if self.classification: + self.class_token = nn.Parameter(torch.zeros(1, 1, self.embedding_dim)) + self.seq_length += 1 + + pos_embedding = self._get_pos_embedding(pos_embed_type) + self.encoder = Encoder( + 
self.seq_length, + num_layers, + num_heads, + self.embedding_dim, + mlp_dim, + dropout=dropout, + attention_dropout=dropout, + pos_embedding=pos_embedding, + ) + + if self.classification: + self.class_token = nn.Parameter(torch.zeros(1, 1, embedding_dim)) + self.fc = nn.Sequential( + OrderedDict([("out", nn.Linear(embedding_dim, num_outputs))]) + ) + self.fc.output_act = get_act_layer(output_act) if output_act else None + else: + self.fc = None + + self._init_layers() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv_proj(x) + # (n, hidden_dim, n_h, n_w) -> (n, (h * w * d), hidden_dim) + x = x.flatten(2).transpose(-1, -2) + n = x.shape[0] + + # Expand the class token to the full batch + if self.fc: + batch_class_token = self.class_token.expand(n, -1, -1) + x = torch.cat([batch_class_token, x], dim=1) + + x = self.encoder(x) + + # Classifier "token" as used by standard language architectures + if self.fc: + x = x[:, 0] + x = self.fc(x) + + return x + + def _get_pos_embedding( + self, pos_embed_type: Optional[Union[str, PosEmbedType]] + ) -> Optional[nn.Parameter]: + """ + Gets position embeddings. If `pos_embed_type` is "learnable", will return None as it will be handled + by the encoder module. + """ + if pos_embed_type is None: + pos_embed = nn.Parameter( + torch.zeros(1, self.seq_length, self.embedding_dim) + ) + pos_embed.requires_grad = False + return pos_embed + + pos_embed_type = PosEmbedType(pos_embed_type) + + if pos_embed_type == PosEmbedType.LEARN: + return None # will be initialized inside the Encoder + + elif pos_embed_type == PosEmbedType.SINCOS: + if self.spatial_dims != 2 and self.spatial_dims != 3: + raise ValueError( + f"{self.spatial_dims}D sincos position embedding not implemented" + ) + elif self.spatial_dims == 2 and self.embedding_dim % 4: + raise ValueError( + f"embedding_dim must be divisible by 4 for 2D sincos position embedding. Got embedding_dim={self.embedding_dim}" + ) + elif self.spatial_dims == 3 and self.embedding_dim % 6: + raise ValueError( + f"embedding_dim must be divisible by 6 for 3D sincos position embedding. Got embedding_dim={self.embedding_dim}" + ) + grid_size = [] + for in_size, pa_size in zip(self.img_size, self.patch_size): + grid_size.append(in_size // pa_size) + pos_embed = build_sincos_position_embedding( + grid_size, self.embedding_dim, self.spatial_dims + ) + if self.classification: + pos_embed = torch.nn.Parameter( + torch.cat([torch.zeros(1, 1, self.embedding_dim), pos_embed], dim=1) + ) # add 0 for class token pos embedding + pos_embed.requires_grad = False + return pos_embed + + def _init_layers(self): + """ + Initializes some layers, based on torchvision's implementation: https://pytorch.org/vision/main/ + _modules/torchvision/models/vision_transformer.html + """ + fan_in = self.conv_proj.in_channels * np.prod(self.conv_proj.kernel_size) + nn.init.trunc_normal_(self.conv_proj.weight, std=math.sqrt(1 / fan_in)) + nn.init.zeros_(self.conv_proj.bias) + + @classmethod + def _check_embedding_dim(cls, embedding_dim: int, num_heads: int) -> None: + """ + Checks consistency between embedding dimension and number of heads. + """ + if embedding_dim % num_heads != 0: + raise ValueError( + f"embedding_dim should be divisible by num_heads. Got embedding_dim={embedding_dim} " + f" and num_heads={num_heads}" + ) + + @classmethod + def _check_patch_size( + cls, img_size: Tuple[int, ...], patch_size: Tuple[int, ...] + ) -> None: + """ + Checks consistency between image size and patch size. 
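+        Each image dimension must be an exact multiple of the corresponding patch dimension,
+        otherwise the patches cannot tile the image.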
+ """ + for i, p in zip(img_size, patch_size): + if i % p != 0: + raise ValueError( + f"img_size should be divisible by patch_size. Got img_size={img_size} " + f" and patch_size={patch_size}" + ) + + +class SOTAViT(str, Enum): + """Supported ViT networks.""" + + B_16 = "ViT-B/16" + B_32 = "ViT-B/32" + L_16 = "ViT-L/16" + L_32 = "ViT-L/32" + + +def get_vit( + name: Union[str, SOTAViT], + num_outputs: Optional[int], + output_act: ActivationParameters = None, + pretrained: bool = False, +) -> ViT: + """ + To get a Vision Transformer implemented in the [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale] + (https://arxiv.org/pdf/2010.11929) paper. + + Only the last fully connected layer will be changed to match `num_outputs`. + + The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not + used pretrained weights, as it is task specific. + + .. warning:: `ViT-B/16`, `ViT-B/32`, `ViT-L/16` and `ViT-L/32` work with 2D images of size (224, 224), with 3 channels. + + Parameters + ---------- + model : Union[str, SOTAViT] + The name of the Vision Transformer. Available networks are `ViT-B/16`, `ViT-B/32`, `ViT-L/16` and `ViT-L/32`. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + output_act : ActivationParameters (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + pretrained : bool (optional, default=False) + whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// + pytorch.org/vision/main/models/vision_transformer.html). + + Returns + ------- + ViT + The network, with potentially pretrained weights. 
+ """ + name = SOTAViT(name) + if name == SOTAViT.B_16: + in_shape = (3, 224, 224) + patch_size = 16 + embedding_dim = 768 + mlp_dim = 3072 + num_layers = 12 + num_heads = 12 + model_url = ViT_B_16_Weights.DEFAULT.url + elif name == SOTAViT.B_32: + in_shape = (3, 224, 224) + patch_size = 32 + embedding_dim = 768 + mlp_dim = 3072 + num_layers = 12 + num_heads = 12 + model_url = ViT_B_32_Weights.DEFAULT.url + elif name == SOTAViT.L_16: + in_shape = (3, 224, 224) + patch_size = 16 + embedding_dim = 1024 + mlp_dim = 4096 + num_layers = 24 + num_heads = 16 + model_url = ViT_L_16_Weights.DEFAULT.url + elif name == SOTAViT.L_32: + in_shape = (3, 224, 224) + patch_size = 32 + embedding_dim = 1024 + mlp_dim = 4096 + num_layers = 24 + num_heads = 16 + model_url = ViT_L_32_Weights.DEFAULT.url + + # pylint: disable=possibly-used-before-assignment + vit = ViT( + in_shape=in_shape, + patch_size=patch_size, + num_outputs=num_outputs, + embedding_dim=embedding_dim, + mlp_dim=mlp_dim, + num_heads=num_heads, + num_layers=num_layers, + output_act=output_act, + ) + + if pretrained: + pretrained_dict = load_state_dict_from_url(model_url, progress=True) + if num_outputs is None: + del pretrained_dict["class_token"] + pretrained_dict["encoder.pos_embedding"] = pretrained_dict[ + "encoder.pos_embedding" + ][:, 1:] # remove class token position embedding + fc_layers = deepcopy(vit.fc) + vit.fc = None + vit.load_state_dict(_state_dict_adapter(pretrained_dict)) + vit.fc = fc_layers + + return vit + + +def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: + """ + A mapping between torchvision's layer names and ours. + """ + state_dict = {k: v for k, v in state_dict.items() if "heads" not in k} + + mappings = [ + ("ln_", "norm"), + ("ln", "norm"), + (r"encoder_layer_(\d+)", r"\1"), + ] + + for key in list(state_dict.keys()): + new_key = key + for transform in mappings: + new_key = re.sub(transform[0], transform[1], new_key) + state_dict[new_key] = state_dict.pop(key) + + return state_dict diff --git a/clinicadl/utils/enum.py b/clinicadl/utils/enum.py index 3e9031534..4e5c7721c 100644 --- a/clinicadl/utils/enum.py +++ b/clinicadl/utils/enum.py @@ -1,6 +1,17 @@ from enum import Enum +class CaseInsensitiveEnum(str, Enum): + @classmethod + def _missing_(cls, value): + if isinstance(value, str): + value = value.lower() + for member in cls: + if member.lower() == value: + return member + return None + + class BaseEnum(Enum): """Base Enum object that will print valid inputs if the value passed is not valid.""" diff --git a/tests/unittests/monai_networks/config/__init__.py b/tests/unittests/monai_networks/config/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/monai_networks/config/test_autoencoder.py b/tests/unittests/monai_networks/config/test_autoencoder.py deleted file mode 100644 index 707695434..000000000 --- a/tests/unittests/monai_networks/config/test_autoencoder.py +++ /dev/null @@ -1,171 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.autoencoder import ( - AutoEncoderConfig, - VarAutoEncoderConfig, -) - - -@pytest.fixture -def dummy_arguments(): - args = { - "spatial_dims": 2, - "in_channels": 1, - "out_channels": 1, - "channels": [2, 4], - "latent_size": 16, - } - return args - - -@pytest.fixture( - params=[ - {"in_shape": (1, 10, 10), "strides": (1, 1), "dropout": 1.1}, - {"in_shape": (1, 10, 10), "strides": (1, 1), "kernel_size": 4}, - {"in_shape": (1, 10, 10), "strides": (1, 1), "kernel_size": 
(3,)}, - {"in_shape": (1, 10, 10), "strides": (1, 1), "kernel_size": (3, 3, 3)}, - {"in_shape": (1, 10, 10), "strides": (1, 1), "up_kernel_size": 4}, - {"in_shape": (1, 10, 10), "strides": (1, 1), "up_kernel_size": (3,)}, - {"in_shape": (1, 10, 10), "strides": (1, 1), "up_kernel_size": (3, 3, 3)}, - { - "in_shape": (1, 10, 10), - "strides": (1, 1), - "inter_channels": (2, 2), - "inter_dilations": (2,), - }, - {"in_shape": (1, 10, 10), "strides": (1, 1), "inter_dilations": (2, 2)}, - {"in_shape": (1, 10, 10), "strides": (1, 1), "padding": (1, 1, 1)}, - {"in_shape": (1, 10, 10), "strides": (1, 2, 3)}, - {"in_shape": (1, 10, 10), "strides": (1, (1, 2, 3))}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -@pytest.fixture( - params=[ - {"in_shape": (1,), "strides": (1, 1)}, - {"in_shape": (1, 10), "strides": (1, 1)}, - ] -) -def bad_inputs_vae(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - AutoEncoderConfig(**bad_inputs) - with pytest.raises(ValidationError): - VarAutoEncoderConfig(**bad_inputs) - - -def test_fails_validations_vae(bad_inputs_vae): - with pytest.raises(ValidationError): - VarAutoEncoderConfig(**bad_inputs_vae) - - -@pytest.fixture( - params=[ - { - "in_shape": (1, 10, 10), - "strides": (1, 1), - "dropout": 0.5, - "kernel_size": 5, - "inter_channels": (2, 2), - "inter_dilations": (3, 3), - "padding": (2, 2), - }, - { - "in_shape": (1, 10, 10), - "strides": ((1, 2), 1), - "kernel_size": (3, 3), - "padding": 2, - "up_kernel_size": 5, - }, - ] -) -def good_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - AutoEncoderConfig(**good_inputs) - VarAutoEncoderConfig(**good_inputs) - - -def test_AutoEncoderConfig(): - config = AutoEncoderConfig( - spatial_dims=2, - in_channels=1, - out_channels=1, - channels=[2, 4], - strides=[1, 1], - kernel_size=(3, 5), - up_kernel_size=(3, 3), - num_res_units=1, - inter_channels=(2, 2), - inter_dilations=(3, 3), - num_inter_units=1, - norm=("BATCh", {"eps": 0.1}), - dropout=0.1, - bias=False, - padding=1, - ) - assert config.network == "AutoEncoder" - assert config.spatial_dims == 2 - assert config.in_channels == 1 - assert config.out_channels == 1 - assert config.channels == (2, 4) - assert config.strides == (1, 1) - assert config.kernel_size == (3, 5) - assert config.num_res_units == 1 - assert config.inter_channels == (2, 2) - assert config.inter_dilations == (3, 3) - assert config.num_inter_units == 1 - assert config.norm == ("batch", {"eps": 0.1}) - assert config.act == "DefaultFromLibrary" - assert config.dropout == 0.1 - assert not config.bias - assert config.padding == 1 - - -def test_VarAutoEncoderConfig(): - config = VarAutoEncoderConfig( - spatial_dims=2, - in_shape=(1, 10, 10), - out_channels=1, - latent_size=16, - channels=[2, 4], - strides=[1, 1], - kernel_size=(3, 5), - up_kernel_size=(3, 3), - num_res_units=1, - inter_channels=(2, 2), - inter_dilations=(3, 3), - num_inter_units=1, - norm=("BATCh", {"eps": 0.1}), - dropout=0.1, - bias=False, - padding=1, - use_sigmoid=False, - ) - assert config.network == "VarAutoEncoder" - assert config.spatial_dims == 2 - assert config.in_shape == (1, 10, 10) - assert config.out_channels == 1 - assert config.latent_size == 16 - assert config.channels == (2, 4) - assert config.strides == (1, 1) - assert config.kernel_size == (3, 5) - assert config.num_res_units == 
1 - assert config.inter_channels == (2, 2) - assert config.inter_dilations == (3, 3) - assert config.num_inter_units == 1 - assert config.norm == ("batch", {"eps": 0.1}) - assert config.act == "DefaultFromLibrary" - assert config.dropout == 0.1 - assert not config.bias - assert config.padding == 1 - assert not config.use_sigmoid diff --git a/tests/unittests/monai_networks/config/test_classifier.py b/tests/unittests/monai_networks/config/test_classifier.py deleted file mode 100644 index f63b774d5..000000000 --- a/tests/unittests/monai_networks/config/test_classifier.py +++ /dev/null @@ -1,132 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.classifier import ( - ClassifierConfig, - CriticConfig, - DiscriminatorConfig, -) - - -@pytest.fixture -def dummy_arguments(): - args = { - "classes": 2, - "channels": [2, 4], - } - return args - - -@pytest.fixture( - params=[ - {"in_shape": (3,), "strides": (1, 1)}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "dropout": 1.1}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "kernel_size": 4}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "kernel_size": (3,)}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "kernel_size": (3, 3, 3)}, - {"in_shape": (1, 3, 3), "strides": (1, 2, 3)}, - {"in_shape": (1, 3, 3), "strides": (1, (1, 2, 3))}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - ClassifierConfig(**bad_inputs) - with pytest.raises(ValidationError): - CriticConfig(**bad_inputs) - with pytest.raises(ValidationError): - DiscriminatorConfig(**bad_inputs) - - -@pytest.fixture( - params=[ - {"in_shape": (1, 3, 3), "strides": (1, 1), "dropout": 0.5, "kernel_size": 5}, - {"in_shape": (1, 3, 3), "strides": ((1, 2), 1), "kernel_size": (3, 3)}, - ] -) -def good_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - ClassifierConfig(**good_inputs) - CriticConfig(**good_inputs) - DiscriminatorConfig(**good_inputs) - - -def test_ClassifierConfig(): - config = ClassifierConfig( - in_shape=(1, 3, 3), - classes=2, - channels=[2, 4], - strides=[1, 1], - kernel_size=(3, 5), - num_res_units=1, - act=("ELU", {"alpha": 2.0}), - dropout=0.1, - bias=False, - last_act=None, - ) - assert config.network == "Classifier" - assert config.in_shape == (1, 3, 3) - assert config.classes == 2 - assert config.channels == (2, 4) - assert config.strides == (1, 1) - assert config.kernel_size == (3, 5) - assert config.num_res_units == 1 - assert config.act == ("elu", {"alpha": 2.0}) - assert config.norm == "DefaultFromLibrary" - assert config.dropout == 0.1 - assert not config.bias - assert config.last_act is None - - -def test_CriticConfig(): - config = CriticConfig( - in_shape=(1, 3, 3), - channels=[2, 4], - strides=[1, 1], - kernel_size=(3, 5), - num_res_units=1, - act=("ELU", {"alpha": 2.0}), - dropout=0.1, - bias=False, - ) - assert config.network == "Critic" - assert config.in_shape == (1, 3, 3) - assert config.channels == (2, 4) - assert config.strides == (1, 1) - assert config.kernel_size == (3, 5) - assert config.num_res_units == 1 - assert config.act == ("elu", {"alpha": 2.0}) - assert config.norm == "DefaultFromLibrary" - assert config.dropout == 0.1 - assert not config.bias - - -def test_DiscriminatorConfig(): - config = DiscriminatorConfig( - in_shape=(1, 3, 3), - channels=[2, 4], - strides=[1, 1], - kernel_size=(3, 5), - 
num_res_units=1, - act=("ELU", {"alpha": 2.0}), - dropout=0.1, - bias=False, - last_act=("eLu", {"alpha": 0.5}), - ) - assert config.network == "Discriminator" - assert config.in_shape == (1, 3, 3) - assert config.channels == (2, 4) - assert config.strides == (1, 1) - assert config.kernel_size == (3, 5) - assert config.num_res_units == 1 - assert config.act == ("elu", {"alpha": 2.0}) - assert config.norm == "DefaultFromLibrary" - assert config.dropout == 0.1 - assert not config.bias - assert config.last_act == ("elu", {"alpha": 0.5}) diff --git a/tests/unittests/monai_networks/config/test_config.py b/tests/unittests/monai_networks/config/test_config.py new file mode 100644 index 000000000..9da6756f5 --- /dev/null +++ b/tests/unittests/monai_networks/config/test_config.py @@ -0,0 +1,232 @@ +import pytest + +from clinicadl.monai_networks.config.densenet import ( + DenseNet121Config, + DenseNet161Config, + DenseNet169Config, + DenseNet201Config, +) +from clinicadl.monai_networks.config.resnet import ( + ResNet18Config, + ResNet34Config, + ResNet50Config, + ResNet101Config, + ResNet152Config, +) +from clinicadl.monai_networks.config.senet import ( + SEResNet50Config, + SEResNet101Config, + SEResNet152Config, +) +from clinicadl.monai_networks.config.vit import ( + ViTB16Config, + ViTB32Config, + ViTL16Config, + ViTL32Config, +) + + +@pytest.mark.parametrize( + "config_class", + [DenseNet121Config, DenseNet161Config, DenseNet169Config, DenseNet201Config], +) +def test_sota_densenet_config(config_class): + config = config_class(pretrained=True, num_outputs=None) + + assert config.num_outputs is None + assert config.pretrained + assert config.output_act == "DefaultFromLibrary" + assert config._type == "sota-DenseNet" + + +@pytest.mark.parametrize( + "config_class", + [ResNet18Config, ResNet34Config, ResNet50Config, ResNet101Config, ResNet152Config], +) +def test_sota_resnet_config(config_class): + config = config_class(pretrained=False, num_outputs=None) + + assert config.num_outputs is None + assert not config.pretrained + assert config.output_act == "DefaultFromLibrary" + assert config._type == "sota-ResNet" + + +@pytest.mark.parametrize( + "config_class", [SEResNet50Config, SEResNet101Config, SEResNet152Config] +) +def test_sota_senet_config(config_class): + config = config_class(output_act="relu", num_outputs=1) + + assert config.num_outputs == 1 + assert config.pretrained == "DefaultFromLibrary" + assert config.output_act == "relu" + assert config._type == "sota-SEResNet" + + +@pytest.mark.parametrize( + "config_class", [ViTB16Config, ViTB32Config, ViTL16Config, ViTL32Config] +) +def test_sota_vit_config(config_class): + config = config_class(output_act="relu", num_outputs=1) + + assert config.num_outputs == 1 + assert config.pretrained == "DefaultFromLibrary" + assert config.output_act == "relu" + assert config._type == "sota-ViT" + + +def test_autoencoder_config(): + from clinicadl.monai_networks.config.autoencoder import AutoEncoderConfig + + config = AutoEncoderConfig( + in_shape=(1, 10, 10), + latent_size=1, + conv_args={"channels": [1]}, + output_act="softmax", + ) + assert config.in_shape == (1, 10, 10) + assert config.conv_args.channels == [1] + assert config.output_act == "softmax" + assert config.out_channels == "DefaultFromLibrary" + + +def test_vae_config(): + from clinicadl.monai_networks.config.autoencoder import VAEConfig + + config = VAEConfig( + in_shape=(1, 10), + latent_size=1, + conv_args={"channels": [1], "adn_ordering": "NA"}, + output_act=("elu", {"alpha": 0.1}), + ) + 
assert config.in_shape == (1, 10) + assert config.conv_args.adn_ordering == "NA" + assert config.output_act == ("elu", {"alpha": 0.1}) + assert config.mlp_args == "DefaultFromLibrary" + + +def test_cnn_config(): + from clinicadl.monai_networks.config.cnn import CNNConfig + + config = CNNConfig( + in_shape=(2, 10, 10, 10), num_outputs=1, conv_args={"channels": [1]} + ) + assert config.in_shape == (2, 10, 10, 10) + assert config.conv_args.channels == [1] + assert config.mlp_args == "DefaultFromLibrary" + + +def test_generator_config(): + from clinicadl.monai_networks.config.generator import GeneratorConfig + + config = GeneratorConfig( + start_shape=(2, 10, 10), latent_size=2, conv_args={"channels": [1]} + ) + assert config.start_shape == (2, 10, 10) + assert config.conv_args.channels == [1] + assert config.mlp_args == "DefaultFromLibrary" + + +def test_conv_decoder_config(): + from clinicadl.monai_networks.config.conv_decoder import ConvDecoderConfig + + config = ConvDecoderConfig( + in_channels=1, spatial_dims=2, channels=[1, 2], kernel_size=(3, 4) + ) + assert config.in_channels == 1 + assert config.kernel_size == (3, 4) + assert config.stride == "DefaultFromLibrary" + + +def test_conv_encoder_config(): + from clinicadl.monai_networks.config.conv_encoder import ConvEncoderConfig + + config = ConvEncoderConfig( + in_channels=1, spatial_dims=2, channels=[1, 2], kernel_size=[(3, 4), (4, 5)] + ) + assert config.in_channels == 1 + assert config.kernel_size == [(3, 4), (4, 5)] + assert config.padding == "DefaultFromLibrary" + + +def test_mlp_config(): + from clinicadl.monai_networks.config.mlp import MLPConfig + + config = MLPConfig( + in_channels=1, out_channels=1, hidden_channels=[2, 3], dropout=0.1 + ) + assert config.in_channels == 1 + assert config.dropout == 0.1 + assert config.act == "DefaultFromLibrary" + + +def test_resnet_config(): + from clinicadl.monai_networks.config.resnet import ResNetConfig + + config = ResNetConfig( + spatial_dims=1, in_channels=1, num_outputs=None, block_type="bottleneck" + ) + assert config.num_outputs is None + assert config.block_type == "bottleneck" + assert config.bottleneck_reduction == "DefaultFromLibrary" + + +def test_seresnet_config(): + from clinicadl.monai_networks.config.senet import SEResNetConfig + + config = SEResNetConfig( + spatial_dims=1, + in_channels=1, + num_outputs=None, + block_type="bottleneck", + se_reduction=2, + ) + assert config.num_outputs is None + assert config.block_type == "bottleneck" + assert config.se_reduction == 2 + assert config.bottleneck_reduction == "DefaultFromLibrary" + + +def test_densenet_config(): + from clinicadl.monai_networks.config.densenet import DenseNetConfig + + config = DenseNetConfig( + spatial_dims=1, in_channels=1, num_outputs=2, n_dense_layers=(1, 2) + ) + assert config.num_outputs == 2 + assert config.n_dense_layers == (1, 2) + assert config.growth_rate == "DefaultFromLibrary" + + +def test_vit_config(): + from clinicadl.monai_networks.config.vit import ViTConfig + + config = ViTConfig(in_shape=(1, 10), patch_size=2, num_outputs=1, embedding_dim=42) + assert config.num_outputs == 1 + assert config.embedding_dim == 42 + assert config.mlp_dim == "DefaultFromLibrary" + + +def test_unet_config(): + from clinicadl.monai_networks.config.unet import UNetConfig + + config = UNetConfig(spatial_dims=1, in_channels=1, out_channels=1, channels=(4, 8)) + assert config.out_channels == 1 + assert config.channels == (4, 8) + assert config.output_act == "DefaultFromLibrary" + + +def test_att_unet_config(): + from 
clinicadl.monai_networks.config.unet import AttentionUNetConfig + + config = AttentionUNetConfig( + spatial_dims=1, + in_channels=1, + out_channels=1, + channels=(4, 8), + output_act="softmax", + ) + assert config.spatial_dims == 1 + assert config.output_act == "softmax" + assert config.dropout == "DefaultFromLibrary" diff --git a/tests/unittests/monai_networks/config/test_densenet.py b/tests/unittests/monai_networks/config/test_densenet.py deleted file mode 100644 index a18b86f09..000000000 --- a/tests/unittests/monai_networks/config/test_densenet.py +++ /dev/null @@ -1,48 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.densenet import DenseNetConfig - - -@pytest.fixture -def dummy_arguments(): - args = { - "spatial_dims": 2, - "in_channels": 1, - "out_channels": 1, - } - return args - - -def test_fails_validations(dummy_arguments): - with pytest.raises(ValidationError): - DenseNetConfig(**{**dummy_arguments, **{"dropout_prob": 1.1}}) - - -def test_passes_validations(dummy_arguments): - DenseNetConfig(**{**dummy_arguments, **{"dropout_prob": 0.1}}) - - -def test_DenseNetConfig(): - config = DenseNetConfig( - spatial_dims=2, - in_channels=1, - out_channels=1, - init_features=16, - growth_rate=2, - block_config=(3, 5), - bn_size=1, - norm=("batch", {"eps": 0.5}), - dropout_prob=0.1, - ) - assert config.network == "DenseNet" - assert config.spatial_dims == 2 - assert config.in_channels == 1 - assert config.out_channels == 1 - assert config.init_features == 16 - assert config.growth_rate == 2 - assert config.block_config == (3, 5) - assert config.bn_size == 1 - assert config.norm == ("batch", {"eps": 0.5}) - assert config.act == "DefaultFromLibrary" - assert config.dropout_prob diff --git a/tests/unittests/monai_networks/config/test_factory.py b/tests/unittests/monai_networks/config/test_factory.py index 07c96e2a9..9dcd7fdc1 100644 --- a/tests/unittests/monai_networks/config/test_factory.py +++ b/tests/unittests/monai_networks/config/test_factory.py @@ -9,9 +9,9 @@ def test_create_training_config(): config = config_class( spatial_dims=1, in_channels=2, - out_channels=3, + num_outputs=None, ) - assert config.network == "DenseNet" + assert config.name == "DenseNet" assert config.spatial_dims == 1 assert config.in_channels == 2 - assert config.out_channels == 3 + assert config.num_outputs is None diff --git a/tests/unittests/monai_networks/config/test_fcn.py b/tests/unittests/monai_networks/config/test_fcn.py deleted file mode 100644 index b7991368e..000000000 --- a/tests/unittests/monai_networks/config/test_fcn.py +++ /dev/null @@ -1,97 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.fcn import ( - FullyConnectedNetConfig, - VarFullyConnectedNetConfig, -) - - -@pytest.fixture -def dummy_arguments(): - args = { - "in_channels": 5, - "out_channels": 1, - "hidden_channels": [3, 2], - "latent_size": 16, - "encode_channels": [2, 3], - "decode_channels": [3, 2], - } - return args - - -@pytest.fixture( - params=[ - {"dropout": 1.1}, - {"adn_ordering": "NDB"}, - {"adn_ordering": "NND"}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - FullyConnectedNetConfig(**bad_inputs) - with pytest.raises(ValidationError): - VarFullyConnectedNetConfig(**bad_inputs) - - -@pytest.fixture( - params=[ - {"dropout": 0.5, "adn_ordering": "DAN"}, - {"adn_ordering": "AN"}, - ] -) -def 
good_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - FullyConnectedNetConfig(**good_inputs) - VarFullyConnectedNetConfig(**good_inputs) - - -def test_FullyConnectedNetConfig(): - config = FullyConnectedNetConfig( - in_channels=5, - out_channels=1, - hidden_channels=[3, 2], - dropout=None, - act="prelu", - bias=False, - adn_ordering="ADN", - ) - assert config.network == "FullyConnectedNet" - assert config.in_channels == 5 - assert config.out_channels == 1 - assert config.hidden_channels == (3, 2) - assert config.dropout is None - assert config.act == "prelu" - assert not config.bias - assert config.adn_ordering == "ADN" - - -def test_VarFullyConnectedNetConfig(): - config = VarFullyConnectedNetConfig( - in_channels=5, - out_channels=1, - latent_size=16, - encode_channels=[2, 3], - decode_channels=[3, 2], - dropout=0.1, - act="prelu", - bias=False, - adn_ordering="ADN", - ) - assert config.network == "VarFullyConnectedNet" - assert config.in_channels == 5 - assert config.out_channels == 1 - assert config.latent_size == 16 - assert config.encode_channels == (2, 3) - assert config.decode_channels == (3, 2) - assert config.dropout == 0.1 - assert config.act == "prelu" - assert not config.bias - assert config.adn_ordering == "ADN" diff --git a/tests/unittests/monai_networks/config/test_generator.py b/tests/unittests/monai_networks/config/test_generator.py deleted file mode 100644 index 9ea1cd442..000000000 --- a/tests/unittests/monai_networks/config/test_generator.py +++ /dev/null @@ -1,72 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.generator import GeneratorConfig - - -@pytest.fixture -def dummy_arguments(): - args = {"latent_shape": (5,), "channels": (2, 4)} - return args - - -@pytest.fixture( - params=[ - {"start_shape": (3,), "strides": (1, 1)}, - {"start_shape": (1, 3), "strides": (1, 1), "dropout": 1.1}, - {"start_shape": (1, 3), "strides": (1, 1), "kernel_size": 4}, - {"start_shape": (1, 3), "strides": (1, 1), "kernel_size": (3, 3)}, - {"start_shape": (1, 3), "strides": (1, 2, 3)}, - {"start_shape": (1, 3), "strides": (1, (1, 2))}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - GeneratorConfig(**bad_inputs) - - -@pytest.fixture( - params=[ - {"start_shape": (1, 3), "strides": (1, 1), "dropout": 0.5, "kernel_size": 5}, - { - "start_shape": (1, 3, 3, 3), - "strides": ((1, 2, 3), 1), - "kernel_size": (3, 3, 3), - }, - ] -) -def good_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - GeneratorConfig(**good_inputs) - - -def test_GeneratorConfig(): - config = GeneratorConfig( - latent_shape=(3,), - start_shape=(1, 3), - channels=[2, 4], - strides=[1, 1], - kernel_size=(3,), - num_res_units=1, - act="SIGMOID", - dropout=0.1, - bias=False, - ) - assert config.network == "Generator" - assert config.latent_shape == (3,) - assert config.start_shape == (1, 3) - assert config.channels == (2, 4) - assert config.strides == (1, 1) - assert config.kernel_size == (3,) - assert config.num_res_units == 1 - assert config.act == "sigmoid" - assert config.norm == "DefaultFromLibrary" - assert config.dropout == 0.1 - assert not config.bias diff --git a/tests/unittests/monai_networks/config/test_regressor.py b/tests/unittests/monai_networks/config/test_regressor.py 
deleted file mode 100644 index 920464cc2..000000000 --- a/tests/unittests/monai_networks/config/test_regressor.py +++ /dev/null @@ -1,72 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.regressor import RegressorConfig - - -@pytest.fixture -def dummy_arguments(): - args = { - "out_shape": (1,), - "channels": [2, 4], - } - return args - - -@pytest.fixture( - params=[ - {"in_shape": (3,), "strides": (1, 1)}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "dropout": 1.1}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "kernel_size": 4}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "kernel_size": (3,)}, - {"in_shape": (1, 3, 3), "strides": (1, 1), "kernel_size": (3, 3, 3)}, - {"in_shape": (1, 3, 3), "strides": (1, 2, 3)}, - {"in_shape": (1, 3, 3), "strides": (1, (1, 2, 3))}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - RegressorConfig(**bad_inputs) - - -@pytest.fixture( - params=[ - {"in_shape": (1, 3, 3), "strides": (1, 1), "dropout": 0.5, "kernel_size": 5}, - {"in_shape": (1, 3, 3), "strides": ((1, 2), 1), "kernel_size": (3, 3)}, - ] -) -def good_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - RegressorConfig(**good_inputs) - - -def test_RegressorConfig(): - config = RegressorConfig( - in_shape=(1, 3, 3), - out_shape=(1,), - channels=[2, 4], - strides=[1, 1], - kernel_size=(3, 5), - num_res_units=1, - act=("ELU", {"alpha": 2.0}), - dropout=0.1, - bias=False, - ) - assert config.network == "Regressor" - assert config.in_shape == (1, 3, 3) - assert config.out_shape == (1,) - assert config.channels == (2, 4) - assert config.strides == (1, 1) - assert config.kernel_size == (3, 5) - assert config.num_res_units == 1 - assert config.act == ("elu", {"alpha": 2.0}) - assert config.norm == "DefaultFromLibrary" - assert config.dropout == 0.1 - assert not config.bias diff --git a/tests/unittests/monai_networks/config/test_resnet.py b/tests/unittests/monai_networks/config/test_resnet.py deleted file mode 100644 index b238a3c93..000000000 --- a/tests/unittests/monai_networks/config/test_resnet.py +++ /dev/null @@ -1,83 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.resnet import ResNetConfig - - -@pytest.fixture -def dummy_arguments(): - args = { - "block": "basic", - "layers": (2, 2, 2, 2), - } - return args - - -@pytest.fixture( - params=[ - {"block_inplanes": (2, 4, 8)}, - {"block_inplanes": (2, 4, 8, 16), "conv1_t_size": (3, 3)}, - {"block_inplanes": (2, 4, 8, 16), "conv1_t_stride": (3, 3)}, - {"block_inplanes": (2, 4, 8, 16), "shortcut_type": "C"}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - ResNetConfig(**bad_inputs) - - -@pytest.fixture( - params=[ - { - "block_inplanes": (2, 4, 8, 16), - "conv1_t_size": (3, 3, 3), - "conv1_t_stride": (3, 3, 3), - "shortcut_type": "B", - }, - {"block_inplanes": (2, 4, 8, 16), "conv1_t_size": 3, "conv1_t_stride": 3}, - ] -) -def good_inputs(request: pytest.FixtureRequest, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - ResNetConfig(**good_inputs) - - -def test_ResNetConfig(): - config = ResNetConfig( - block="bottleneck", - layers=(2, 2, 2, 2), - 
block_inplanes=(2, 4, 8, 16), - spatial_dims=3, - n_input_channels=3, - conv1_t_size=3, - conv1_t_stride=4, - no_max_pool=True, - shortcut_type="A", - widen_factor=0.8, - num_classes=3, - feed_forward=False, - bias_downsample=False, - act=("relu", {"inplace": False}), - ) - assert config.network == "ResNet" - assert config.block == "bottleneck" - assert config.layers == (2, 2, 2, 2) - assert config.block_inplanes == (2, 4, 8, 16) - assert config.spatial_dims == 3 - assert config.n_input_channels == 3 - assert config.conv1_t_size == 3 - assert config.conv1_t_stride == 4 - assert config.no_max_pool - assert config.shortcut_type == "A" - assert config.widen_factor == 0.8 - assert config.num_classes == 3 - assert not config.feed_forward - assert not config.bias_downsample - assert config.act == ("relu", {"inplace": False}) diff --git a/tests/unittests/monai_networks/config/test_resnet_features.py b/tests/unittests/monai_networks/config/test_resnet_features.py deleted file mode 100644 index 9f6131974..000000000 --- a/tests/unittests/monai_networks/config/test_resnet_features.py +++ /dev/null @@ -1,56 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.resnet import ResNetFeaturesConfig - - -@pytest.fixture( - params=[ - {"model_name": "abc"}, - {"model_name": "resnet18", "pretrained": True, "spatial_dims": 2}, - {"model_name": "resnet18", "pretrained": True, "in_channels": 2}, - { - "model_name": "resnet18", - "in_channels": 2, - }, # pretrained should be set to False - {"model_name": "resnet18", "spatial_dims": 2}, - ] -) -def bad_inputs(request: pytest.FixtureRequest): - return request.param - - -def test_fails_validations(bad_inputs: dict): - with pytest.raises(ValidationError): - ResNetFeaturesConfig(**bad_inputs) - - -@pytest.fixture( - params=[ - {"model_name": "resnet18", "pretrained": True, "spatial_dims": 3}, - {"model_name": "resnet18", "pretrained": True, "in_channels": 1}, - {"model_name": "resnet18", "pretrained": True}, - {"model_name": "resnet18", "spatial_dims": 3}, - {"model_name": "resnet18", "in_channels": 1}, - ] -) -def good_inputs(request: pytest.FixtureRequest): - return {**request.param} - - -def test_passes_validations(good_inputs: dict): - ResNetFeaturesConfig(**good_inputs) - - -def test_ResNetFeaturesConfig(): - config = ResNetFeaturesConfig( - model_name="resnet200", - pretrained=False, - spatial_dims=2, - in_channels=2, - ) - assert config.network == "ResNetFeatures" - assert config.model_name == "resnet200" - assert not config.pretrained - assert config.spatial_dims == 2 - assert config.in_channels == 2 diff --git a/tests/unittests/monai_networks/config/test_segresnet.py b/tests/unittests/monai_networks/config/test_segresnet.py deleted file mode 100644 index 44b946d49..000000000 --- a/tests/unittests/monai_networks/config/test_segresnet.py +++ /dev/null @@ -1,41 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.resnet import SegResNetConfig - - -def test_fails_validations(): - with pytest.raises(ValidationError): - SegResNetConfig(dropout_prob=1.1) - - -def test_passes_validations(): - SegResNetConfig(dropout_prob=0.5) - - -def test_SegResNetConfig(): - config = SegResNetConfig( - spatial_dims=2, - init_filters=3, - in_channels=1, - out_channels=1, - dropout_prob=0.1, - act=("ELU", {"inplace": False}), - norm=("group", {"num_groups": 4}), - use_conv_final=False, - blocks_down=[1, 2, 3], - blocks_up=[3, 2, 1], - upsample_mode="pixelshuffle", - ) - assert 
config.network == "SegResNet" - assert config.spatial_dims == 2 - assert config.init_filters == 3 - assert config.in_channels == 1 - assert config.out_channels == 1 - assert config.dropout_prob == 0.1 - assert config.act == ("elu", {"inplace": False}) - assert config.norm == ("group", {"num_groups": 4}) - assert not config.use_conv_final - assert config.blocks_down == (1, 2, 3) - assert config.blocks_up == (3, 2, 1) - assert config.upsample_mode == "pixelshuffle" diff --git a/tests/unittests/monai_networks/config/test_unet.py b/tests/unittests/monai_networks/config/test_unet.py deleted file mode 100644 index d331e0a14..000000000 --- a/tests/unittests/monai_networks/config/test_unet.py +++ /dev/null @@ -1,133 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.unet import AttentionUnetConfig, UNetConfig - - -@pytest.fixture -def dummy_arguments(): - args = { - "spatial_dims": 2, - "in_channels": 1, - "out_channels": 1, - } - return args - - -@pytest.fixture( - params=[ - {"strides": (1, 1), "channels": (2, 4, 8), "adn_ordering": "NDB"}, - {"strides": (1, 1), "channels": (2, 4, 8), "adn_ordering": "NND"}, - {"strides": (1, 1), "channels": (2, 4, 8), "dropout": 1.1}, - {"strides": (1, 1), "channels": (2, 4, 8), "kernel_size": 4}, - {"strides": (1, 1), "channels": (2, 4, 8), "kernel_size": (3,)}, - {"strides": (1, 1), "channels": (2, 4, 8), "kernel_size": (3, 3, 3)}, - {"strides": (1, 1), "channels": (2, 4, 8), "up_kernel_size": 4}, - {"strides": (1, 1), "channels": (2, 4, 8), "up_kernel_size": (3,)}, - {"strides": (1, 1), "channels": (2, 4, 8), "up_kernel_size": (3, 3, 3)}, - {"strides": (1, 2, 3), "channels": (2, 4, 8)}, - {"strides": (1, (1, 2, 3)), "channels": (2, 4, 8)}, - {"strides": (), "channels": (2,)}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - UNetConfig(**bad_inputs) - with pytest.raises(ValidationError): - AttentionUnetConfig(**bad_inputs) - - -@pytest.fixture( - params=[ - { - "strides": (1, 1), - "channels": (2, 4, 8), - "adn_ordering": "DAN", - "dropout": 0.5, - "kernel_size": 5, - "up_kernel_size": 5, - }, - { - "strides": ((1, 2),), - "channels": (2, 4), - "adn_ordering": "AN", - "kernel_size": (3, 5), - "up_kernel_size": (3, 5), - }, - ] -) -def good_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - UNetConfig(**good_inputs) - AttentionUnetConfig(**good_inputs) - - -def test_UNetConfig(): - config = UNetConfig( - spatial_dims=2, - in_channels=1, - out_channels=1, - channels=[2, 4], - strides=[1], - kernel_size=(3, 5), - up_kernel_size=(3, 3), - num_res_units=1, - act="ElU", - norm=("BATCh", {"eps": 0.1}), - dropout=0.1, - bias=False, - adn_ordering="A", - ) - assert config.network == "UNet" - assert config.spatial_dims == 2 - assert config.in_channels == 1 - assert config.out_channels == 1 - assert config.channels == (2, 4) - assert config.strides == (1,) - assert config.kernel_size == (3, 5) - assert config.up_kernel_size == (3, 3) - assert config.num_res_units == 1 - assert config.act == "elu" - assert config.norm == ("batch", {"eps": 0.1}) - assert config.dropout == 0.1 - assert not config.bias - assert config.adn_ordering == "A" - - -def test_AttentionUnetConfig(): - config = AttentionUnetConfig( - spatial_dims=2, - in_channels=1, - out_channels=1, - channels=[2, 4], - strides=[1], - kernel_size=(3, 5), - 
up_kernel_size=(3, 3), - num_res_units=1, - act="ElU", - norm="inSTance", - dropout=0.1, - bias=False, - adn_ordering="DA", - ) - assert config.network == "AttentionUnet" - assert config.spatial_dims == 2 - assert config.in_channels == 1 - assert config.out_channels == 1 - assert config.channels == (2, 4) - assert config.strides == (1,) - assert config.kernel_size == (3, 5) - assert config.up_kernel_size == (3, 3) - assert config.num_res_units == 1 - assert config.act == "elu" - assert config.norm == "instance" - assert config.dropout == 0.1 - assert not config.bias - assert config.adn_ordering == "DA" diff --git a/tests/unittests/monai_networks/config/test_vit.py b/tests/unittests/monai_networks/config/test_vit.py deleted file mode 100644 index 737caf05e..000000000 --- a/tests/unittests/monai_networks/config/test_vit.py +++ /dev/null @@ -1,162 +0,0 @@ -import pytest -from pydantic import ValidationError - -from clinicadl.monai_networks.config.vit import ( - ViTAutoEncConfig, - ViTConfig, -) - - -@pytest.fixture -def dummy_arguments(): - args = { - "in_channels": 2, - } - return args - - -@pytest.fixture( - params=[ - {"img_size": (16, 16, 16), "patch_size": (4, 4, 4), "dropout_rate": 1.1}, - {"img_size": (16, 16), "patch_size": 4}, - {"img_size": 16, "patch_size": (4, 4)}, - {"img_size": 16, "patch_size": (4, 4)}, - { - "img_size": (16, 16, 16), - "patch_size": (4, 4, 4), - "hidden_size": 42, - "num_heads": 5, - }, - {"img_size": (16, 16, 16), "patch_size": (4, 4, 4), "num_heads": 5}, - {"img_size": (16, 16, 16), "patch_size": (4, 4, 4), "hidden_size": 42}, - ] -) -def bad_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -@pytest.fixture( - params=[ - {"img_size": (20, 20, 20), "patch_size": (4, 4, 5)}, - {"img_size": (20, 20, 20), "patch_size": (4, 4, 9)}, - ] -) -def bad_inputs_ae(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_fails_validations(bad_inputs): - with pytest.raises(ValidationError): - ViTConfig(**bad_inputs) - with pytest.raises(ValidationError): - ViTAutoEncConfig(**bad_inputs) - - -def test_fails_validations_ae(bad_inputs_ae): - with pytest.raises(ValidationError): - ViTAutoEncConfig(**bad_inputs_ae) - - -@pytest.fixture( - params=[ - { - "img_size": (16, 16, 16), - "patch_size": (4, 4, 4), - "dropout_rate": 0.5, - "hidden_size": 42, - "num_heads": 6, - }, - ] -) -def good_inputs(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -@pytest.fixture( - params=[ - {"img_size": 10, "patch_size": 3}, - ] -) -def good_inputs_vit(request, dummy_arguments): - return {**dummy_arguments, **request.param} - - -def test_passes_validations(good_inputs): - ViTConfig(**good_inputs) - ViTAutoEncConfig(**good_inputs) - - -def test_passes_validations_vit(good_inputs_vit): - ViTConfig(**good_inputs_vit) - - -def test_ViTConfig(): - config = ViTConfig( - in_channels=2, - img_size=16, - patch_size=4, - hidden_size=32, - mlp_dim=4, - num_layers=3, - num_heads=4, - proj_type="perceptron", - pos_embed_type="sincos", - classification=True, - num_classes=3, - dropout_rate=0.1, - spatial_dims=3, - post_activation=None, - qkv_bias=True, - ) - assert config.network == "ViT" - assert config.in_channels == 2 - assert config.img_size == 16 - assert config.patch_size == 4 - assert config.hidden_size == 32 - assert config.mlp_dim == 4 - assert config.num_layers == 3 - assert config.num_heads == 4 - assert config.proj_type == "perceptron" - assert config.pos_embed_type == "sincos" - assert 
config.classification - assert config.num_classes == 3 - assert config.dropout_rate == 0.1 - assert config.spatial_dims == 3 - assert config.post_activation is None - assert config.qkv_bias - assert config.save_attn == "DefaultFromLibrary" - - -def test_ViTAutoEncConfig(): - config = ViTAutoEncConfig( - in_channels=2, - img_size=16, - patch_size=4, - out_channels=2, - deconv_chns=7, - hidden_size=32, - mlp_dim=4, - num_layers=3, - num_heads=4, - proj_type="perceptron", - pos_embed_type="sincos", - dropout_rate=0.1, - spatial_dims=3, - qkv_bias=True, - ) - assert config.network == "ViTAutoEnc" - assert config.in_channels == 2 - assert config.img_size == 16 - assert config.patch_size == 4 - assert config.out_channels == 2 - assert config.deconv_chns == 7 - assert config.hidden_size == 32 - assert config.mlp_dim == 4 - assert config.num_layers == 3 - assert config.num_heads == 4 - assert config.proj_type == "perceptron" - assert config.pos_embed_type == "sincos" - assert config.dropout_rate == 0.1 - assert config.spatial_dims == 3 - assert config.qkv_bias - assert config.save_attn == "DefaultFromLibrary" diff --git a/tests/unittests/monai_networks/nn/__init__.py b/tests/unittests/monai_networks/nn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/monai_networks/nn/test_att_unet.py b/tests/unittests/monai_networks/nn/test_att_unet.py new file mode 100644 index 000000000..711f11142 --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_att_unet.py @@ -0,0 +1,134 @@ +import pytest +import torch + +from clinicadl.monai_networks.nn import AttentionUNet +from clinicadl.monai_networks.nn.layers.utils import ActFunction + +INPUT_1D = torch.randn(2, 1, 16) +INPUT_2D = torch.randn(2, 2, 32, 64) +INPUT_3D = torch.randn(2, 3, 16, 32, 8) + + +@pytest.mark.parametrize( + "input_tensor,out_channels,channels,act,output_act,dropout,error", + [ + (INPUT_1D, 1, (2, 3, 4), "relu", "sigmoid", None, False), + (INPUT_2D, 1, (2, 4, 5), "relu", None, 0.0, False), + (INPUT_3D, 2, (2, 3), None, ("softmax", {"dim": 1}), 0.1, False), + ( + INPUT_3D, + 2, + (2,), + None, + ("softmax", {"dim": 1}), + 0.1, + True, + ), # channels length is less than 2 + ], +) +def test_attentionunet( + input_tensor, out_channels, channels, act, output_act, dropout, error +): + batch_size, in_channels, *img_size = input_tensor.shape + spatial_dims = len(img_size) + if error: + with pytest.raises(ValueError): + AttentionUNet( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + channels=channels, + act=act, + output_act=output_act, + dropout=dropout, + ) + else: + net = AttentionUNet( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + channels=channels, + act=act, + output_act=output_act, + dropout=dropout, + ) + + out = net(input_tensor) + assert out.shape == (batch_size, out_channels, *img_size) + + if output_act: + assert net.output_act is not None + else: + assert net.output_act is None + + assert net.doubleconv[1].conv.out_channels == channels[0] + if dropout: + assert net.doubleconv[1].adn.D.p == dropout + else: + with pytest.raises(AttributeError): + net.doubleconv[1].conv.adn.D + + for i in range(1, len(channels)): + down = getattr(net, f"down{i}").doubleconv + up = getattr(net, f"doubleconv{i}") + att = getattr(net, f"attention{i}") + assert down[0].conv.in_channels == channels[i - 1] + assert down[1].conv.out_channels == channels[i] + assert att.W_g[0].out_channels == channels[i - 1] // 2 + assert att.W_x[0].out_channels 
== channels[i - 1] // 2 + assert up[0].conv.in_channels == channels[i - 1] * 2 + assert up[1].conv.out_channels == channels[i - 1] + for m in (down, up): + if dropout is not None: + assert m[1].adn.D.p == dropout + else: + with pytest.raises(AttributeError): + m[1].adn.D + with pytest.raises(AttributeError): + down = getattr(net, f"down{i+1}") + with pytest.raises(AttributeError): + getattr(net, f"doubleconv{i+1}") + with pytest.raises(AttributeError): + getattr(net, f"attention{i+1}") + + +@pytest.mark.parametrize("act", [act for act in ActFunction]) +def test_activations(act): + batch_size, in_channels, *img_size = INPUT_2D.shape + net = AttentionUNet( + spatial_dims=2, + in_channels=in_channels, + out_channels=2, + channels=(2, 4), + act=act, + output_act=act, + ) + assert net(INPUT_2D).shape == (batch_size, 2, *img_size) + + +def test_activation_parameters(): + in_channels = INPUT_2D.shape[1] + act = ("ELU", {"alpha": 0.1}) + output_act = ("ELU", {"alpha": 0.2}) + net = AttentionUNet( + spatial_dims=2, + in_channels=in_channels, + out_channels=2, + channels=(2, 4), + act=act, + output_act=output_act, + ) + assert isinstance(net.doubleconv[0].adn.A, torch.nn.ELU) + assert net.doubleconv[0].adn.A.alpha == 0.1 + + assert isinstance(net.down1.doubleconv[0].adn.A, torch.nn.ELU) + assert net.down1.doubleconv[0].adn.A.alpha == 0.1 + + assert isinstance(net.upsample1[1].adn.A, torch.nn.ELU) + assert net.upsample1[1].adn.A.alpha == 0.1 + + assert isinstance(net.doubleconv1[1].adn.A, torch.nn.ELU) + assert net.doubleconv1[1].adn.A.alpha == 0.1 + + assert isinstance(net.output_act, torch.nn.ELU) + assert net.output_act.alpha == 0.2 diff --git a/tests/unittests/monai_networks/nn/test_autoencoder.py b/tests/unittests/monai_networks/nn/test_autoencoder.py new file mode 100644 index 000000000..c59874353 --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_autoencoder.py @@ -0,0 +1,215 @@ +import pytest +import torch +from torch.nn import GELU, Sigmoid, Tanh + +from clinicadl.monai_networks.nn import AutoEncoder +from clinicadl.monai_networks.nn.layers.utils import ActFunction + + +@pytest.mark.parametrize( + "input_tensor,kernel_size,stride,padding,dilation,pooling,pooling_indices,unpooling_mode", + [ + (torch.randn(2, 1, 21), 3, 1, 0, 1, ("max", {"kernel_size": 2}), [0], "linear"), + ( + torch.randn(2, 1, 65, 85), + (3, 5), + (1, 2), + 0, + (1, 2), + ("max", {"kernel_size": 2, "stride": 1}), + [0], + "bilinear", + ), + ( + torch.randn(2, 1, 64, 62, 61), # to test output padding + 4, + 2, + (1, 1, 0), + 1, + ("avg", {"kernel_size": 3, "stride": 2}), + [-1], + "convtranspose", + ), + ( + torch.randn(2, 1, 51, 55, 45), + 3, + 2, + 0, + 1, + ("max", {"kernel_size": 2, "ceil_mode": True}), + [0, 1, 2], + "convtranspose", + ), + ( + torch.randn(2, 1, 51, 55, 45), + 3, + 2, + 0, + 1, + [ + ("max", {"kernel_size": 2, "ceil_mode": True}), + ("adaptivemax", {"output_size": (5, 4, 3)}), + ], + [-1, 1], + "convtranspose", + ), + ], +) +def test_output_shape( + input_tensor, + kernel_size, + stride, + padding, + dilation, + pooling, + pooling_indices, + unpooling_mode, +): + net = AutoEncoder( + in_shape=input_tensor.shape[1:], + latent_size=3, + conv_args={ + "channels": [2, 4, 8], + "kernel_size": kernel_size, + "stride": stride, + "padding": padding, + "dilation": dilation, + "pooling": pooling, + "pooling_indices": pooling_indices, + }, + unpooling_mode=unpooling_mode, + ) + output = net(input_tensor) + assert output.shape == input_tensor.shape + + +def test_out_channels(): + input_tensor = 
torch.randn(2, 1, 64, 62, 61) + net = AutoEncoder( + in_shape=input_tensor.shape[1:], + latent_size=3, + conv_args={"channels": [2, 4, 8]}, + mlp_args={"hidden_channels": [8, 4]}, + out_channels=3, + ) + assert net(input_tensor).shape == (2, 3, 64, 62, 61) + assert net.decoder.convolutions.layer2.conv.in_channels == 2 + assert net.decoder.convolutions.layer2.conv.out_channels == 3 + + +@pytest.mark.parametrize( + "pooling,unpooling_mode", + [ + (("adaptivemax", {"output_size": (17, 16, 19)}), "nearest"), + (("adaptivemax", {"output_size": (17, 16, 19)}), "convtranspose"), + (("max", {"kernel_size": 2}), "nearest"), + (("max", {"kernel_size": 2}), "convtranspose"), + ( + ("max", {"kernel_size": 2, "stride": 1, "dilation": 2, "padding": 1}), + "nearest", + ), + ( + ("max", {"kernel_size": 2, "stride": 1, "dilation": 2, "padding": 1}), + "convtranspose", + ), + (("avg", {"kernel_size": 3, "ceil_mode": True}), "nearest"), + (("avg", {"kernel_size": 3, "ceil_mode": True}), "convtranspose"), + ], +) +def test_invert_pooling(pooling, unpooling_mode): + input_tensor = torch.randn(2, 1, 20, 27, 22) + net = AutoEncoder( + in_shape=(1, 20, 27, 22), + latent_size=1, + conv_args={"channels": [], "pooling": pooling, "pooling_indices": [-1]}, + mlp_args=None, + unpooling_mode=unpooling_mode, + ) + output = net(input_tensor) + assert output.shape == input_tensor.shape + + +@pytest.mark.parametrize( + "kernel_size,stride,padding,dilation", + [ + ((3, 2, 1), (1, 1, 2), (1, 1, 0), 1), + ((4, 5, 2), (3, 1, 1), (0, 0, 1), (2, 1, 1)), + ], +) +def test_invert_conv(kernel_size, stride, padding, dilation): + input_tensor = torch.randn(2, 1, 20, 27, 22) + net = AutoEncoder( + in_shape=(1, 20, 27, 22), + latent_size=1, + conv_args={ + "channels": [1], + "kernel_size": kernel_size, + "stride": stride, + "padding": padding, + "dilation": dilation, + }, + mlp_args=None, + ) + output = net(input_tensor) + assert output.shape == input_tensor.shape + + +@pytest.mark.parametrize("act", [act for act in ActFunction]) +def test_out_activation(act): + input_tensor = torch.randn(2, 1, 32, 32) + net = AutoEncoder( + in_shape=(1, 32, 32), + latent_size=3, + conv_args={"channels": [2]}, + output_act=act, + ) + assert net(input_tensor).shape == (2, 1, 32, 32) + + +def test_params(): + net = AutoEncoder( + in_shape=(1, 100, 100), + latent_size=3, + conv_args={"channels": [2], "act": "celu", "output_act": "sigmoid"}, + mlp_args={"hidden_channels": [2], "act": "relu", "output_act": "gelu"}, + output_act="tanh", + out_channels=2, + ) + assert net.encoder.convolutions.act == "celu" + assert net.decoder.convolutions.act == "celu" + assert net.encoder.mlp.act == "relu" + assert net.decoder.mlp.act == "relu" + assert isinstance(net.encoder.mlp.output.output_act, GELU) + assert isinstance(net.encoder.mlp.output.output_act, GELU) + assert isinstance(net.encoder.convolutions.output_act, Sigmoid) + assert isinstance(net.decoder.convolutions.output_act, Tanh) + + +@pytest.mark.parametrize( + "in_shape,upsampling_mode,error", + [ + ((1, 10), "bilinear", True), + ((1, 10, 10), "linear", True), + ((1, 10, 10), "trilinear", True), + ((1, 10, 10, 10), "bicubic", True), + ((1, 10), "linear", False), + ((1, 10, 10), "bilinear", False), + ((1, 10, 10, 10), "trilinear", False), + ], +) +def test_checks(in_shape, upsampling_mode, error): + if error: + with pytest.raises(ValueError): + AutoEncoder( + in_shape=in_shape, + latent_size=3, + conv_args={"channels": []}, + unpooling_mode=upsampling_mode, + ) + else: + AutoEncoder( + in_shape=in_shape, + 
            latent_size=3,
+            conv_args={"channels": []},
+            unpooling_mode=upsampling_mode,
+        )
diff --git a/tests/unittests/monai_networks/nn/test_cnn.py b/tests/unittests/monai_networks/nn/test_cnn.py
new file mode 100644
index 000000000..095c8da5d
--- /dev/null
+++ b/tests/unittests/monai_networks/nn/test_cnn.py
@@ -0,0 +1,62 @@
+import pytest
+import torch
+from torch.nn import Flatten, Linear, Softmax
+
+from clinicadl.monai_networks.nn import CNN, MLP, ConvEncoder
+
+INPUT_1D = torch.randn(3, 1, 16)
+INPUT_2D = torch.randn(3, 1, 15, 16)
+INPUT_3D = torch.randn(3, 3, 20, 21, 22)
+
+
+@pytest.mark.parametrize("input_tensor", [INPUT_1D, INPUT_2D, INPUT_3D])
+@pytest.mark.parametrize("channels", [(), (2, 4)])
+@pytest.mark.parametrize(
+    "mlp_args", [None, {"hidden_channels": []}, {"hidden_channels": (2, 4)}]
+)
+def test_cnn(input_tensor, channels, mlp_args):
+    in_shape = input_tensor.shape[1:]
+    net = CNN(
+        in_shape=in_shape,
+        num_outputs=2,
+        conv_args={"channels": channels},
+        mlp_args=mlp_args,
+    )
+    output = net(input_tensor)
+    assert output.shape == (3, 2)
+    assert isinstance(net.convolutions, ConvEncoder)
+    assert isinstance(net.mlp, MLP)
+
+    if mlp_args is None or mlp_args["hidden_channels"] == []:
+        children = net.mlp.children()
+        assert isinstance(next(children), Flatten)
+        assert isinstance(next(children).linear, Linear)
+        with pytest.raises(StopIteration):
+            next(children)
+
+    if channels == ():  # `channels` is parametrized as a tuple
+        with pytest.raises(StopIteration):
+            next(net.convolutions.parameters())
+
+
+@pytest.mark.parametrize(
+    "conv_args,mlp_args",
+    [
+        (None, {"hidden_channels": [2]}),
+        ({"channels": [2]}, {}),
+    ],
+)
+def test_checks(conv_args, mlp_args):
+    with pytest.raises(ValueError):
+        CNN(in_shape=(1, 10, 10), num_outputs=2, conv_args=conv_args, mlp_args=mlp_args)
+
+
+def test_params():
+    conv_args = {"channels": [2], "act": "celu"}
+    mlp_args = {"hidden_channels": [2], "act": "relu", "output_act": "softmax"}
+    net = CNN(
+        in_shape=(1, 10, 10), num_outputs=2, conv_args=conv_args, mlp_args=mlp_args
+    )
+    assert net.convolutions.act == "celu"
+    assert net.mlp.act == "relu"
+    assert isinstance(net.mlp.output.output_act, Softmax)
diff --git a/tests/unittests/monai_networks/nn/test_conv_decoder.py b/tests/unittests/monai_networks/nn/test_conv_decoder.py
new file mode 100644
index 000000000..73576918e
--- /dev/null
+++ b/tests/unittests/monai_networks/nn/test_conv_decoder.py
@@ -0,0 +1,407 @@
+import pytest
+import torch
+from torch.nn import ELU, ConvTranspose2d, Dropout, InstanceNorm2d, Upsample
+
+from clinicadl.monai_networks.nn import ConvDecoder
+from clinicadl.monai_networks.nn.layers.utils import ActFunction
+
+
+@pytest.fixture
+def input_tensor():
+    return torch.randn(2, 1, 8, 8)
+
+
+@pytest.mark.parametrize("act", [act for act in ActFunction])
+def test_activations(input_tensor, act):
+    _, in_channels, *input_size = input_tensor.shape
+    spatial_dims = len(input_size)
+    net = ConvDecoder(
+        spatial_dims=spatial_dims,
+        in_channels=in_channels,
+        channels=[2, 4, 1],
+        act=act,
+        output_act=act,
+    )
+    output_shape = net(input_tensor).shape
+    assert len(output_shape) == 4 and output_shape[1] == 1
+
+
+@pytest.mark.parametrize(
+    "kernel_size,stride,padding,output_padding,dilation,unpooling,unpooling_indices,norm,dropout,bias,adn_ordering",
+    [
+        (
+            3,
+            2,
+            0,
+            1,
+            1,
+            ("upsample", {"scale_factor": 2}),
+            [2],
+            "batch",
+            None,
+            True,
+            "ADN",
+        ),
+        (
+            (4, 4),
+            (2, 1),
+            2,
+            (1, 0),
+            2,
+            ("upsample", {"scale_factor": 2}),
+            [0, 1],
+            "instance",
+            0.5,
+            False,
+            "DAN",
+        ),
+        (
+            5,
+ 1, + (2, 1), + 0, + 1, + [("upsample", {"size": (16, 16)}), ("convtranspose", {"kernel_size": 2})], + [0, 1], + "syncbatch", + 0.5, + True, + "NA", + ), + (5, 1, 0, 1, (2, 3), None, [0, 1], "instance", 0.0, True, "DN"), + ( + 5, + 1, + 2, + 0, + 1, + ("convtranspose", {"kernel_size": 2}), + None, + ("group", {"num_groups": 2}), + None, + True, + "N", + ), + ( + 5, + 3, + 2, + (2, 1), + 1, + ("convtranspose", {"kernel_size": 2}), + [0, 1], + None, + None, + True, + "", + ), + ], +) +def test_params( + input_tensor, + kernel_size, + stride, + padding, + output_padding, + dilation, + unpooling, + unpooling_indices, + norm, + dropout, + bias, + adn_ordering, +): + batch_size, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + # test size computation + net = ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + unpooling=unpooling, + unpooling_indices=unpooling_indices, + dropout=dropout, + act=None, + norm=norm, + bias=bias, + adn_ordering=adn_ordering, + _input_size=input_size, + ) + output = net(input_tensor) + assert output.shape == (batch_size, 1, *net.final_size) + + # other checks + net = ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + kernel_size=kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + dilation=dilation, + unpooling=unpooling, + unpooling_indices=unpooling_indices, + dropout=dropout, + act=None, + norm=norm, + bias=bias, + adn_ordering=adn_ordering, + ) + assert isinstance(net.layer2[0], ConvTranspose2d) + with pytest.raises(IndexError): + net.layer2[1] # no adn at the end + + named_layers = list(net.named_children()) + if unpooling and unpooling_indices and unpooling_indices != []: + for i, idx in enumerate(unpooling_indices): + name, layer = named_layers[idx + 1 + i] + if idx == -1: + assert name == "init_unpool" + else: + assert name == f"unpool{idx}" + if net.unpooling[i][0] == "upsample": + assert isinstance(layer, Upsample) + else: + assert isinstance(layer, ConvTranspose2d) + else: + for name, layer in named_layers: + assert not isinstance(layer, Upsample) + assert "unpool" not in name + + assert ( + net.layer0[0].kernel_size == kernel_size + if isinstance(kernel_size, tuple) + else (kernel_size, kernel_size) + ) + assert ( + net.layer0[0].stride == stride + if isinstance(stride, tuple) + else (stride, stride) + ) + assert ( + net.layer0[0].padding == padding + if isinstance(padding, tuple) + else (padding, padding) + ) + assert ( + net.layer0[0].output_padding == output_padding + if isinstance(output_padding, tuple) + else (output_padding, output_padding) + ) + assert ( + net.layer0[0].dilation == dilation + if isinstance(dilation, tuple) + else (dilation, dilation) + ) + + if bias: + assert len(net.layer0[0].bias) > 0 + assert len(net.layer1[0].bias) > 0 + assert len(net.layer2[0].bias) > 0 + else: + assert net.layer0[0].bias is None + assert net.layer1[0].bias is None + assert net.layer2[0].bias is None + if isinstance(dropout, float) and "D" in adn_ordering: + assert net.layer0[1].D.p == dropout + assert net.layer1[1].D.p == dropout + + +def test_activation_parameters(input_tensor): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + act = ("ELU", {"alpha": 0.1}) + output_act = ("ELU", {"alpha": 0.2}) + net = ConvDecoder( + spatial_dims=spatial_dims, + 
in_channels=in_channels, + channels=[2, 4, 1], + act=act, + output_act=output_act, + ) + assert isinstance(net.layer0[1].A, ELU) + assert net.layer0[1].A.alpha == 0.1 + assert isinstance(net.layer1[1].A, ELU) + assert net.layer1[1].A.alpha == 0.1 + assert isinstance(net.output_act, ELU) + assert net.output_act.alpha == 0.2 + + net = ConvDecoder( + spatial_dims=spatial_dims, in_channels=in_channels, channels=[2, 4, 1], act=None + ) + with pytest.raises(AttributeError): + net.layer0[1].A + with pytest.raises(AttributeError): + net.layer1[1].A + assert net.output_act is None + + +def test_norm_parameters(input_tensor): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + norm = ("instance", {"momentum": 1.0}) + net = ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + norm=norm, + ) + assert isinstance(net.layer0[1].N, InstanceNorm2d) + assert net.layer0[1].N.momentum == 1.0 + assert isinstance(net.layer1[1].N, InstanceNorm2d) + assert net.layer1[1].N.momentum == 1.0 + + net = ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + norm=None, + ) + with pytest.raises(AttributeError): + net.layer0[1].N + with pytest.raises(AttributeError): + net.layer1[1].N + + +def test_unpool_parameters(input_tensor): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + unpooling = ("convtranspose", {"kernel_size": 3, "stride": 2}) + net = ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + unpooling=unpooling, + unpooling_indices=[1], + ) + assert isinstance(net.unpool1, ConvTranspose2d) + assert net.unpool1.stride == (2, 2) + assert net.unpool1.kernel_size == (3, 3) + + +@pytest.mark.parametrize("adn_ordering", ["DAN", "NA", "A"]) +def test_adn_ordering(input_tensor, adn_ordering): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + net = ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + dropout=0.1, + adn_ordering=adn_ordering, + act="elu", + norm="instance", + ) + objects = {"D": Dropout, "N": InstanceNorm2d, "A": ELU} + for i, letter in enumerate(adn_ordering): + assert isinstance(net.layer0[1][i], objects[letter]) + assert isinstance(net.layer1[1][i], objects[letter]) + for letter in set(["A", "D", "N"]) - set(adn_ordering): + with pytest.raises(AttributeError): + getattr(net.layer0[1], letter) + with pytest.raises(AttributeError): + getattr(net.layer1[1], letter) + + +@pytest.mark.parametrize( + "input_tensor", [torch.randn(2, 1, 16), torch.randn(2, 3, 20, 21, 22)] +) +def test_other_dimensions(input_tensor): + batch_size, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + net = ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + _input_size=input_size, + ) + output = net(input_tensor) + assert output.shape == (batch_size, 1, *net.final_size) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"kernel_size": (3, 3, 3)}, + {"stride": [1, 1]}, + {"padding": [1, 1]}, + {"dilation": (1,)}, + {"unpooling_indices": [0, 1, 2, 3]}, + {"unpooling": "upsample", "unpooling_indices": [0]}, + {"norm": "group"}, + {"norm": "layer"}, + ], +) +def test_checks(input_tensor, kwargs): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + with pytest.raises(ValueError): + ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 
4, 1], + **kwargs, + ) + + +@pytest.mark.parametrize( + "unpooling,error", + [ + (None, False), + ("abc", True), + ("upsample", True), + (("upsample",), True), + (("upsample", 2), True), + (("convtranspose", {"kernel_size": 2}), False), + (("upsample", {"scale_factor": 2}), False), + ( + [("upsample", {"scale_factor": 2}), ("convtranspose", {"kernel_size": 2})], + False, + ), + ([("upsample", {"scale_factor": 2}), None], True), + ([("upsample", {"scale_factor": 2}), "convtranspose"], True), + ([("upsample", {"scale_factor": 2}), ("convtranspose", 2)], True), + ( + [ + ("upsample", {"scale_factor": 2}), + ("convtranspose", {"kernel_size": 2}), + ("convtranspose", {"kernel_size": 2}), + ], + True, + ), + ], +) +def test_check_unpool_layer(input_tensor, unpooling, error): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + if error: + with pytest.raises(ValueError): + ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + unpooling=unpooling, + unpooling_indices=[0, 1], + ) + else: + ConvDecoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + unpooling=unpooling, + unpooling_indices=[0, 1], + ) diff --git a/tests/unittests/monai_networks/nn/test_conv_encoder.py b/tests/unittests/monai_networks/nn/test_conv_encoder.py new file mode 100644 index 000000000..3a21859b8 --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_conv_encoder.py @@ -0,0 +1,400 @@ +import pytest +import torch +from torch.nn import ( + ELU, + AdaptiveAvgPool2d, + AdaptiveMaxPool2d, + AvgPool2d, + Conv2d, + Dropout, + InstanceNorm2d, + MaxPool2d, +) + +from clinicadl.monai_networks.nn import ConvEncoder +from clinicadl.monai_networks.nn.layers.utils import ActFunction + + +@pytest.fixture +def input_tensor(): + return torch.randn(2, 1, 55, 54) + + +@pytest.mark.parametrize("act", [act for act in ActFunction]) +def test_activations(input_tensor, act): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + act=act, + output_act=act, + ) + output_shape = net(input_tensor).shape + assert len(output_shape) == 4 and output_shape[1] == 1 + + +@pytest.mark.parametrize( + "kernel_size,stride,padding,dilation,pooling,pooling_indices,norm,dropout,bias,adn_ordering", + [ + ( + 3, + 1, + 0, + 1, + ("adaptivemax", {"output_size": 1}), + [2], + "batch", + None, + True, + "ADN", + ), + ( + (4, 4), + (2, 1), + 2, + 2, + ("max", {"kernel_size": 2}), + [0, 1], + "instance", + 0.5, + False, + "DAN", + ), + ( + 5, + 1, + (2, 1), + 1, + [ + ("avg", {"kernel_size": 2}), + ("max", {"kernel_size": 2}), + ("adaptiveavg", {"output_size": (2, 3)}), + ], + [-1, 1, 2], + "syncbatch", + 0.5, + True, + "NA", + ), + (5, 1, 0, (1, 2), None, [0, 1], "instance", 0.0, True, "DN"), + ( + 5, + 1, + 2, + 1, + ("avg", {"kernel_size": 2}), + None, + ("group", {"num_groups": 2}), + None, + True, + "N", + ), + (5, 1, 2, 1, ("avg", {"kernel_size": 2}), None, None, None, True, ""), + ], +) +def test_params( + input_tensor, + kernel_size, + stride, + padding, + dilation, + pooling, + pooling_indices, + norm, + dropout, + bias, + adn_ordering, +): + batch_size, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + # test output size + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + kernel_size=kernel_size, + stride=stride, + padding=padding, + 
dilation=dilation, + pooling=pooling, + pooling_indices=pooling_indices, + dropout=dropout, + act=None, + norm=norm, + bias=bias, + adn_ordering=adn_ordering, + _input_size=input_size, + ) + output = net(input_tensor) + assert output.shape == (batch_size, 1, *net.final_size) + + # other checks + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + pooling=pooling, + pooling_indices=pooling_indices, + dropout=dropout, + act=None, + norm=norm, + bias=bias, + adn_ordering=adn_ordering, + ) + assert isinstance(net.layer2.conv, Conv2d) + with pytest.raises(IndexError): + net.layer2[1] # no adn at the end + + named_layers = list(net.named_children()) + if pooling and pooling_indices and pooling_indices != []: + for i, idx in enumerate(pooling_indices): + name, layer = named_layers[idx + 1 + i] + if idx == -1: + assert name == "init_pool" + else: + assert name == f"pool{idx}" + pooling_mode = net.pooling[i][0] + if pooling_mode == "max": + assert isinstance(layer, MaxPool2d) + elif pooling_mode == "avg": + assert isinstance(layer, AvgPool2d) + elif pooling_mode == "adaptivemax": + assert isinstance(layer, AdaptiveMaxPool2d) + else: + assert isinstance(layer, AdaptiveAvgPool2d) + else: + for name, layer in named_layers: + assert not isinstance(layer, AvgPool2d) or isinstance(layer, MaxPool2d) + assert "pool" not in name + + assert ( + net.layer0.conv.kernel_size == kernel_size + if isinstance(kernel_size, tuple) + else (kernel_size, kernel_size) + ) + assert ( + net.layer0.conv.stride == stride + if isinstance(stride, tuple) + else (stride, stride) + ) + assert ( + net.layer0.conv.padding == padding + if isinstance(padding, tuple) + else (padding, padding) + ) + assert ( + net.layer0.conv.dilation == dilation + if isinstance(dilation, tuple) + else (dilation, dilation) + ) + + if bias: + assert len(net.layer0.conv.bias) > 0 + assert len(net.layer1.conv.bias) > 0 + assert len(net.layer2.conv.bias) > 0 + else: + assert net.layer0.conv.bias is None + assert net.layer1.conv.bias is None + assert net.layer2.conv.bias is None + if isinstance(dropout, float) and "D" in adn_ordering: + assert net.layer0.adn.D.p == dropout + assert net.layer1.adn.D.p == dropout + + +def test_activation_parameters(input_tensor): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + act = ("ELU", {"alpha": 0.1}) + output_act = ("ELU", {"alpha": 0.2}) + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + act=act, + output_act=output_act, + ) + assert isinstance(net.layer0.adn.A, ELU) + assert net.layer0.adn.A.alpha == 0.1 + assert isinstance(net.layer1.adn.A, ELU) + assert net.layer1.adn.A.alpha == 0.1 + assert isinstance(net.output_act, ELU) + assert net.output_act.alpha == 0.2 + + net = ConvEncoder( + spatial_dims=spatial_dims, in_channels=in_channels, channels=[2, 4, 1], act=None + ) + with pytest.raises(AttributeError): + net.layer0.adn.A + with pytest.raises(AttributeError): + net.layer1.adn.A + assert net.output_act is None + + +def test_norm_parameters(input_tensor): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + norm = ("instance", {"momentum": 1.0}) + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + norm=norm, + ) + assert isinstance(net.layer0.adn.N, InstanceNorm2d) + assert net.layer0.adn.N.momentum == 1.0 + assert 
isinstance(net.layer1.adn.N, InstanceNorm2d) + assert net.layer1.adn.N.momentum == 1.0 + + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + norm=None, + ) + with pytest.raises(AttributeError): + net.layer0.adn.N + with pytest.raises(AttributeError): + net.layer1.adn.N + + +def test_pool_parameters(input_tensor): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + pooling = ("avg", {"kernel_size": 3, "stride": 2}) + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + pooling=pooling, + pooling_indices=[1], + ) + assert isinstance(net.pool1, AvgPool2d) + assert net.pool1.stride == 2 + assert net.pool1.kernel_size == 3 + + +@pytest.mark.parametrize("adn_ordering", ["DAN", "NA", "A"]) +def test_adn_ordering(input_tensor, adn_ordering): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + dropout=0.1, + adn_ordering=adn_ordering, + act="elu", + norm="instance", + ) + objects = {"D": Dropout, "N": InstanceNorm2d, "A": ELU} + for i, letter in enumerate(adn_ordering): + assert isinstance(net.layer0.adn[i], objects[letter]) + assert isinstance(net.layer1.adn[i], objects[letter]) + for letter in set(["A", "D", "N"]) - set(adn_ordering): + with pytest.raises(AttributeError): + getattr(net.layer0.adn, letter) + with pytest.raises(AttributeError): + getattr(net.layer1.adn, letter) + + +@pytest.mark.parametrize( + "input_tensor", [torch.randn(2, 1, 16), torch.randn(2, 3, 20, 21, 22)] +) +def test_other_dimensions(input_tensor): + batch_size, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + net = ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + _input_size=input_size, + ) + output = net(input_tensor) + assert output.shape == (batch_size, 1, *net.final_size) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"kernel_size": (3, 3, 3)}, + {"stride": [1, 1]}, + {"padding": [1, 1]}, + {"dilation": (1,)}, + {"pooling_indices": [0, 1, 2, 3]}, + {"pooling": "avg", "pooling_indices": [0]}, + {"norm": "group"}, + {"_input_size": (1, 10, 10), "stride": 2, "channels": [2, 4, 6, 8]}, + ], +) +def test_checks(kwargs): + if "channels" not in kwargs: + kwargs["channels"] = [2, 4, 1] + if "in_channels" not in kwargs: + kwargs["in_channels"] = 1 + if "spatial_dims" not in kwargs: + kwargs["spatial_dims"] = 2 + with pytest.raises(ValueError): + ConvEncoder(**kwargs) + + +@pytest.mark.parametrize( + "pooling,error", + [ + (None, False), + ("abc", True), + ("max", True), + (("max",), True), + (("max", 3), True), + (("avg", {"stride": 1}), True), + (("avg", {"kernel_size": 1}), False), + (("avg", {"kernel_size": 1, "stride": 1}), False), + (("abc", {"kernel_size": 1, "stride": 1}), True), + ([("avg", {"kernel_size": 1}), ("max", {"kernel_size": 1})], False), + ([("avg", {"kernel_size": 1}), None], True), + ([("avg", {"kernel_size": 1}), "max"], True), + ([("avg", {"kernel_size": 1}), ("max", 3)], True), + ([("avg", {"kernel_size": 1}), ("max", {"stride": 1})], True), + ( + [ + ("avg", {"kernel_size": 1}), + ("max", {"stride": 1}), + ("max", {"stride": 1}), + ], + True, + ), + ], +) +def test_check_pool_layers(input_tensor, pooling, error): + _, in_channels, *input_size = input_tensor.shape + spatial_dims = len(input_size) + + if error: + with pytest.raises(ValueError): + ConvEncoder( + 
spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + pooling=pooling, + pooling_indices=[0, 1], + ) + else: + ConvEncoder( + spatial_dims=spatial_dims, + in_channels=in_channels, + channels=[2, 4, 1], + pooling=pooling, + pooling_indices=[0, 1], + ) diff --git a/tests/unittests/monai_networks/nn/test_densenet.py b/tests/unittests/monai_networks/nn/test_densenet.py new file mode 100644 index 000000000..303a22a6e --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_densenet.py @@ -0,0 +1,138 @@ +import pytest +import torch + +from clinicadl.monai_networks.nn import DenseNet, get_densenet +from clinicadl.monai_networks.nn.densenet import SOTADenseNet +from clinicadl.monai_networks.nn.layers.utils import ActFunction + +INPUT_1D = torch.randn(3, 1, 16) +INPUT_2D = torch.randn(3, 2, 15, 16) +INPUT_3D = torch.randn(3, 3, 20, 21, 22) + + +@pytest.mark.parametrize( + "input_tensor,num_outputs,n_dense_layers,init_features,growth_rate,bottleneck_factor,act,output_act,dropout", + [ + (INPUT_1D, 2, (3, 4), 16, 8, 2, "relu", None, 0.1), + (INPUT_2D, None, (3, 4, 2), 9, 5, 3, "elu", "sigmoid", 0.0), + (INPUT_3D, 1, (2,), 4, 4, 2, "tanh", "sigmoid", 0.1), + ], +) +def test_densenet( + input_tensor, + num_outputs, + n_dense_layers, + init_features, + growth_rate, + bottleneck_factor, + act, + output_act, + dropout, +): + batch_size = input_tensor.shape[0] + net = DenseNet( + spatial_dims=len(input_tensor.shape[2:]), + in_channels=input_tensor.shape[1], + num_outputs=num_outputs, + n_dense_layers=n_dense_layers, + init_features=init_features, + growth_rate=growth_rate, + bottleneck_factor=bottleneck_factor, + act=act, + output_act=output_act, + dropout=dropout, + ) + output = net(input_tensor) + + if num_outputs: + assert output.shape == (batch_size, num_outputs) + else: + assert len(output.shape) == len(input_tensor.shape) + + if output_act and num_outputs: + assert net.fc.output_act is not None + elif output_act and num_outputs is None: + with pytest.raises(AttributeError): + net.fc.output_act + + features = net.features + for i, n in enumerate(n_dense_layers, start=1): + dense_block = getattr(features, f"denseblock{i}") + for k in range(1, n + 1): + dense_layer = getattr(dense_block, f"denselayer{k}").layers + assert dense_layer.conv1.out_channels == growth_rate * bottleneck_factor + assert dense_layer.conv2.out_channels == growth_rate + if dropout: + assert dense_layer.dropout.p == dropout + with pytest.raises(AttributeError): + getattr(dense_block, f"denseblock{n+1}") + with pytest.raises(AttributeError): + getattr(dense_block, f"denseblock{i+1}") + + assert features.conv0.out_channels == init_features + + +@pytest.mark.parametrize("act", [act for act in ActFunction]) +def test_activations(act): + batch_size = INPUT_2D.shape[0] + net = DenseNet( + spatial_dims=len(INPUT_2D.shape[2:]), + in_channels=INPUT_2D.shape[1], + n_dense_layers=(2, 2), + num_outputs=2, + act=act, + ) + assert net(INPUT_2D).shape == (batch_size, 2) + + +def test_activation_parameters(): + act = ("ELU", {"alpha": 0.1}) + output_act = ("ELU", {"alpha": 0.2}) + net = DenseNet( + spatial_dims=len(INPUT_2D.shape[2:]), + in_channels=INPUT_2D.shape[1], + num_outputs=2, + n_dense_layers=(2, 2), + act=act, + output_act=output_act, + ) + assert isinstance(net.features.denseblock1.denselayer1.layers.act1, torch.nn.ELU) + assert net.features.denseblock1.denselayer1.layers.act1.alpha == 0.1 + assert isinstance(net.fc.output_act, torch.nn.ELU) + assert net.fc.output_act.alpha == 0.2 + + +@pytest.mark.parametrize( 
+ "name,num_outputs,output_act", + [ + (SOTADenseNet.DENSENET_121, 1, "sigmoid"), + (SOTADenseNet.DENSENET_161, 2, None), + (SOTADenseNet.DENSENET_169, None, "sigmoid"), + (SOTADenseNet.DENSENET_201, None, None), + ], +) +def test_get_densenet(name, num_outputs, output_act): + densenet = get_densenet( + name, num_outputs=num_outputs, output_act=output_act, pretrained=True + ) + if num_outputs: + assert densenet.fc.out.out_features == num_outputs + else: + assert densenet.fc is None + + if output_act and num_outputs: + assert densenet.fc.output_act is not None + elif output_act and num_outputs is None: + with pytest.raises(AttributeError): + densenet.fc.output_act + + +def test_get_densenet_output(): + from torchvision.models import densenet121 + + densenet = get_densenet( + SOTADenseNet.DENSENET_121, num_outputs=None, pretrained=True + ).features + gt = densenet121(weights="DEFAULT").features + x = torch.randn(1, 3, 128, 128) + assert (densenet(x) == gt(x)).all() diff --git a/tests/unittests/monai_networks/nn/test_generator.py b/tests/unittests/monai_networks/nn/test_generator.py new file mode 100644 index 000000000..1fa4fffa7 --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_generator.py @@ -0,0 +1,67 @@ +import pytest +import torch +from torch.nn import Flatten, Linear + +from clinicadl.monai_networks.nn import MLP, ConvDecoder, Generator + + +@pytest.fixture +def input_tensor(): + return torch.randn(2, 8) + + +@pytest.mark.parametrize("channels", [(), (2, 4)]) +@pytest.mark.parametrize( + "mlp_args", [None, {"hidden_channels": []}, {"hidden_channels": (2, 4)}] +) +@pytest.mark.parametrize("start_shape", [(1, 5), (1, 5, 5), (1, 5, 5)]) +def test_generator(input_tensor, start_shape, channels, mlp_args): + latent_size = input_tensor.shape[1] + net = Generator( + latent_size=latent_size, + start_shape=start_shape, + conv_args={"channels": channels}, + mlp_args=mlp_args, + ) + output = net(input_tensor) + assert output.shape[1:] == net.output_shape + assert isinstance(net.convolutions, ConvDecoder) + assert isinstance(net.mlp, MLP) + + if mlp_args is None or mlp_args["hidden_channels"] == []: + children = net.mlp.children() + assert isinstance(next(children), Flatten) + assert isinstance(next(children).linear, Linear) + with pytest.raises(StopIteration): + next(children) + + if channels == []: + with pytest.raises(StopIteration): + next(net.convolutions.parameters()) + + +@pytest.mark.parametrize( + "conv_args,mlp_args", + [ + (None, {"hidden_channels": [2]}), + ({"channels": [2]}, {}), + ], +) +def test_checks(conv_args, mlp_args): + with pytest.raises(ValueError): + Generator( + latent_size=2, + start_shape=(1, 10, 10), + conv_args=conv_args, + mlp_args=mlp_args, + ) + + +def test_params(): + conv_args = {"channels": [2], "act": "celu"} + mlp_args = {"hidden_channels": [2], "act": "relu"} + net = Generator( + latent_size=2, start_shape=(1, 10, 10), conv_args=conv_args, mlp_args=mlp_args + ) + assert net.convolutions.act == "celu" + assert net.mlp.act == "relu" diff --git a/tests/unittests/monai_networks/nn/test_mlp.py b/tests/unittests/monai_networks/nn/test_mlp.py new file mode 100644 index 000000000..5eb3105a8 --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_mlp.py @@ -0,0 +1,125 @@ +import pytest +import torch +from torch.nn import ELU, Dropout, InstanceNorm1d, Linear + +from clinicadl.monai_networks.nn import MLP +from clinicadl.monai_networks.nn.layers.utils import ActFunction + + +@pytest.fixture +def input_tensor(): + return torch.randn(8, 10) + + 
+@pytest.mark.parametrize("act", [act for act in ActFunction]) +def test_activations(input_tensor, act): + net = MLP( + in_channels=10, out_channels=2, hidden_channels=[6, 4], act=act, output_act=act + ) + assert net(input_tensor).shape == (8, 2) + + +@pytest.mark.parametrize( + "dropout,norm,bias,adn_ordering", + [ + (None, "batch", True, "ADN"), + (0.5, "layer", False, "DAN"), + (0.5, "syncbatch", True, "NA"), + (0.0, "instance", True, "DN"), + (None, ("group", {"num_groups": 2}), True, "N"), + (0.5, None, True, "ADN"), + (0.5, "batch", True, ""), + ], +) +def test_params(input_tensor, dropout, norm, bias, adn_ordering): + net = MLP( + in_channels=10, + out_channels=2, + hidden_channels=[6, 4], + dropout=dropout, + norm=norm, + act=None, + bias=bias, + adn_ordering=adn_ordering, + ) + assert net(input_tensor).shape == (8, 2) + assert isinstance(net.output.linear, Linear) + + if bias: + assert len(net.hidden0.linear.bias) > 0 + assert len(net.hidden1.linear.bias) > 0 + assert len(net.output.linear.bias) > 0 + else: + assert net.hidden0.linear.bias is None + assert net.hidden1.linear.bias is None + assert net.output.linear.bias is None + if isinstance(dropout, float) and "D" in adn_ordering: + assert net.hidden0.adn.D.p == dropout + assert net.hidden1.adn.D.p == dropout + + +def test_activation_parameters(): + act = ("ELU", {"alpha": 0.1}) + output_act = ("ELU", {"alpha": 0.2}) + net = MLP( + in_channels=10, + out_channels=2, + hidden_channels=[6, 4], + act=act, + output_act=output_act, + ) + assert isinstance(net.hidden0.adn.A, ELU) + assert net.hidden0.adn.A.alpha == 0.1 + assert isinstance(net.hidden1.adn.A, ELU) + assert net.hidden1.adn.A.alpha == 0.1 + assert isinstance(net.output.output_act, ELU) + assert net.output.output_act.alpha == 0.2 + + net = MLP(in_channels=10, out_channels=2, hidden_channels=[6, 4], act=None) + with pytest.raises(AttributeError): + net.hidden0.adn.A + with pytest.raises(AttributeError): + net.hidden1.adn.A + assert net.output.output_act is None + + +def test_norm_parameters(): + norm = ("instance", {"momentum": 1.0}) + net = MLP(in_channels=10, out_channels=2, hidden_channels=[6, 4], norm=norm) + assert isinstance(net.hidden0.adn.N, InstanceNorm1d) + assert net.hidden0.adn.N.momentum == 1.0 + assert isinstance(net.hidden1.adn.N, InstanceNorm1d) + assert net.hidden1.adn.N.momentum == 1.0 + + net = MLP(in_channels=10, out_channels=2, hidden_channels=[6, 4], act=None) + with pytest.raises(AttributeError): + net.layer_0[1].N + with pytest.raises(AttributeError): + net.layer_1[1].N + + +@pytest.mark.parametrize("adn_ordering", ["DAN", "NA", "A"]) +def test_adn_ordering(adn_ordering): + net = MLP( + in_channels=10, + out_channels=2, + hidden_channels=[6, 4], + dropout=0.1, + adn_ordering=adn_ordering, + act="elu", + norm="instance", + ) + objects = {"D": Dropout, "N": InstanceNorm1d, "A": ELU} + for i, letter in enumerate(adn_ordering): + assert isinstance(net.hidden0.adn[i], objects[letter]) + assert isinstance(net.hidden1.adn[i], objects[letter]) + for letter in set(["A", "D", "N"]) - set(adn_ordering): + with pytest.raises(AttributeError): + getattr(net.hidden0.adn, letter) + with pytest.raises(AttributeError): + getattr(net.hidden1.adn, letter) + + +def test_checks(): + with pytest.raises(ValueError): + MLP(in_channels=10, out_channels=2, hidden_channels=[6, 4], norm="group") diff --git a/tests/unittests/monai_networks/nn/test_resnet.py b/tests/unittests/monai_networks/nn/test_resnet.py new file mode 100644 index 000000000..20ed028d0 --- /dev/null +++ 
+++ b/tests/unittests/monai_networks/nn/test_resnet.py
@@ -0,0 +1,173 @@
+import pytest
+import torch
+
+from clinicadl.monai_networks.nn import ResNet, get_resnet
+from clinicadl.monai_networks.nn.layers.resnet import ResNetBlock, ResNetBottleneck
+from clinicadl.monai_networks.nn.layers.utils import ActFunction
+from clinicadl.monai_networks.nn.resnet import SOTAResNet
+
+INPUT_1D = torch.randn(3, 1, 16)
+INPUT_2D = torch.randn(3, 2, 15, 16)
+INPUT_3D = torch.randn(3, 3, 20, 21, 22)
+
+
+@pytest.mark.parametrize(
+    "input_tensor,num_outputs,block_type,n_res_blocks,n_features,init_conv_size,init_conv_stride,bottleneck_reduction,act,output_act",
+    [
+        (INPUT_1D, 2, "basic", (2, 3), (4, 8), 7, 1, 2, "relu", None),
+        (
+            INPUT_2D,
+            None,
+            "bottleneck",
+            (3, 2, 2),
+            (8, 12, 16),
+            5,
+            (2, 1),
+            4,
+            "elu",
+            "sigmoid",
+        ),
+        (INPUT_3D, 1, "bottleneck", (2,), (3,), (4, 3, 4), 2, 1, "tanh", "sigmoid"),
+    ],
+)
+def test_resnet(
+    input_tensor,
+    num_outputs,
+    block_type,
+    n_res_blocks,
+    n_features,
+    init_conv_size,
+    init_conv_stride,
+    bottleneck_reduction,
+    act,
+    output_act,
+):
+    batch_size = input_tensor.shape[0]
+    spatial_dims = len(input_tensor.shape[2:])
+    net = ResNet(
+        spatial_dims=spatial_dims,
+        in_channels=input_tensor.shape[1],
+        num_outputs=num_outputs,
+        block_type=block_type,
+        n_res_blocks=n_res_blocks,
+        n_features=n_features,
+        init_conv_size=init_conv_size,
+        init_conv_stride=init_conv_stride,
+        bottleneck_reduction=bottleneck_reduction,
+        act=act,
+        output_act=output_act,
+    )
+    output = net(input_tensor)
+
+    if num_outputs:
+        assert output.shape == (batch_size, num_outputs)
+    else:
+        assert len(output.shape) == len(input_tensor.shape)
+
+    if output_act and num_outputs:
+        assert net.fc.output_act is not None
+    elif output_act and num_outputs is None:
+        with pytest.raises(AttributeError):
+            net.fc.output_act
+
+    for i, (n_blocks, n_feats) in enumerate(zip(n_res_blocks, n_features), start=1):
+        layer = getattr(net, f"layer{i}")
+        for k in range(n_blocks):
+            res_block = layer[k]
+            if block_type == "basic":
+                assert isinstance(res_block, ResNetBlock)
+            else:
+                assert isinstance(res_block, ResNetBottleneck)
+            if block_type == "basic":
+                assert res_block.conv2.out_channels == n_feats
+            else:
+                assert res_block.conv1.out_channels == n_feats // bottleneck_reduction
+                assert res_block.conv3.out_channels == n_feats
+        with pytest.raises(IndexError):
+            layer[k + 1]
+    with pytest.raises(AttributeError):
+        getattr(net, f"layer{i+1}")
+
+    assert net.conv0.kernel_size == (
+        init_conv_size
+        if isinstance(init_conv_size, tuple)
+        else (init_conv_size,) * spatial_dims
+    )
+    assert net.conv0.stride == (
+        init_conv_stride
+        if isinstance(init_conv_stride, tuple)
+        else (init_conv_stride,) * spatial_dims
+    )
+
+
+@pytest.mark.parametrize("act", [act for act in ActFunction])
+def test_activations(act):
+    batch_size = INPUT_2D.shape[0]
+    net = ResNet(
+        spatial_dims=len(INPUT_2D.shape[2:]),
+        in_channels=INPUT_2D.shape[1],
+        num_outputs=2,
+        n_features=(8, 16),
+        n_res_blocks=(2, 2),
+        act=act,
+    )
+    assert net(INPUT_2D).shape == (batch_size, 2)
+
+
+def test_activation_parameters():
+    act = ("ELU", {"alpha": 0.1})
+    output_act = ("ELU", {"alpha": 0.2})
+    net = ResNet(
+        spatial_dims=len(INPUT_2D.shape[2:]),
+        in_channels=INPUT_2D.shape[1],
+        num_outputs=2,
+        n_features=(8, 16),
+        n_res_blocks=(2, 2),
+        act=act,
+        output_act=output_act,
+    )
+    assert isinstance(net.layer1[0].act1, torch.nn.ELU)
+    assert net.layer1[0].act1.alpha == 0.1
+    assert isinstance(net.layer2[1].act2, torch.nn.ELU)
+ assert net.layer2[1].act2.alpha == 0.1 + assert isinstance(net.act0, torch.nn.ELU) + assert net.act0.alpha == 0.1 + assert isinstance(net.fc.output_act, torch.nn.ELU) + assert net.fc.output_act.alpha == 0.2 + + +@pytest.mark.parametrize( + "name,num_outputs,output_act", + [ + (SOTAResNet.RESNET_18, 1, "sigmoid"), + (SOTAResNet.RESNET_34, 2, None), + (SOTAResNet.RESNET_50, None, "sigmoid"), + (SOTAResNet.RESNET_101, None, None), + (SOTAResNet.RESNET_152, None, None), + ], +) +def test_get_resnet(name, num_outputs, output_act): + resnet = get_resnet( + name, num_outputs=num_outputs, output_act=output_act, pretrained=True + ) + if num_outputs: + assert resnet.fc.out.out_features == num_outputs + else: + assert resnet.fc is None + + if output_act and num_outputs: + assert resnet.fc.output_act is not None + elif output_act and num_outputs is None: + with pytest.raises(AttributeError): + resnet.fc.output_act + + +def test_get_resnet_output(): + from torchvision.models import resnet18 + + resnet = get_resnet(SOTAResNet.RESNET_18, num_outputs=None, pretrained=True) + gt = resnet18(weights="DEFAULT") + gt.avgpool = torch.nn.Identity() + gt.fc = torch.nn.Identity() + x = torch.randn(1, 3, 128, 128) + assert (torch.flatten(resnet(x), start_dim=1) == gt(x)).all() diff --git a/tests/unittests/monai_networks/nn/test_senet.py b/tests/unittests/monai_networks/nn/test_senet.py new file mode 100644 index 000000000..b46eb663a --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_senet.py @@ -0,0 +1,172 @@ +import pytest +import torch + +from clinicadl.monai_networks.nn import SEResNet, get_seresnet +from clinicadl.monai_networks.nn.layers.senet import SEResNetBlock, SEResNetBottleneck +from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.monai_networks.nn.senet import SOTAResNet + +INPUT_1D = torch.randn(3, 1, 16) +INPUT_2D = torch.randn(3, 2, 15, 16) +INPUT_3D = torch.randn(3, 3, 20, 21, 22) + + +@pytest.mark.parametrize( + "input_tensor,num_outputs,block_type,n_res_blocks,n_features,init_conv_size,init_conv_stride,bottleneck_reduction,act,output_act,se_reduction", + [ + (INPUT_1D, 2, "basic", (2, 3), (4, 8), 7, 1, 2, "relu", None, 4), + ( + INPUT_2D, + None, + "bottleneck", + (3, 2, 2), + (8, 12, 16), + 5, + (2, 1), + 4, + "elu", + "sigmoid", + 2, + ), + (INPUT_3D, 1, "bottleneck", (2,), (3,), (4, 3, 4), 2, 1, "tanh", "sigmoid", 2), + ], +) +def test_seresnet( + input_tensor, + num_outputs, + block_type, + n_res_blocks, + n_features, + init_conv_size, + init_conv_stride, + bottleneck_reduction, + act, + output_act, + se_reduction, +): + batch_size = input_tensor.shape[0] + spatial_dims = len(input_tensor.shape[2:]) + net = SEResNet( + spatial_dims=spatial_dims, + in_channels=input_tensor.shape[1], + num_outputs=num_outputs, + block_type=block_type, + n_res_blocks=n_res_blocks, + n_features=n_features, + init_conv_size=init_conv_size, + init_conv_stride=init_conv_stride, + bottleneck_reduction=bottleneck_reduction, + act=act, + output_act=output_act, + se_reduction=se_reduction, + ) + output = net(input_tensor) + + if num_outputs: + assert output.shape == (batch_size, num_outputs) + else: + assert len(output.shape) == len(input_tensor.shape) + + if output_act and num_outputs: + assert net.fc.output_act is not None + elif output_act and num_outputs is None: + with pytest.raises(AttributeError): + net.fc.output_act + + for i, (n_blocks, n_feats) in enumerate(zip(n_res_blocks, n_features), start=1): + layer = getattr(net, f"layer{i}") + for k in range(n_blocks): + res_block = 
layer[k]
+            if block_type == "basic":
+                assert isinstance(res_block, SEResNetBlock)
+            else:
+                assert isinstance(res_block, SEResNetBottleneck)
+            if block_type == "basic":
+                assert res_block.conv2.out_channels == n_feats
+            else:
+                assert res_block.conv1.out_channels == n_feats // bottleneck_reduction
+                assert res_block.conv3.out_channels == n_feats
+        with pytest.raises(IndexError):
+            layer[k + 1]
+    with pytest.raises(AttributeError):
+        getattr(net, f"layer{i+1}")
+
+    assert net.conv0.kernel_size == (
+        init_conv_size
+        if isinstance(init_conv_size, tuple)
+        else (init_conv_size,) * spatial_dims
+    )
+    assert net.conv0.stride == (
+        init_conv_stride
+        if isinstance(init_conv_stride, tuple)
+        else (init_conv_stride,) * spatial_dims
+    )
+
+
+@pytest.mark.parametrize("act", [act for act in ActFunction])
+def test_activations(act):
+    batch_size = INPUT_2D.shape[0]
+    net = SEResNet(
+        spatial_dims=len(INPUT_2D.shape[2:]),
+        in_channels=INPUT_2D.shape[1],
+        num_outputs=2,
+        n_features=(8, 16),
+        n_res_blocks=(2, 2),
+        act=act,
+        se_reduction=2,
+    )
+    assert net(INPUT_2D).shape == (batch_size, 2)
+
+
+def test_activation_parameters():
+    act = ("ELU", {"alpha": 0.1})
+    output_act = ("ELU", {"alpha": 0.2})
+    net = SEResNet(
+        spatial_dims=len(INPUT_2D.shape[2:]),
+        in_channels=INPUT_2D.shape[1],
+        num_outputs=2,
+        n_features=(8, 16),
+        n_res_blocks=(2, 2),
+        act=act,
+        output_act=output_act,
+        se_reduction=2,
+    )
+    assert isinstance(net.layer1[0].act1, torch.nn.ELU)
+    assert net.layer1[0].act1.alpha == 0.1
+    assert isinstance(net.layer2[1].act2, torch.nn.ELU)
+    assert net.layer2[1].act2.alpha == 0.1
+    assert isinstance(net.act0, torch.nn.ELU)
+    assert net.act0.alpha == 0.1
+    assert isinstance(net.fc.output_act, torch.nn.ELU)
+    assert net.fc.output_act.alpha == 0.2
+
+
+@pytest.mark.parametrize(
+    "name,num_outputs,output_act",
+    [
+        (SOTAResNet.SE_RESNET_50, 1, "sigmoid"),
+        (SOTAResNet.SE_RESNET_101, 2, None),
+        (SOTAResNet.SE_RESNET_152, None, "sigmoid"),
+    ],
+)
+def test_get_seresnet(name, num_outputs, output_act):
+    seresnet = get_seresnet(
+        name,
+        num_outputs=num_outputs,
+        output_act=output_act,
+    )
+    if num_outputs:
+        assert seresnet.fc.out.out_features == num_outputs
+    else:
+        assert seresnet.fc is None
+
+    if output_act and num_outputs:
+        assert seresnet.fc.output_act is not None
+    elif output_act and num_outputs is None:
+        with pytest.raises(AttributeError):
+            seresnet.fc.output_act
+
+
+def test_get_seresnet_error():
+    with pytest.raises(ValueError):
+        get_seresnet(SOTAResNet.SE_RESNET_50, num_outputs=1, pretrained=True)
diff --git a/tests/unittests/monai_networks/nn/test_unet.py b/tests/unittests/monai_networks/nn/test_unet.py
new file mode 100644
index 000000000..b8f06faa8
--- /dev/null
+++ b/tests/unittests/monai_networks/nn/test_unet.py
@@ -0,0 +1,127 @@
+import pytest
+import torch
+
+from clinicadl.monai_networks.nn import UNet
+from clinicadl.monai_networks.nn.layers.utils import ActFunction
+
+INPUT_1D = torch.randn(2, 1, 16)
+INPUT_2D = torch.randn(2, 2, 32, 64)
+INPUT_3D = torch.randn(2, 3, 16, 32, 8)
+
+
+@pytest.mark.parametrize(
+    "input_tensor,out_channels,channels,act,output_act,dropout,error",
+    [
+        (INPUT_1D, 1, (2, 3, 4), "relu", "sigmoid", None, False),
+        (INPUT_2D, 1, (2, 4, 5), "relu", None, 0.0, False),
+        (INPUT_3D, 2, (2, 3), None, ("softmax", {"dim": 1}), 0.1, False),
+        (
+            INPUT_3D,
+            2,
+            (2,),
+            None,
+            ("softmax", {"dim": 1}),
+            0.1,
+            True,
+        ),  # channels length is less than 2
+    ],
+)
+def test_unet(input_tensor, out_channels, channels, act, output_act, 
dropout, error): + batch_size, in_channels, *img_size = input_tensor.shape + spatial_dims = len(img_size) + if error: + with pytest.raises(ValueError): + UNet( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + channels=channels, + act=act, + output_act=output_act, + dropout=dropout, + ) + else: + net = UNet( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + channels=channels, + act=act, + output_act=output_act, + dropout=dropout, + ) + + out = net(input_tensor) + assert out.shape == (batch_size, out_channels, *img_size) + + if output_act: + assert net.output_act is not None + else: + assert net.output_act is None + + assert net.doubleconv[1].conv.out_channels == channels[0] + if dropout: + assert net.doubleconv[1].adn.D.p == dropout + else: + with pytest.raises(AttributeError): + net.doubleconv[1].conv.adn.D + + for i in range(1, len(channels)): + down = getattr(net, f"down{i}").doubleconv + up = getattr(net, f"doubleconv{i}") + assert down[0].conv.in_channels == channels[i - 1] + assert down[1].conv.out_channels == channels[i] + assert up[0].conv.in_channels == channels[i - 1] * 2 + assert up[1].conv.out_channels == channels[i - 1] + for m in (down, up): + if dropout is not None: + assert m[1].adn.D.p == dropout + else: + with pytest.raises(AttributeError): + m[1].adn.D + with pytest.raises(AttributeError): + down = getattr(net, f"down{i+1}") + with pytest.raises(AttributeError): + getattr(net, f"doubleconv{i+1}") + + +@pytest.mark.parametrize("act", [act for act in ActFunction]) +def test_activations(act): + batch_size, in_channels, *img_size = INPUT_2D.shape + net = UNet( + spatial_dims=2, + in_channels=in_channels, + out_channels=2, + channels=(2, 4), + act=act, + output_act=act, + ) + assert net(INPUT_2D).shape == (batch_size, 2, *img_size) + + +def test_activation_parameters(): + in_channels = INPUT_2D.shape[1] + act = ("ELU", {"alpha": 0.1}) + output_act = ("ELU", {"alpha": 0.2}) + net = UNet( + spatial_dims=2, + in_channels=in_channels, + out_channels=2, + channels=(2, 4), + act=act, + output_act=output_act, + ) + assert isinstance(net.doubleconv[0].adn.A, torch.nn.ELU) + assert net.doubleconv[0].adn.A.alpha == 0.1 + + assert isinstance(net.down1.doubleconv[0].adn.A, torch.nn.ELU) + assert net.down1.doubleconv[0].adn.A.alpha == 0.1 + + assert isinstance(net.upsample1[1].adn.A, torch.nn.ELU) + assert net.upsample1[1].adn.A.alpha == 0.1 + + assert isinstance(net.doubleconv1[1].adn.A, torch.nn.ELU) + assert net.doubleconv1[1].adn.A.alpha == 0.1 + + assert isinstance(net.output_act, torch.nn.ELU) + assert net.output_act.alpha == 0.2 diff --git a/tests/unittests/monai_networks/nn/test_vae.py b/tests/unittests/monai_networks/nn/test_vae.py new file mode 100644 index 000000000..ca2fb24b8 --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_vae.py @@ -0,0 +1,99 @@ +import pytest +import torch +from numpy import isclose +from torch.nn import ReLU + +from clinicadl.monai_networks.nn import VAE + + +@pytest.mark.parametrize( + "input_tensor,kernel_size,stride,padding,dilation,pooling,pooling_indices", + [ + (torch.randn(2, 1, 21), 3, 1, 0, 1, ("max", {"kernel_size": 2}), [0]), + ( + torch.randn(2, 1, 65, 85), + (3, 5), + (1, 2), + 0, + (1, 2), + ("max", {"kernel_size": 2, "stride": 1}), + [0], + ), + ( + torch.randn(2, 1, 64, 62, 61), # to test output padding + 4, + 2, + (1, 1, 0), + 1, + ("avg", {"kernel_size": 3, "stride": 2}), + [0], + ), + ( + torch.randn(2, 1, 51, 55, 45), + 3, + 2, + 0, + 1, + ("max", 
{"kernel_size": 2, "ceil_mode": True}), + [0, 1], + ), + ( + torch.randn(2, 1, 51, 55, 45), + 3, + 2, + 0, + 1, + [ + ("max", {"kernel_size": 2, "ceil_mode": True}), + ("max", {"kernel_size": 2, "stride": 1, "ceil_mode": False}), + ], + [0, 1], + ), + ], +) +def test_output_shape( + input_tensor, kernel_size, stride, padding, dilation, pooling, pooling_indices +): + latent_size = 3 + net = VAE( + in_shape=input_tensor.shape[1:], + latent_size=latent_size, + conv_args={ + "channels": [2, 4, 8], + "kernel_size": kernel_size, + "stride": stride, + "padding": padding, + "dilation": dilation, + "pooling": pooling, + "pooling_indices": pooling_indices, + }, + ) + recon, mu, log_var = net(input_tensor) + assert recon.shape == input_tensor.shape + assert mu.shape == (input_tensor.shape[0], latent_size) + assert log_var.shape == (input_tensor.shape[0], latent_size) + + +def test_mu_log_var(): + net = VAE( + in_shape=(1, 5, 5), + latent_size=4, + conv_args={"channels": []}, + mlp_args={"hidden_channels": [12], "output_act": "relu", "act": "celu"}, + ) + assert net.mu.linear.in_features == 12 + assert net.log_var.linear.in_features == 12 + assert isinstance(net.mu.output_act, ReLU) + assert isinstance(net.log_var.output_act, ReLU) + assert net.encoder(torch.randn(2, 1, 5, 5)).shape == (2, 12) + _, mu, log_var = net(torch.randn(2, 1, 5, 5)) + assert not isclose(mu.detach().numpy(), log_var.detach().numpy()).all() + + net = VAE( + in_shape=(1, 5, 5), + latent_size=4, + conv_args={"channels": []}, + mlp_args={"hidden_channels": [12]}, + ) + assert net.mu.linear.in_features == 12 + assert net.log_var.linear.in_features == 12 diff --git a/tests/unittests/monai_networks/nn/test_vit.py b/tests/unittests/monai_networks/nn/test_vit.py new file mode 100644 index 000000000..741d6e5f8 --- /dev/null +++ b/tests/unittests/monai_networks/nn/test_vit.py @@ -0,0 +1,279 @@ +import numpy as np +import pytest +import torch + +from clinicadl.monai_networks.nn import ViT, get_vit +from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.monai_networks.nn.vit import SOTAViT + +INPUT_1D = torch.randn(2, 1, 16) +INPUT_2D = torch.randn(2, 2, 15, 16) +INPUT_3D = torch.randn(2, 3, 24, 24, 24) + + +@pytest.mark.parametrize( + "input_tensor,patch_size,num_outputs,embedding_dim,num_layers,num_heads,mlp_dim,pos_embed_type,output_act,dropout,error", + [ + (INPUT_1D, 4, 1, 25, 3, 5, 26, None, "softmax", None, False), + ( + INPUT_1D, + 5, + 1, + 25, + 3, + 5, + 26, + None, + "softmax", + None, + True, + ), # img not divisible by patch + ( + INPUT_1D, + 4, + 1, + 25, + 3, + 4, + 26, + None, + "softmax", + None, + True, + ), # embedding not divisible by num heads + (INPUT_1D, 4, 1, 24, 5, 4, 26, "sincos", "softmax", None, True), # sincos + (INPUT_2D, (3, 4), None, 24, 2, 4, 42, "learnable", "tanh", 0.1, False), + ( + INPUT_2D, + 4, + None, + 24, + 2, + 6, + 42, + "learnable", + "tanh", + 0.1, + True, + ), # img not divisible by patch + ( + INPUT_2D, + (3, 4), + None, + 24, + 2, + 5, + 42, + "learnable", + "tanh", + 0.1, + True, + ), # embedding not divisible by num heads + ( + INPUT_2D, + (3, 4), + None, + 18, + 2, + 6, + 42, + "sincos", + "tanh", + 0.1, + True, + ), # sincos : embedding not divisible by 4 + (INPUT_2D, (3, 4), None, 24, 2, 6, 42, "sincos", "tanh", 0.1, False), + ( + INPUT_3D, + 6, + 2, + 15, + 2, + 3, + 42, + "sincos", + None, + 0.0, + True, + ), # sincos : embedding not divisible by 6 + (INPUT_3D, 6, 2, 18, 2, 3, 42, "sincos", None, 0.0, False), + ], +) +def test_vit( + input_tensor, + 
patch_size, + num_outputs, + embedding_dim, + num_layers, + num_heads, + mlp_dim, + pos_embed_type, + output_act, + dropout, + error, +): + batch_size = input_tensor.shape[0] + img_size = input_tensor.shape[2:] + spatial_dims = len(img_size) + if error: + with pytest.raises(ValueError): + ViT( + in_shape=input_tensor.shape[1:], + patch_size=patch_size, + num_outputs=num_outputs, + embedding_dim=embedding_dim, + num_layers=num_layers, + num_heads=num_heads, + mlp_dim=mlp_dim, + pos_embed_type=pos_embed_type, + output_act=output_act, + dropout=dropout, + ) + else: + net = ViT( + in_shape=input_tensor.shape[1:], + patch_size=patch_size, + num_outputs=num_outputs, + embedding_dim=embedding_dim, + num_layers=num_layers, + num_heads=num_heads, + mlp_dim=mlp_dim, + pos_embed_type=pos_embed_type, + output_act=output_act, + dropout=dropout, + ) + output = net(input_tensor) + + if num_outputs: + assert output.shape == (batch_size, num_outputs) + else: + n_patches = int( + np.prod( + np.array(img_size) + // np.array( + patch_size + if isinstance(patch_size, tuple) + else (patch_size,) * spatial_dims + ) + ) + ) + assert output.shape == (batch_size, n_patches, embedding_dim) + + if output_act and num_outputs: + assert net.fc.output_act is not None + elif output_act and num_outputs is None: + with pytest.raises(AttributeError): + net.fc.output_act + + assert net.conv_proj.out_channels == embedding_dim + encoder = net.encoder.layers + for transformer_block in encoder: + assert isinstance(transformer_block.norm1, torch.nn.LayerNorm) + assert isinstance(transformer_block.norm2, torch.nn.LayerNorm) + assert transformer_block.self_attention.num_heads == num_heads + assert transformer_block.self_attention.dropout == ( + dropout if dropout is not None else 0.0 + ) + assert transformer_block.self_attention.embed_dim == embedding_dim + assert transformer_block.mlp[0].out_features == mlp_dim + assert transformer_block.mlp[2].p == ( + dropout if dropout is not None else 0.0 + ) + assert transformer_block.mlp[4].p == ( + dropout if dropout is not None else 0.0 + ) + assert net.encoder.dropout.p == (dropout if dropout is not None else 0.0) + assert isinstance(net.encoder.norm, torch.nn.LayerNorm) + + pos_embedding = net.encoder.pos_embedding + if pos_embed_type is None: + assert not pos_embedding.requires_grad + assert (pos_embedding == torch.zeros_like(pos_embedding)).all() + elif pos_embed_type == "sincos": + assert not pos_embedding.requires_grad + if num_outputs: + assert ( + pos_embedding[0, 1, 0] == 0.0 + ) # first element of of sincos embedding of first patch is zero + else: + assert pos_embedding[0, 0, 0] == 0.0 + else: + assert pos_embedding.requires_grad + if num_outputs: + assert pos_embedding[0, 1, 0] != 0.0 + else: + assert pos_embedding[0, 0, 0] != 0.0 + + with pytest.raises(IndexError): + encoder[num_layers] + + +@pytest.mark.parametrize("act", [act for act in ActFunction]) +def test_activations(act): + batch_size = INPUT_2D.shape[0] + net = ViT( + in_shape=INPUT_2D.shape[1:], + patch_size=(3, 4), + num_outputs=1, + embedding_dim=12, + num_layers=2, + num_heads=3, + mlp_dim=24, + output_act=act, + ) + assert net(INPUT_2D).shape == (batch_size, 1) + + +def test_activation_parameters(): + output_act = ("ELU", {"alpha": 0.2}) + net = ViT( + in_shape=(1, 12, 12), + patch_size=3, + num_outputs=1, + embedding_dim=12, + num_layers=2, + num_heads=3, + mlp_dim=24, + output_act=output_act, + ) + assert isinstance(net.fc.output_act, torch.nn.ELU) + assert net.fc.output_act.alpha == 0.2 + + 
+@pytest.mark.parametrize( + "name,num_outputs,output_act,img_size", + [ + (SOTAViT.B_16, 1, "sigmoid", (224, 224)), + (SOTAViT.B_32, 2, None, (224, 224)), + (SOTAViT.L_16, None, "sigmoid", (224, 224)), + (SOTAViT.L_32, None, None, (224, 224)), + ], +) +def test_get_vit(name, num_outputs, output_act, img_size): + input_tensor = torch.randn(1, 3, *img_size) + + vit = get_vit(name, num_outputs=num_outputs, output_act=output_act, pretrained=True) + if num_outputs: + assert vit.fc.out.out_features == num_outputs + else: + assert vit.fc is None + + if output_act and num_outputs: + assert vit.fc.output_act is not None + elif output_act and num_outputs is None: + assert vit.fc is None + + vit(input_tensor) + + +def test_get_vit_output(): + from torchvision.models import vit_b_16 + + gt = vit_b_16(weights="DEFAULT") + gt.heads = torch.nn.Identity() + x = torch.randn(1, 3, 224, 224) + + vit = get_vit(SOTAViT.B_16, num_outputs=1, pretrained=True) + vit.fc = torch.nn.Identity() + with torch.no_grad(): + assert (vit(x) == gt(x)).all() diff --git a/tests/unittests/monai_networks/nn/utils/__init__.py b/tests/unittests/monai_networks/nn/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/monai_networks/nn/utils/test_checks.py b/tests/unittests/monai_networks/nn/utils/test_checks.py new file mode 100644 index 000000000..27cc234f5 --- /dev/null +++ b/tests/unittests/monai_networks/nn/utils/test_checks.py @@ -0,0 +1,127 @@ +import pytest + +from clinicadl.monai_networks.nn.utils.checks import ( + _check_conv_parameter, + check_adn_ordering, + check_conv_args, + check_mlp_args, + check_norm_layer, + check_pool_indices, + ensure_list_of_tuples, +) + + +@pytest.mark.parametrize( + "adn,error", + [("ADN", False), ("ND", False), ("A", False), ("AAD", True), ("ADM", True)], +) +def test_check_adn_ordering(adn, error): + if error: + with pytest.raises(ValueError): + check_adn_ordering(adn) + else: + check_adn_ordering(adn) + + +@pytest.mark.parametrize( + "parameter,expected_output", + [ + (5, (5, 5, 5)), + ((5, 4, 4), (5, 4, 4)), + ([5, 4], [(5, 5, 5), (4, 4, 4)]), + ([5, (4, 3, 3)], [(5, 5, 5), (4, 3, 3)]), + ((5, 5), None), + ([5, 5, 5], None), + ([5, (4, 4)], None), + (5.0, None), + ], +) +def test_check_conv_parameter(parameter, expected_output): + if expected_output: + assert ( + _check_conv_parameter(parameter, dim=3, n_layers=2, name="abc") + == expected_output + ) + else: + with pytest.raises(ValueError): + _check_conv_parameter(parameter, dim=3, n_layers=2, name="abc") + + +@pytest.mark.parametrize( + "parameter,expected_output", + [ + (5, [(5, 5, 5), (5, 5, 5)]), + ((5, 4, 4), [(5, 4, 4), (5, 4, 4)]), + ([5, 4], [(5, 5, 5), (4, 4, 4)]), + ([5, (4, 3, 3)], [(5, 5, 5), (4, 3, 3)]), + ], +) +def test_ensure_list_of_tuples(parameter, expected_output): + assert ( + ensure_list_of_tuples(parameter, dim=3, n_layers=2, name="abc") + == expected_output + ) + + +@pytest.mark.parametrize( + "indices,n_layers,error", + [ + ([0, 1, 2], 4, False), + ([0, 1, 2], 3, False), + ([-1, 1, 2], 3, False), + ([0, 1, 2], 2, True), + ([-2, 1, 2], 3, True), + ], +) +def test_check_pool_indices(indices, n_layers, error): + if error: + with pytest.raises(ValueError): + _ = check_pool_indices(indices, n_layers) + else: + check_pool_indices(indices, n_layers) + + +@pytest.mark.parametrize( + "inputs,error", + [ + (None, False), + ("abc", True), + ("batch", False), + ("group", True), + (("batch",), True), + (("batch", 3), True), + (("batch", {"eps": 0.1}), False), + (("group", {"num_groups": 
2}), False), + (("group", {"num_groups": 2, "eps": 0.1}), False), + ], +) +def test_check_norm_layer(inputs, error): + if error: + with pytest.raises(ValueError): + _ = check_norm_layer(inputs) + else: + assert check_norm_layer(inputs) == inputs + + +@pytest.mark.parametrize( + "conv_args,error", + [(None, True), ({"kernel_size": 3}, True), ({"channels": [2]}, False)], +) +def test_check_conv_args(conv_args, error): + if error: + with pytest.raises(ValueError): + check_conv_args(conv_args) + else: + check_conv_args(conv_args) + + +@pytest.mark.parametrize( + "mlp_args,error", + [({"act": "tanh"}, True), ({"hidden_channels": [2]}, False)], +) +def test_check_mlp_args(mlp_args, error): + if error: + with pytest.raises(ValueError): + check_mlp_args(mlp_args) + else: + check_mlp_args(mlp_args) diff --git a/tests/unittests/monai_networks/nn/utils/test_shapes.py b/tests/unittests/monai_networks/nn/utils/test_shapes.py new file mode 100644 index 000000000..b7ae2d444 --- /dev/null +++ b/tests/unittests/monai_networks/nn/utils/test_shapes.py @@ -0,0 +1,281 @@ +import pytest +import torch + +from clinicadl.monai_networks.nn.utils.shapes import ( + _calculate_adaptivepool_out_shape, + _calculate_avgpool_out_shape, + _calculate_maxpool_out_shape, + _calculate_upsample_out_shape, + calculate_conv_out_shape, + calculate_convtranspose_out_shape, + calculate_pool_out_shape, + calculate_unpool_out_shape, +) + +INPUT_1D = torch.randn(2, 1, 10) +INPUT_2D = torch.randn(2, 1, 32, 32) +INPUT_3D = torch.randn(2, 1, 20, 21, 22) + + +@pytest.mark.parametrize( + "input_tensor,kernel_size,stride,padding,dilation", + [ + (INPUT_3D, 7, 2, (1, 2, 3), 3), + (INPUT_2D, (5, 3), 1, 0, (2, 2)), + (INPUT_1D, 3, 1, 2, 1), + ], +) +def test_calculate_conv_out_shape(input_tensor, kernel_size, stride, padding, dilation): + in_shape = input_tensor.shape[2:] + dim = len(input_tensor.shape[2:]) + args = { + "in_channels": 1, + "out_channels": 1, + "kernel_size": kernel_size, + "stride": stride, + "padding": padding, + "dilation": dilation, + } + if dim == 1: + conv = torch.nn.Conv1d + elif dim == 2: + conv = torch.nn.Conv2d + else: + conv = torch.nn.Conv3d + + output_shape = conv(**args)(input_tensor).shape[2:] + assert ( + calculate_conv_out_shape(in_shape, kernel_size, stride, padding, dilation) + == output_shape + ) + + +@pytest.mark.parametrize( + "input_tensor,kernel_size,stride,padding,dilation,output_padding", + [ + (INPUT_3D, 7, 2, (1, 2, 3), 3, 0), + (INPUT_2D, (5, 3), 1, 0, (2, 2), (1, 0)), + (INPUT_1D, 3, 3, 2, 1, 2), + ], +) +def test_calculate_convtranspose_out_shape( + input_tensor, kernel_size, stride, padding, dilation, output_padding +): + in_shape = input_tensor.shape[2:] + dim = len(input_tensor.shape[2:]) + args = { + "in_channels": 1, + "out_channels": 1, + "kernel_size": kernel_size, + "stride": stride, + "padding": padding, + "dilation": dilation, + "output_padding": output_padding, + } + if dim == 1: + conv = torch.nn.ConvTranspose1d + elif dim == 2: + conv = torch.nn.ConvTranspose2d + else: + conv = torch.nn.ConvTranspose3d + + output_shape = conv(**args)(input_tensor).shape[2:] + assert ( + calculate_convtranspose_out_shape( + in_shape, kernel_size, stride, padding, output_padding, dilation + ) + == output_shape + ) + + +@pytest.mark.parametrize( + "input_tensor,kernel_size,stride,padding,dilation,ceil_mode", + [ + (INPUT_3D, 7, 2, (1, 2, 3), 3, False), + (INPUT_3D, 7, 2, (1, 2, 3), 3, True), + (INPUT_2D, (5, 3), 1, 0, (2, 2), False), + (INPUT_2D, (5, 3), 1, 0, (2, 2), True), + (INPUT_1D, 2, 1, 1, 1, 
False), + (INPUT_1D, 2, 1, 1, 1, True), + ], +) +def test_calculate_maxpool_out_shape( + input_tensor, kernel_size, stride, padding, dilation, ceil_mode +): + in_shape = input_tensor.shape[2:] + dim = len(input_tensor.shape[2:]) + args = { + "kernel_size": kernel_size, + "stride": stride, + "padding": padding, + "dilation": dilation, + "ceil_mode": ceil_mode, + } + if dim == 1: + max_pool = torch.nn.MaxPool1d + elif dim == 2: + max_pool = torch.nn.MaxPool2d + else: + max_pool = torch.nn.MaxPool3d + + output_shape = max_pool(**args)(input_tensor).shape[2:] + assert ( + _calculate_maxpool_out_shape( + in_shape, kernel_size, stride, padding, dilation, ceil_mode=ceil_mode + ) + == output_shape + ) + + +@pytest.mark.parametrize( + "input_tensor,kernel_size,stride,padding,ceil_mode", + [ + (INPUT_3D, 7, 2, (1, 2, 3), False), + (INPUT_3D, 7, 2, (1, 2, 3), True), + (INPUT_2D, (5, 3), 1, 0, False), + (INPUT_2D, (5, 3), 1, 0, True), + (INPUT_1D, 2, 1, 1, False), + (INPUT_1D, 2, 1, 1, True), + ( + INPUT_1D, + 2, + 3, + 1, + True, + ), # special case with ceil_mode (see: https://pytorch.org/docs/stable/generated/torch.nn.AvgPool1d.html) + ], +) +def test_calculate_avgpool_out_shape( + input_tensor, kernel_size, stride, padding, ceil_mode +): + in_shape = input_tensor.shape[2:] + dim = len(in_shape) + args = { + "kernel_size": kernel_size, + "stride": stride, + "padding": padding, + "ceil_mode": ceil_mode, + } + if dim == 1: + avg_pool = torch.nn.AvgPool1d + elif dim == 2: + avg_pool = torch.nn.AvgPool2d + else: + avg_pool = torch.nn.AvgPool3d + output_shape = avg_pool(**args)(input_tensor).shape[2:] + assert ( + _calculate_avgpool_out_shape( + in_shape, kernel_size, stride, padding, ceil_mode=ceil_mode + ) + == output_shape + ) + + +@pytest.mark.parametrize( + "input_tensor,kwargs", + [ + (INPUT_3D, {"output_size": 1}), + (INPUT_2D, {"output_size": (1, 2)}), + (INPUT_1D, {"output_size": 3}), + ], +) +def test_calculate_adaptivepool_out_shape(input_tensor, kwargs): + in_shape = input_tensor.shape[2:] + dim = len(in_shape) + if dim == 1: + avg_pool = torch.nn.AdaptiveAvgPool1d + max_pool = torch.nn.AdaptiveMaxPool1d + elif dim == 2: + avg_pool = torch.nn.AdaptiveAvgPool2d + max_pool = torch.nn.AdaptiveMaxPool2d + else: + avg_pool = torch.nn.AdaptiveAvgPool3d + max_pool = torch.nn.AdaptiveMaxPool3d + + output_shape = max_pool(**kwargs)(input_tensor).shape[2:] + assert _calculate_adaptivepool_out_shape(in_shape, **kwargs) == output_shape + + output_shape = avg_pool(**kwargs)(input_tensor).shape[2:] + assert _calculate_adaptivepool_out_shape(in_shape, **kwargs) == output_shape + + +def test_calculate_pool_out_shape(): + in_shape = INPUT_3D.shape[2:] + assert calculate_pool_out_shape( + pool_mode="max", + in_shape=in_shape, + kernel_size=7, + stride=2, + padding=(1, 2, 3), + dilation=3, + ceil_mode=True, + ) == (3, 4, 6) + assert calculate_pool_out_shape( + pool_mode="avg", + in_shape=in_shape, + kernel_size=7, + stride=2, + padding=(1, 2, 3), + ceil_mode=True, + ) == (9, 10, 12) + assert calculate_pool_out_shape( + pool_mode="adaptiveavg", + in_shape=in_shape, + output_size=(3, 4, 5), + ) == (3, 4, 5) + assert calculate_pool_out_shape( + pool_mode="adaptivemax", + in_shape=in_shape, + output_size=1, + ) == (1, 1, 1) + with pytest.raises(ValueError): + calculate_pool_out_shape( + pool_mode="abc", + in_shape=in_shape, + kernel_size=7, + stride=2, + padding=(1, 2, 3), + dilation=3, + ceil_mode=True, + ) + + +@pytest.mark.parametrize( + "input_tensor,kwargs", + [ + (INPUT_3D, {"scale_factor": 2}), + (INPUT_2D, 
{"size": (40, 41)}), + (INPUT_2D, {"size": 40}), + (INPUT_2D, {"scale_factor": (3, 2)}), + (INPUT_1D, {"scale_factor": 2}), + ], +) +def test_calculate_upsample_out_shape(input_tensor, kwargs): + in_shape = input_tensor.shape[2:] + unpool = torch.nn.Upsample(**kwargs) + + output_shape = unpool(input_tensor).shape[2:] + assert _calculate_upsample_out_shape(in_shape, **kwargs) == output_shape + + +def test_calculate_unpool_out_shape(): + in_shape = INPUT_3D.shape[2:] + assert calculate_unpool_out_shape( + unpool_mode="convtranspose", + in_shape=in_shape, + kernel_size=5, + stride=1, + padding=0, + output_padding=0, + dilation=1, + ) == (24, 25, 26) + assert calculate_unpool_out_shape( + unpool_mode="upsample", + in_shape=in_shape, + scale_factor=2, + ) == (40, 42, 44) + with pytest.raises(ValueError): + calculate_unpool_out_shape( + unpool_mode="abc", + in_shape=in_shape, + ) diff --git a/tests/unittests/monai_networks/test_factory.py b/tests/unittests/monai_networks/test_factory.py index 28e8113fe..961238111 100644 --- a/tests/unittests/monai_networks/test_factory.py +++ b/tests/unittests/monai_networks/test_factory.py @@ -1,10 +1,15 @@ import pytest -from monai.networks.nets import ResNet -from monai.networks.nets.resnet import ResNetBottleneck -from torch.nn import Conv2d -from clinicadl.monai_networks import get_network -from clinicadl.monai_networks.config import create_network_config +from clinicadl.monai_networks import ( + ImplementedNetworks, + get_network, + get_network_from_config, +) +from clinicadl.monai_networks.config.autoencoder import AutoEncoderConfig +from clinicadl.monai_networks.factory import _update_config_with_defaults +from clinicadl.monai_networks.nn import AutoEncoder + +tested = [] @pytest.mark.parametrize( @@ -13,124 +18,285 @@ ( "AutoEncoder", { - "spatial_dims": 3, - "in_channels": 1, - "out_channels": 1, - "channels": [2, 2], - "strides": [1, 1], + "in_shape": (1, 64, 65), + "latent_size": 1, + "conv_args": {"channels": [2, 4]}, }, ), ( - "VarAutoEncoder", + "VAE", { - "spatial_dims": 3, - "in_shape": (1, 16, 16, 16), - "out_channels": 1, - "latent_size": 16, - "channels": [2, 2], - "strides": [1, 1], + "in_shape": (1, 64, 65), + "latent_size": 1, + "conv_args": {"channels": [2, 4]}, }, ), ( - "Regressor", + "CNN", { - "in_shape": (1, 16, 16, 16), - "out_shape": (1, 16, 16, 16), - "channels": [2, 2], - "strides": [1, 1], + "in_shape": (1, 64, 65), + "num_outputs": 1, + "conv_args": {"channels": [2, 4]}, }, ), ( - "Classifier", + "Generator", { - "in_shape": (1, 16, 16, 16), - "classes": 2, - "channels": [2, 2], - "strides": [1, 1], + "latent_size": 1, + "start_shape": (1, 5, 5), + "conv_args": {"channels": [2, 4]}, }, ), ( - "Discriminator", - {"in_shape": (1, 16, 16, 16), "channels": [2, 2], "strides": [1, 1]}, + "ConvDecoder", + { + "spatial_dims": 2, + "in_channels": 1, + "channels": [2, 4], + }, ), ( - "Critic", - {"in_shape": (1, 16, 16, 16), "channels": [2, 2], "strides": [1, 1]}, + "ConvEncoder", + { + "spatial_dims": 2, + "in_channels": 1, + "channels": [2, 4], + }, ), - ("DenseNet", {"spatial_dims": 3, "in_channels": 1, "out_channels": 1}), ( - "FullyConnectedNet", - {"in_channels": 3, "out_channels": 1, "hidden_channels": [2, 3]}, + "MLP", + { + "in_channels": 1, + "out_channels": 2, + "hidden_channels": [2, 4], + }, ), ( - "VarFullyConnectedNet", + "AttentionUNet", { + "spatial_dims": 2, "in_channels": 1, - "out_channels": 1, - "latent_size": 16, - "encode_channels": [2, 2], - "decode_channels": [2, 2], + "out_channels": 2, }, ), ( - "Generator", + 
"UNet", { - "latent_shape": (3,), - "start_shape": (1, 16, 16, 16), - "channels": [2, 2], - "strides": [1, 1], + "spatial_dims": 2, + "in_channels": 1, + "out_channels": 2, }, ), ( "ResNet", { - "block": "bottleneck", - "layers": (4, 4, 4, 4), - "block_inplanes": (5, 5, 5, 5), "spatial_dims": 2, + "in_channels": 1, + "num_outputs": 1, }, ), - ("ResNetFeatures", {"model_name": "resnet10"}), - ("SegResNet", {}), ( - "UNet", + "DenseNet", { - "spatial_dims": 3, + "spatial_dims": 2, "in_channels": 1, - "out_channels": 1, - "channels": [2, 2, 2], - "strides": [1, 1], + "num_outputs": 1, }, ), ( - "AttentionUnet", + "SEResNet", { - "spatial_dims": 3, + "spatial_dims": 2, "in_channels": 1, - "out_channels": 1, - "channels": [2, 2, 2], - "strides": [1, 1], + "num_outputs": 1, + }, + ), + ( + "ViT", + { + "in_shape": (1, 64, 65), + "patch_size": (4, 5), + "num_outputs": 1, + }, + ), + ( + "ResNet-18", + { + "num_outputs": 1, + }, + ), + ( + "ResNet-34", + { + "num_outputs": 1, + }, + ), + ( + "ResNet-50", + { + "num_outputs": 1, + }, + ), + ( + "ResNet-101", + { + "num_outputs": 1, + }, + ), + ( + "ResNet-152", + { + "num_outputs": 1, + "pretrained": True, + }, + ), + ( + "DenseNet-121", + { + "num_outputs": 1, + }, + ), + ( + "DenseNet-161", + { + "num_outputs": 1, + }, + ), + ( + "DenseNet-169", + { + "num_outputs": 1, + }, + ), + ( + "DenseNet-201", + { + "num_outputs": 1, + "pretrained": True, + }, + ), + ( + "SEResNet-50", + { + "num_outputs": 1, + }, + ), + ( + "SEResNet-101", + { + "num_outputs": 1, + }, + ), + ( + "SEResNet-152", + { + "num_outputs": 1, + }, + ), + ( + "ViT-B/16", + { + "num_outputs": 1, + "pretrained": True, + }, + ), + ( + "ViT-B/32", + { + "num_outputs": 1, + }, + ), + ( + "ViT-L/16", + { + "num_outputs": 1, + }, + ), + ( + "ViT-L/32", + { + "num_outputs": 1, }, ), - ("ViT", {"in_channels": 3, "img_size": 16, "patch_size": 4}), - ("ViTAutoEnc", {"in_channels": 3, "img_size": 16, "patch_size": 4}), ], ) def test_get_network(network_name, params): - config = create_network_config(network_name)(**params) - network, updated_config = get_network(config) + tested.append(network_name) + _ = get_network(name=network_name, **params) + if network_name == "ViT-L/32": # the last one + assert set(tested) == set( + net.value for net in ImplementedNetworks + ) # check we haven't miss a network + + +def test_update_config_with_defaults(): + config = AutoEncoderConfig( + latent_size=1, + in_shape=(1, 10, 10), + conv_args={"channels": [1, 2], "dropout": 0.2}, + mlp_args={"hidden_channels": [5], "act": "relu"}, + ) + _update_config_with_defaults(config, AutoEncoder.__init__) + assert config.in_shape == (1, 10, 10) + assert config.latent_size == 1 + assert config.conv_args.channels == [1, 2] + assert config.conv_args.dropout == 0.2 + assert config.conv_args.act == "prelu" + assert config.mlp_args.hidden_channels == [5] + assert config.mlp_args.act == "relu" + assert config.mlp_args.norm == "batch" + assert config.out_channels is None + + +def test_parameters(): + net, updated_config = get_network( + "AutoEncoder", + return_config=True, + latent_size=1, + in_shape=(1, 10, 10), + conv_args={"channels": [1, 2], "dropout": 0.2}, + mlp_args={"hidden_channels": [5], "act": "relu"}, + ) + assert isinstance(net, AutoEncoder) + assert net.encoder.mlp.out_channels == 1 + assert net.encoder.mlp.hidden_channels == [5] + assert net.encoder.mlp.act == "relu" + assert net.encoder.mlp.norm == "batch" + assert net.in_shape == (1, 10, 10) + assert net.encoder.convolutions.channels == (1, 2) + assert 
net.encoder.convolutions.dropout == 0.2 + assert net.encoder.convolutions.act == "prelu" + + assert updated_config.in_shape == (1, 10, 10) + assert updated_config.latent_size == 1 + assert updated_config.conv_args.channels == [1, 2] + assert updated_config.conv_args.dropout == 0.2 + assert updated_config.conv_args.act == "prelu" + assert updated_config.mlp_args.hidden_channels == [5] + assert updated_config.mlp_args.act == "relu" + assert updated_config.mlp_args.norm == "batch" + assert updated_config.out_channels is None + + +def test_without_return(): + net = get_network( + "AutoEncoder", + return_config=False, + latent_size=1, + in_shape=(1, 10, 10), + conv_args={"channels": [1, 2]}, + ) + assert isinstance(net, AutoEncoder) - if network_name == "ResNet": - assert isinstance(network, ResNet) - assert isinstance(network.layer1[0], ResNetBottleneck) - assert len(network.layer1) == 4 - assert network.layer1[0].conv1.in_channels == 5 - assert isinstance(network.layer1[0].conv1, Conv2d) - assert updated_config.network == "ResNet" - assert updated_config.block == "bottleneck" - assert updated_config.layers == (4, 4, 4, 4) - assert updated_config.block_inplanes == (5, 5, 5, 5) - assert updated_config.spatial_dims == 2 - assert updated_config.conv1_t_size == 7 - assert updated_config.act == ("relu", {"inplace": True}) +def test_get_network_from_config(): + config = AutoEncoderConfig( + latent_size=1, + in_shape=(1, 10, 10), + conv_args={"channels": [1, 2], "dropout": 0.2}, + mlp_args={"hidden_channels": [5], "act": "relu"}, + ) + net, updated_config = get_network_from_config(config) + assert isinstance(net, AutoEncoder) + assert updated_config.conv_args.act == "prelu" + assert config.conv_args.act == "DefaultFromLibrary" From 1ae722754de8d5d66a7f9e7d8096178bf4917512 Mon Sep 17 00:00:00 2001 From: camillebrianceau <57992134+camillebrianceau@users.noreply.github.com> Date: Fri, 18 Oct 2024 13:26:19 +0200 Subject: [PATCH 08/16] Cb extract validator (#666) --- clinicadl/API_test.py | 14 +- clinicadl/caps_dataset/data_config.py | 18 +- clinicadl/commandline/modules_options/ssda.py | 45 - .../commandline/modules_options/validation.py | 2 +- .../pipelines/generate/trivial/cli.py | 1 - .../commandline/pipelines/interpret/cli.py | 12 +- .../pipelines/interpret/options.py | 14 +- .../commandline/pipelines/predict/cli.py | 6 +- .../commandline/pipelines/predict/options.py | 4 +- .../pipelines/train/classification/cli.py | 8 +- .../pipelines/train/from_json/cli.py | 4 +- .../pipelines/train/reconstruction/cli.py | 7 - .../pipelines/train/regression/cli.py | 7 - .../commandline/pipelines/train/resume/cli.py | 2 +- clinicadl/config/config/ssda.py | 41 - clinicadl/interpret/config.py | 84 +- clinicadl/maps_manager/maps_manager.py | 2 +- clinicadl/nn/networks/__init__.py | 1 - clinicadl/nn/networks/cnn.py | 2 - clinicadl/nn/utils.py | 1 - clinicadl/predict/config.py | 40 - clinicadl/{predict => predictor}/__init__.py | 0 clinicadl/predictor/config.py | 105 ++ .../predictor.py} | 981 ++++++++++-------- clinicadl/{predict => predictor}/utils.py | 0 .../{splitter => predictor}/validation.py | 0 .../random_search/random_search_utils.py | 1 - clinicadl/resources/config/train_config.toml | 1 - clinicadl/splitter/config.py | 2 +- clinicadl/splitter/splitter.py | 36 +- clinicadl/{maps_manager => }/tmp_config.py | 21 +- clinicadl/trainer/config/classification.py | 2 +- clinicadl/trainer/config/reconstruction.py | 2 +- clinicadl/trainer/config/regression.py | 2 +- clinicadl/trainer/config/train.py | 6 +- 
clinicadl/trainer/tasks_utils.py | 3 +- clinicadl/trainer/trainer.py | 791 ++------------ clinicadl/utils/cli_param/option.py | 7 - clinicadl/utils/iotools/train_utils.py | 62 ++ clinicadl/utils/iotools/trainer_utils.py | 3 +- clinicadl/validator/config.py | 47 - clinicadl/validator/validator.py | 496 --------- tests/test_interpret.py | 23 +- tests/test_predict.py | 8 +- tests/test_train_ae.py | 5 + tests/test_train_cnn.py | 6 + tests/test_train_from_json.py | 1 + tests/test_transfer_learning.py | 1 + tests/testing_tools.py | 8 + tests/unittests/nn/networks/test_ssda.py | 11 - tests/unittests/train/test_utils.py | 3 - .../train/trainer/test_training_config.py | 28 +- 52 files changed, 997 insertions(+), 1980 deletions(-) delete mode 100644 clinicadl/commandline/modules_options/ssda.py delete mode 100644 clinicadl/config/config/ssda.py delete mode 100644 clinicadl/predict/config.py rename clinicadl/{predict => predictor}/__init__.py (100%) create mode 100644 clinicadl/predictor/config.py rename clinicadl/{predict/predict_manager.py => predictor/predictor.py} (54%) rename clinicadl/{predict => predictor}/utils.py (100%) rename clinicadl/{splitter => predictor}/validation.py (100%) rename clinicadl/{maps_manager => }/tmp_config.py (97%) delete mode 100644 clinicadl/validator/config.py delete mode 100644 clinicadl/validator/validator.py delete mode 100644 tests/unittests/nn/networks/test_ssda.py diff --git a/clinicadl/API_test.py b/clinicadl/API_test.py index 0581b879a..d144c1597 100644 --- a/clinicadl/API_test.py +++ b/clinicadl/API_test.py @@ -2,7 +2,11 @@ from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig from clinicadl.caps_dataset.data import return_dataset +from clinicadl.predictor.config import PredictConfig +from clinicadl.predictor.predictor import Predictor from clinicadl.prepare_data.prepare_data import DeepLearningPrepareData +from clinicadl.splitter.config import SplitterConfig +from clinicadl.splitter.splitter import Splitter from clinicadl.trainer.config.classification import ClassificationConfig from clinicadl.trainer.trainer import Trainer from clinicadl.utils.enum import ExtractionMethod, Preprocessing, Task @@ -27,11 +31,11 @@ multi_cohort, ) -split_config = SplitConfig() +split_config = SplitterConfig() splitter = Splitter(split_config) -validator_config = ValidatorConfig() -validator = Validator(validator_config) +validator_config = PredictConfig() +validator = Predictor(validator_config) train_config = ClassificationConfig() trainer = Trainer(train_config, validator) @@ -78,6 +82,6 @@ test_loader = trainer.get_dataloader(dataset, split, network, "test", config) validator.predict(test_loader) -interpret_config = InterpretConfig(**kwargs) -predict_manager = PredictManager(interpret_config) +interpret_config = PredictConfig(**kwargs) +predict_manager = Predictor(interpret_config) predict_manager.interpret() diff --git a/clinicadl/caps_dataset/data_config.py b/clinicadl/caps_dataset/data_config.py index 35aed91b5..80694fcd0 100644 --- a/clinicadl/caps_dataset/data_config.py +++ b/clinicadl/caps_dataset/data_config.py @@ -24,7 +24,7 @@ class DataConfig(BaseModel): # TODO : put in data module that must be passed by the user. """ - caps_directory: Path + caps_directory: Optional[Path] = None baseline: bool = False diagnoses: Tuple[str, ...] = ("AD", "CN") data_df: Optional[pd.DataFrame] = None @@ -147,15 +147,17 @@ def preprocessing_dict(self) -> Dict[str, Any]: f"in {caps_dict}." 
) - preprocessing_dict = read_preprocessing(preprocessing_json) + preprocessing_dict = read_preprocessing(preprocessing_json) - if ( - preprocessing_dict["mode"] == "roi" - and "roi_background_value" not in preprocessing_dict - ): - preprocessing_dict["roi_background_value"] = 0 + if ( + preprocessing_dict["mode"] == "roi" + and "roi_background_value" not in preprocessing_dict + ): + preprocessing_dict["roi_background_value"] = 0 - return preprocessing_dict + return preprocessing_dict + else: + return None @computed_field @property diff --git a/clinicadl/commandline/modules_options/ssda.py b/clinicadl/commandline/modules_options/ssda.py deleted file mode 100644 index 8119726ef..000000000 --- a/clinicadl/commandline/modules_options/ssda.py +++ /dev/null @@ -1,45 +0,0 @@ -import click - -from clinicadl.config.config.ssda import SSDAConfig -from clinicadl.config.config_utils import get_default_from_config_class as get_default -from clinicadl.config.config_utils import get_type_from_config_class as get_type - -# SSDA -caps_target = click.option( - "--caps_target", - "-d", - type=get_type("caps_target", SSDAConfig), - default=get_default("caps_target", SSDAConfig), - help="CAPS of target data.", - show_default=True, -) -preprocessing_json_target = click.option( - "--preprocessing_json_target", - "-d", - type=get_type("preprocessing_json_target", SSDAConfig), - default=get_default("preprocessing_json_target", SSDAConfig), - help="Path to json target.", - show_default=True, -) -ssda_network = click.option( - "--ssda_network/--single_network", - default=get_default("ssda_network", SSDAConfig), - help="If provided uses a ssda-network framework.", - show_default=True, -) -tsv_target_lab = click.option( - "--tsv_target_lab", - "-d", - type=get_type("tsv_target_lab", SSDAConfig), - default=get_default("tsv_target_lab", SSDAConfig), - help="TSV of labeled target data.", - show_default=True, -) -tsv_target_unlab = click.option( - "--tsv_target_unlab", - "-d", - type=get_type("tsv_target_unlab", SSDAConfig), - default=get_default("tsv_target_unlab", SSDAConfig), - help="TSV of unllabeled target data.", - show_default=True, -) diff --git a/clinicadl/commandline/modules_options/validation.py b/clinicadl/commandline/modules_options/validation.py index 858dd956e..089357866 100644 --- a/clinicadl/commandline/modules_options/validation.py +++ b/clinicadl/commandline/modules_options/validation.py @@ -2,7 +2,7 @@ from clinicadl.config.config_utils import get_default_from_config_class as get_default from clinicadl.config.config_utils import get_type_from_config_class as get_type -from clinicadl.splitter.validation import ValidationConfig +from clinicadl.predictor.validation import ValidationConfig # Validation valid_longitudinal = click.option( diff --git a/clinicadl/commandline/pipelines/generate/trivial/cli.py b/clinicadl/commandline/pipelines/generate/trivial/cli.py index b48651811..4798dc904 100644 --- a/clinicadl/commandline/pipelines/generate/trivial/cli.py +++ b/clinicadl/commandline/pipelines/generate/trivial/cli.py @@ -118,7 +118,6 @@ def create_trivial_image(subject_id: int) -> pd.DataFrame: if caps_config.data.mask_path is None: caps_config.data.mask_path = get_mask_path() path_to_mask = caps_config.data.mask_path / f"mask-{label + 1}.nii" - print(path_to_mask) if path_to_mask.is_file(): atlas_to_mask = nib.loadsave.load(path_to_mask).get_fdata() else: diff --git a/clinicadl/commandline/pipelines/interpret/cli.py b/clinicadl/commandline/pipelines/interpret/cli.py index 3509eaf23..db92f06e2 100644 --- 
a/clinicadl/commandline/pipelines/interpret/cli.py +++ b/clinicadl/commandline/pipelines/interpret/cli.py @@ -1,3 +1,5 @@ +from pathlib import Path + import click from clinicadl.commandline import arguments @@ -10,7 +12,7 @@ ) from clinicadl.commandline.pipelines.interpret import options from clinicadl.interpret.config import InterpretConfig -from clinicadl.predict.predict_manager import PredictManager +from clinicadl.predictor.predictor import Predictor @click.command("interpret", no_args_is_help=True) @@ -40,9 +42,13 @@ def cli(**kwargs): NAME is the name of the saliency map task. METHOD is the method used to extract an attribution map. """ + from clinicadl.utils.iotools.train_utils import merge_cli_and_maps_json_options - interpret_config = InterpretConfig(**kwargs) - predict_manager = PredictManager(interpret_config) + dict_ = merge_cli_and_maps_json_options( + Path(kwargs["input_maps"]) / "maps.json", **kwargs + ) + interpret_config = InterpretConfig(**dict_) + predict_manager = Predictor(interpret_config) predict_manager.interpret() diff --git a/clinicadl/commandline/pipelines/interpret/options.py b/clinicadl/commandline/pipelines/interpret/options.py index 5313b4a90..43cada4c4 100644 --- a/clinicadl/commandline/pipelines/interpret/options.py +++ b/clinicadl/commandline/pipelines/interpret/options.py @@ -2,28 +2,28 @@ from clinicadl.config.config_utils import get_default_from_config_class as get_default from clinicadl.config.config_utils import get_type_from_config_class as get_type -from clinicadl.interpret.config import InterpretConfig +from clinicadl.interpret.config import InterpretBaseConfig # interpret specific name = click.argument( "name", - type=get_type("name", InterpretConfig), + type=get_type("name", InterpretBaseConfig), ) method = click.argument( "method", - type=get_type("method", InterpretConfig), # ["gradients", "grad-cam"] + type=get_type("method", InterpretBaseConfig), # ["gradients", "grad-cam"] ) level = click.option( "--level_grad_cam", - type=get_type("level", InterpretConfig), - default=get_default("level", InterpretConfig), + type=get_type("level", InterpretBaseConfig), + default=get_default("level", InterpretBaseConfig), help="level of the feature map (after the layer corresponding to the number) chosen for grad-cam.", show_default=True, ) target_node = click.option( "--target_node", - type=get_type("target_node", InterpretConfig), - default=get_default("target_node", InterpretConfig), + type=get_type("target_node", InterpretBaseConfig), + default=get_default("target_node", InterpretBaseConfig), help="Which target node the gradients explain. 
Default takes the first output node.", show_default=True, ) diff --git a/clinicadl/commandline/pipelines/predict/cli.py b/clinicadl/commandline/pipelines/predict/cli.py index fa7303008..184f46ad7 100644 --- a/clinicadl/commandline/pipelines/predict/cli.py +++ b/clinicadl/commandline/pipelines/predict/cli.py @@ -10,8 +10,8 @@ validation, ) from clinicadl.commandline.pipelines.predict import options -from clinicadl.predict.config import PredictConfig -from clinicadl.predict.predict_manager import PredictManager +from clinicadl.predictor.config import PredictConfig +from clinicadl.predictor.predictor import Predictor @click.command(name="predict", no_args_is_help=True) @@ -61,7 +61,7 @@ def cli(input_maps_directory, data_group, **kwargs): """ predict_config = PredictConfig(**kwargs) - predict_manager = PredictManager(predict_config) + predict_manager = Predictor(predict_config) predict_manager.predict() diff --git a/clinicadl/commandline/pipelines/predict/options.py b/clinicadl/commandline/pipelines/predict/options.py index 003dfe275..cbb8980ca 100644 --- a/clinicadl/commandline/pipelines/predict/options.py +++ b/clinicadl/commandline/pipelines/predict/options.py @@ -1,13 +1,11 @@ import click from clinicadl.config.config_utils import get_default_from_config_class as get_default -from clinicadl.predict.config import PredictConfig +from clinicadl.predictor.config import PredictConfig # predict specific use_labels = click.option( "--use_labels/--no_labels", - show_default=True, - default=get_default("use_labels", PredictConfig), help="Set this option to --no_labels if your dataset does not contain ground truth labels.", ) save_tensor = click.option( diff --git a/clinicadl/commandline/pipelines/train/classification/cli.py b/clinicadl/commandline/pipelines/train/classification/cli.py index 539f6cd42..8ac287402 100644 --- a/clinicadl/commandline/pipelines/train/classification/cli.py +++ b/clinicadl/commandline/pipelines/train/classification/cli.py @@ -13,7 +13,6 @@ optimizer, reproducibility, split, - ssda, transforms, validation, ) @@ -63,12 +62,6 @@ @dataloader.batch_size @dataloader.sampler @dataloader.n_proc -# ssda option -@ssda.ssda_network -@ssda.caps_target -@ssda.tsv_target_lab -@ssda.tsv_target_unlab -@ssda.preprocessing_json_target # Cross validation @split.n_splits @split.split @@ -115,4 +108,5 @@ def cli(**kwargs): options = merge_cli_and_config_file_options(Task.CLASSIFICATION, **kwargs) config = ClassificationConfig(**options) trainer = Trainer(config) + trainer.train(split_list=config.split.split, overwrite=True) diff --git a/clinicadl/commandline/pipelines/train/from_json/cli.py b/clinicadl/commandline/pipelines/train/from_json/cli.py index c0130a9b9..517ec8fa5 100644 --- a/clinicadl/commandline/pipelines/train/from_json/cli.py +++ b/clinicadl/commandline/pipelines/train/from_json/cli.py @@ -27,6 +27,8 @@ def cli(**kwargs): logger.info(f"Reading JSON file at path {kwargs['config_file']}...") trainer = Trainer.from_json( - config_file=kwargs["config_file"], maps_path=kwargs["output_maps_directory"] + config_file=kwargs["config_file"], + maps_path=kwargs["output_maps_directory"], + split=kwargs["split"], ) trainer.train(split_list=kwargs["split"], overwrite=True) diff --git a/clinicadl/commandline/pipelines/train/reconstruction/cli.py b/clinicadl/commandline/pipelines/train/reconstruction/cli.py index d63bf63f8..fc39ef54e 100644 --- a/clinicadl/commandline/pipelines/train/reconstruction/cli.py +++ b/clinicadl/commandline/pipelines/train/reconstruction/cli.py @@ -13,7 +13,6 @@ 
optimizer, reproducibility, split, - ssda, transforms, validation, ) @@ -63,12 +62,6 @@ @dataloader.batch_size @dataloader.sampler @dataloader.n_proc -# ssda option -@ssda.ssda_network -@ssda.caps_target -@ssda.tsv_target_lab -@ssda.tsv_target_unlab -@ssda.preprocessing_json_target # Cross validation @split.n_splits @split.split diff --git a/clinicadl/commandline/pipelines/train/regression/cli.py b/clinicadl/commandline/pipelines/train/regression/cli.py index ff6dd68ca..59e816192 100644 --- a/clinicadl/commandline/pipelines/train/regression/cli.py +++ b/clinicadl/commandline/pipelines/train/regression/cli.py @@ -13,7 +13,6 @@ optimizer, reproducibility, split, - ssda, transforms, validation, ) @@ -61,12 +60,6 @@ @dataloader.batch_size @dataloader.sampler @dataloader.n_proc -# ssda o -@ssda.ssda_network -@ssda.caps_target -@ssda.tsv_target_lab -@ssda.tsv_target_unlab -@ssda.preprocessing_json_target # Cross validation @split.n_splits @split.split diff --git a/clinicadl/commandline/pipelines/train/resume/cli.py b/clinicadl/commandline/pipelines/train/resume/cli.py index 1fc34a0f4..12451d18a 100644 --- a/clinicadl/commandline/pipelines/train/resume/cli.py +++ b/clinicadl/commandline/pipelines/train/resume/cli.py @@ -16,4 +16,4 @@ def cli(input_maps_directory, split): INPUT_MAPS_DIRECTORY is the path to the MAPS folder where training job has started. """ trainer = Trainer.from_maps(input_maps_directory) - trainer.resume(split) + trainer.resume() diff --git a/clinicadl/config/config/ssda.py b/clinicadl/config/config/ssda.py deleted file mode 100644 index caf52634d..000000000 --- a/clinicadl/config/config/ssda.py +++ /dev/null @@ -1,41 +0,0 @@ -from logging import getLogger -from pathlib import Path -from typing import Any, Dict - -from pydantic import BaseModel, ConfigDict, computed_field - -from clinicadl.utils.iotools.utils import read_preprocessing - -logger = getLogger("clinicadl.ssda_config") - - -class SSDAConfig(BaseModel): - """Config class to perform SSDA.""" - - caps_target: Path = Path("") - preprocessing_json_target: Path = Path("") - ssda_network: bool = False - tsv_target_lab: Path = Path("") - tsv_target_unlab: Path = Path("") - # pydantic config - model_config = ConfigDict(validate_assignment=True) - - @computed_field - @property - def preprocessing_dict_target(self) -> Dict[str, Any]: - """ - Gets the preprocessing dictionary from a target preprocessing json file. - - Returns - ------- - Dict[str, Any] - The preprocessing dictionary. 
- """ - if not self.ssda_network: - return {} - - preprocessing_json_target = ( - self.caps_target / "tensor_extraction" / self.preprocessing_json_target - ) - - return read_preprocessing(preprocessing_json_target) diff --git a/clinicadl/interpret/config.py b/clinicadl/interpret/config.py index 41c8dcea9..03036d4c9 100644 --- a/clinicadl/interpret/config.py +++ b/clinicadl/interpret/config.py @@ -1,23 +1,33 @@ from logging import getLogger from pathlib import Path -from typing import Optional +from typing import Any, Dict, Optional from pydantic import BaseModel, field_validator -from clinicadl.caps_dataset.data_config import DataConfig as DataBaseConfig +from clinicadl.caps_dataset.data_config import DataConfig from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig from clinicadl.interpret.gradients import GradCam, Gradients, VanillaBackProp -from clinicadl.maps_manager.config import MapsManagerConfig +from clinicadl.maps_manager.config import MapsManagerConfig as MapsManagerConfigBase +from clinicadl.maps_manager.maps_manager import MapsManager +from clinicadl.predictor.validation import ValidationConfig from clinicadl.splitter.config import SplitConfig -from clinicadl.splitter.validation import ValidationConfig +from clinicadl.transforms.config import TransformsConfig from clinicadl.utils.computational.computational import ComputationalConfig from clinicadl.utils.enum import InterpretationMethod +from clinicadl.utils.exceptions import ClinicaDLArgumentError logger = getLogger("clinicadl.interpret_config") -class DataConfig(DataBaseConfig): - caps_directory: Optional[Path] = None +class MapsManagerConfig(MapsManagerConfigBase): + save_tensor: bool = False + + def check_output_saving_tensor(self, network_task: str) -> None: + # Check if task is reconstruction for "save_tensor" and "save_nifti" + if self.save_tensor and network_task != "reconstruction": + raise ClinicaDLArgumentError( + "Cannot save tensors if the network task is not reconstruction. Please remove --save_tensor option." 
+ ) class InterpretBaseConfig(BaseModel): @@ -44,13 +54,57 @@ def get_method(self) -> Gradients: raise ValueError(f"The method {self.method.value} is not implemented") -class InterpretConfig( - MapsManagerConfig, - InterpretBaseConfig, - DataConfig, - ValidationConfig, - ComputationalConfig, - DataLoaderConfig, - SplitConfig, -): +class InterpretConfig(BaseModel): """Config class to perform Transfer Learning.""" + + maps_manager: MapsManagerConfig + data: DataConfig + validation: ValidationConfig + computational: ComputationalConfig + dataloader: DataLoaderConfig + split: SplitConfig + interpret: InterpretBaseConfig + + def __init__(self, **kwargs): + super().__init__( + maps_manager=kwargs, + computational=kwargs, + dataloader=kwargs, + data=kwargs, + split=kwargs, + validation=kwargs, + interpret=kwargs, + ) + + def _update(self, config_dict: Dict[str, Any]) -> None: + """Updates the configs with a dict given by the user.""" + self.data.__dict__.update(config_dict) + self.split.__dict__.update(config_dict) + self.validation.__dict__.update(config_dict) + self.maps_manager.__dict__.update(config_dict) + self.split.__dict__.update(config_dict) + self.computational.__dict__.update(config_dict) + self.dataloader.__dict__.update(config_dict) + self.interpret.__dict__.update(config_dict) + + def adapt_with_maps_manager_info(self, maps_manager: MapsManager): + self.maps_manager.check_output_saving_nifti(maps_manager.network_task) + self.data.diagnoses = ( + maps_manager.diagnoses + if self.data.diagnoses is None or len(self.data.diagnoses) == 0 + else self.data.diagnoses + ) + + self.dataloader.batch_size = ( + maps_manager.batch_size + if not self.dataloader.batch_size + else self.dataloader.batch_size + ) + self.dataloader.n_proc = ( + maps_manager.n_proc + if not self.dataloader.n_proc + else self.dataloader.n_proc + ) + + self.split.adapt_cross_val_with_maps_manager_info(maps_manager) + self.maps_manager.check_output_saving_tensor(maps_manager.network_task) diff --git a/clinicadl/maps_manager/maps_manager.py b/clinicadl/maps_manager/maps_manager.py index 76cb544fe..10550a021 100644 --- a/clinicadl/maps_manager/maps_manager.py +++ b/clinicadl/maps_manager/maps_manager.py @@ -17,7 +17,7 @@ from clinicadl.metrics.utils import ( check_selection_metric, ) -from clinicadl.predict.utils import get_prediction +from clinicadl.predictor.utils import get_prediction from clinicadl.splitter.config import SplitterConfig from clinicadl.splitter.splitter import Splitter from clinicadl.trainer.tasks_utils import ( diff --git a/clinicadl/nn/networks/__init__.py b/clinicadl/nn/networks/__init__.py index c77097e60..3b88830fb 100644 --- a/clinicadl/nn/networks/__init__.py +++ b/clinicadl/nn/networks/__init__.py @@ -8,7 +8,6 @@ resnet18, ) from .random import RandomArchitecture -from .ssda import Conv5_FC3_SSDA from .unet import UNet from .vae import ( CVAE_3D, diff --git a/clinicadl/nn/networks/cnn.py b/clinicadl/nn/networks/cnn.py index eb2104b1e..5fe596bcb 100644 --- a/clinicadl/nn/networks/cnn.py +++ b/clinicadl/nn/networks/cnn.py @@ -63,8 +63,6 @@ def __init__(self, convolution_layers: nn.Module, fc_layers: nn.Module) -> None: def forward(self, x): inter = self.convolutions(x) - print(self.convolutions) - print(inter.shape) return self.fc(inter) diff --git a/clinicadl/nn/utils.py b/clinicadl/nn/utils.py index dc3afd71c..263afc407 100644 --- a/clinicadl/nn/utils.py +++ b/clinicadl/nn/utils.py @@ -64,7 +64,6 @@ def compute_output_size( input_ = torch.randn(input_size).unsqueeze(0) if isinstance(layer, 
nn.MaxUnpool3d) or isinstance(layer, nn.MaxUnpool2d): indices = torch.zeros_like(input_, dtype=int) - print(indices) output = layer(input_, indices) else: output = layer(input_) diff --git a/clinicadl/predict/config.py b/clinicadl/predict/config.py deleted file mode 100644 index a96b4b104..000000000 --- a/clinicadl/predict/config.py +++ /dev/null @@ -1,40 +0,0 @@ -from logging import getLogger - -from clinicadl.caps_dataset.data_config import DataConfig as DataBaseConfig -from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig -from clinicadl.maps_manager.config import ( - MapsManagerConfig as MapsManagerBaseConfig, -) -from clinicadl.splitter.config import SplitConfig -from clinicadl.splitter.validation import ValidationConfig -from clinicadl.utils.computational.computational import ComputationalConfig -from clinicadl.utils.exceptions import ClinicaDLArgumentError # type: ignore - -logger = getLogger("clinicadl.predict_config") - - -class MapsManagerConfig(MapsManagerBaseConfig): - save_tensor: bool = False - save_latent_tensor: bool = False - - def check_output_saving_tensor(self, network_task: str) -> None: - # Check if task is reconstruction for "save_tensor" and "save_nifti" - if self.save_tensor and network_task != "reconstruction": - raise ClinicaDLArgumentError( - "Cannot save tensors if the network task is not reconstruction. Please remove --save_tensor option." - ) - - -class DataConfig(DataBaseConfig): - use_labels: bool = True - - -class PredictConfig( - MapsManagerConfig, - DataConfig, - ValidationConfig, - ComputationalConfig, - DataLoaderConfig, - SplitConfig, -): - """Config class to perform Transfer Learning.""" diff --git a/clinicadl/predict/__init__.py b/clinicadl/predictor/__init__.py similarity index 100% rename from clinicadl/predict/__init__.py rename to clinicadl/predictor/__init__.py diff --git a/clinicadl/predictor/config.py b/clinicadl/predictor/config.py new file mode 100644 index 000000000..ead42d1c6 --- /dev/null +++ b/clinicadl/predictor/config.py @@ -0,0 +1,105 @@ +from logging import getLogger +from typing import Any, Dict + +from pydantic import BaseModel, ConfigDict, computed_field + +from clinicadl.caps_dataset.data_config import DataConfig as DataBaseConfig +from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig +from clinicadl.maps_manager.config import ( + MapsManagerConfig as MapsManagerBaseConfig, +) +from clinicadl.maps_manager.maps_manager import MapsManager +from clinicadl.predictor.validation import ValidationConfig +from clinicadl.splitter.config import SplitConfig +from clinicadl.transforms.config import TransformsConfig +from clinicadl.utils.computational.computational import ComputationalConfig +from clinicadl.utils.enum import Task +from clinicadl.utils.exceptions import ClinicaDLArgumentError # type: ignore + +logger = getLogger("clinicadl.predict_config") + + +class MapsManagerConfig(MapsManagerBaseConfig): + save_tensor: bool = False + save_latent_tensor: bool = False + + def check_output_saving_tensor(self, network_task: str) -> None: + # Check if task is reconstruction for "save_tensor" and "save_nifti" + if self.save_tensor and network_task != "reconstruction": + raise ClinicaDLArgumentError( + "Cannot save tensors if the network task is not reconstruction. Please remove --save_tensor option." 
+ ) + + +class DataConfig(DataBaseConfig): + use_labels: bool = True + + +class PredictConfig(BaseModel): + """Config class to perform Transfer Learning.""" + + maps_manager: MapsManagerConfig + data: DataConfig + validation: ValidationConfig + computational: ComputationalConfig + dataloader: DataLoaderConfig + split: SplitConfig + transforms: TransformsConfig + + model_config = ConfigDict(validate_assignment=True) + + def __init__(self, **kwargs): + super().__init__( + maps_manager=kwargs, + computational=kwargs, + dataloader=kwargs, + data=kwargs, + split=kwargs, + validation=kwargs, + transforms=kwargs, + ) + + def _update(self, config_dict: Dict[str, Any]) -> None: + """Updates the configs with a dict given by the user.""" + self.data.__dict__.update(config_dict) + self.split.__dict__.update(config_dict) + self.validation.__dict__.update(config_dict) + self.maps_manager.__dict__.update(config_dict) + self.split.__dict__.update(config_dict) + self.computational.__dict__.update(config_dict) + self.dataloader.__dict__.update(config_dict) + self.transforms.__dict__.update(config_dict) + + def adapt_with_maps_manager_info(self, maps_manager: MapsManager): + self.maps_manager.check_output_saving_nifti(maps_manager.network_task) + self.data.diagnoses = ( + maps_manager.diagnoses + if self.data.diagnoses is None or len(self.data.diagnoses) == 0 + else self.data.diagnoses + ) + + self.dataloader.batch_size = ( + maps_manager.batch_size + if not self.dataloader.batch_size + else self.dataloader.batch_size + ) + self.dataloader.n_proc = ( + maps_manager.n_proc + if not self.dataloader.n_proc + else self.dataloader.n_proc + ) + + self.split.adapt_cross_val_with_maps_manager_info(maps_manager) + self.maps_manager.check_output_saving_tensor(maps_manager.network_task) + + self.transforms = TransformsConfig( + normalize=maps_manager.normalize, + data_augmentation=maps_manager.data_augmentation, + size_reduction=maps_manager.size_reduction, + size_reduction_factor=maps_manager.size_reduction_factor, + ) + + if self.split.split is None and self.split.n_splits == 0: + from clinicadl.splitter.split_utils import find_splits + + self.split.split = find_splits(self.maps_manager.maps_dir) diff --git a/clinicadl/predict/predict_manager.py b/clinicadl/predictor/predictor.py similarity index 54% rename from clinicadl/predict/predict_manager.py rename to clinicadl/predictor/predictor.py index 55515dc8e..30fbbe5b8 100644 --- a/clinicadl/predict/predict_manager.py +++ b/clinicadl/predictor/predictor.py @@ -2,12 +2,13 @@ import shutil from logging import getLogger from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union import pandas as pd import torch import torch.distributed as dist from torch.amp import autocast +from torch.nn.modules.loss import _Loss from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler @@ -16,314 +17,123 @@ ) from clinicadl.interpret.config import InterpretConfig from clinicadl.maps_manager.maps_manager import MapsManager +from clinicadl.metrics.metric_module import MetricModule from clinicadl.metrics.utils import ( check_selection_metric, find_selection_metrics, ) -from clinicadl.predict.config import PredictConfig -from clinicadl.trainer.tasks_utils import generate_label_code, get_criterion +from clinicadl.network.network import Network +from clinicadl.predictor.config import PredictConfig +from clinicadl.trainer.tasks_utils import ( + columns, + 
compute_metrics, + generate_label_code, + generate_test_row, + get_criterion, +) from clinicadl.transforms.config import TransformsConfig from clinicadl.utils.computational.ddp import DDP, cluster +from clinicadl.utils.enum import Task from clinicadl.utils.exceptions import ( ClinicaDLArgumentError, ClinicaDLDataLeakageError, MAPSError, ) -from clinicadl.validator.validator import Validator logger = getLogger("clinicadl.predict_manager") level_list: List[str] = ["warning", "info", "debug"] -class PredictManager: +class Predictor: def __init__(self, _config: Union[PredictConfig, InterpretConfig]) -> None: - self.maps_manager = MapsManager(_config.maps_dir) self._config = _config - self.validator = Validator() + + from clinicadl.splitter.config import SplitterConfig + from clinicadl.splitter.splitter import Splitter + + self.maps_manager = MapsManager(_config.maps_manager.maps_dir) + self._config.adapt_with_maps_manager_info(self.maps_manager) + tmp = self._config.data.model_dump( + exclude=set(["preprocessing_dict", "mode", "caps_dict"]) + ) + tmp.update(self._config.split.model_dump()) + tmp.update(self._config.validation.model_dump()) + self.splitter = Splitter(SplitterConfig(**tmp)) def predict( self, label_code: Union[str, dict[str, int]] = "default", ): - """Performs the prediction task on a subset of caps_directory defined in a TSV file. - Parameters - ---------- - data_group : str - name of the data group tested. - caps_directory : Path (optional, default=None) - path to the CAPS folder. For more information please refer to - [clinica documentation](https://aramislab.paris.inria.fr/clinica/docs/public/latest/CAPS/Introduction/). - Default will load the value of an existing data group - tsv_path : Path (optional, default=None) - path to a TSV file containing the list of participants and sessions to test. - Default will load the DataFrame of an existing data group - split_list : List[int] (optional, default=None) - list of splits to test. Default perform prediction on all splits available. - selection_metrics : List[str] (optional, default=None) - list of selection metrics to test. - Default performs the prediction on all selection metrics available. - multi_cohort : bool (optional, default=False) - If True considers that tsv_path is the path to a multi-cohort TSV. - diagnoses : List[str] (optional, default=()) - List of diagnoses to load if tsv_path is a split_directory. - Default uses the same as in training step. - use_labels : bool (optional, default=True) - If True, the labels must exist in test meta-data and metrics are computed. - batch_size : int (optional, default=None) - If given, sets the value of batch_size, else use the same as in training step. - n_proc : int (optional, default=None) - If given, sets the value of num_workers, else use the same as in training step. - gpu : bool (optional, default=None) - If given, a new value for the device of the model will be computed. - amp : bool (optional, default=False) - If enabled, uses Automatic Mixed Precision (requires GPU usage). - overwrite : bool (optional, default=False) - If True erase the occurrences of data_group. - label : str (optional, default=None) - Target label used for training (if network_task in [`regression`, `classification`]). - label_code : Optional[Dict[str, int]] (optional, default="default") - dictionary linking the target values to a node number. 
- save_tensor : bool (optional, default=False) - If true, save the tensor predicted for reconstruction task - save_nifti : bool (optional, default=False) - If true, save the nifti associated to the prediction for reconstruction task. - save_latent_tensor : bool (optional, default=False) - If true, save the tensor from the latent space for reconstruction task. - skip_leak_check : bool (optional, default=False) - If true, skip the leak check (not recommended). - Examples - -------- - >>> _input_ - _output_ - """ - - assert isinstance(self._config, PredictConfig) - - self._config.check_output_saving_nifti(self.maps_manager.network_task) - self._config.diagnoses = ( - self.maps_manager.diagnoses - if self._config.diagnoses is None or len(self._config.diagnoses) == 0 - else self._config.diagnoses - ) - - self._config.batch_size = ( - self.maps_manager.batch_size - if not self._config.batch_size - else self._config.batch_size - ) - self._config.n_proc = ( - self.maps_manager.n_proc if not self._config.n_proc else self._config.n_proc - ) + """Performs the prediction task on a subset of caps_directory defined in a TSV file.""" - self._config.adapt_cross_val_with_maps_manager_info(self.maps_manager) - self._config.check_output_saving_tensor(self.maps_manager.network_task) - - transforms = TransformsConfig( - normalize=self.maps_manager.normalize, - data_augmentation=self.maps_manager.data_augmentation, - size_reduction=self.maps_manager.size_reduction, - size_reduction_factor=self.maps_manager.size_reduction_factor, - ) - group_df = self._config.create_groupe_df() + group_df = self._config.data.create_groupe_df() self._check_data_group(group_df) criterion = get_criterion( self.maps_manager.network_task, self.maps_manager.loss ) - self._check_data_group(df=group_df) - assert self._config.split # don't know if needed ? try to raise an exception ? 
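The block deleted above is the heart of this refactor: the old predict() mutated flat attributes (diagnoses, batch_size, n_proc) on a single config object, whereas the new PredictConfig groups them into nested sub-configs and adapts them once in adapt_with_maps_manager_info. A rough sketch of the composition pattern, with a simplified field set and pydantic v2 assumed (each sub-model validates the keys it declares and, by default, ignores the rest, which is what lets one flat kwargs dict feed every section):

from pydantic import BaseModel

class DataConfig(BaseModel):
    diagnoses: tuple = ()

class DataLoaderConfig(BaseModel):
    batch_size: int = 8

class ComposedConfig(BaseModel):
    data: DataConfig
    dataloader: DataLoaderConfig

    def __init__(self, **kwargs):
        # Hand the same flat dict to every section; pydantic builds
        # each sub-config from the keys that section declares.
        super().__init__(data=kwargs, dataloader=kwargs)

cfg = ComposedConfig(diagnoses=("AD", "CN"), batch_size=4)
assert cfg.data.diagnoses == ("AD", "CN") and cfg.dataloader.batch_size == 4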
- # assert self._config.label - - for split in self._config.split: + for split in self.splitter.split_iterator(): logger.info(f"Prediction of split {split}") group_df, group_parameters = self.get_group_info( - self._config.data_group, split + self._config.maps_manager.data_group, split ) # Find label code if not given - if self._config.is_given_label_code(self.maps_manager.label, label_code): + if self._config.data.is_given_label_code( + self.maps_manager.label, label_code + ): generate_label_code( - self.maps_manager.network_task, group_df, self._config.label + self.maps_manager.network_task, group_df, self._config.data.label ) # Erase previous TSV files on master process - if not self._config.selection_metrics: + if not self._config.validation.selection_metrics: split_selection_metrics = find_selection_metrics( self.maps_manager.maps_path, split, ) else: - split_selection_metrics = self._config.selection_metrics + split_selection_metrics = self._config.validation.selection_metrics for selection in split_selection_metrics: tsv_dir = ( self.maps_manager.maps_path / f"split-{split}" / f"best-{selection}" - / self._config.data_group + / self._config.maps_manager.data_group ) - tsv_pattern = f"{self._config.data_group}*.tsv" + tsv_pattern = f"{self._config.maps_manager.data_group}*.tsv" for tsv_file in tsv_dir.glob(tsv_pattern): tsv_file.unlink() - self._config.check_label(self.maps_manager.label) + + self._config.data.check_label(self.maps_manager.label) if self.maps_manager.multi_network: - self._predict_multi( - group_parameters, - group_df, - transforms, - label_code, - criterion, - split, - split_selection_metrics, - ) + for network in range(self.maps_manager.num_networks): + self._predict_single( + group_parameters, + group_df, + self._config.transforms, + label_code, + criterion, + split, + split_selection_metrics, + network, + ) else: self._predict_single( group_parameters, group_df, - transforms, + self._config.transforms, label_code, criterion, split, split_selection_metrics, ) if cluster.master: - self.validator._ensemble_prediction( - self.maps_manager, - self._config.data_group, - split, - self._config.selection_metrics, - self._config.use_labels, - self._config.skip_leak_check, - ) - - def _predict_multi( - self, - group_parameters, - group_df, - transforms, - label_code, - criterion, - split, - split_selection_metrics, - ): - """_summary_ - Parameters - ---------- - group_parameters : _type_ - _description_ - group_df : _type_ - _description_ - all_transforms : _type_ - _description_ - use_labels : _type_ - _description_ - label : _type_ - _description_ - label_code : _type_ - _description_ - batch_size : _type_ - _description_ - n_proc : _type_ - _description_ - criterion : _type_ - _description_ - data_group : _type_ - _description_ - split : _type_ - _description_ - split_selection_metrics : _type_ - _description_ - gpu : _type_ - _description_ - amp : _type_ - _description_ - save_tensor : _type_ - _description_ - save_latent_tensor : _type_ - _description_ - save_nifti : _type_ - _description_ - selection_metrics : _type_ - _description_ - Examples - -------- - >>> _input_ - _output_ - Notes - ----- - _notes_ - See Also - -------- - - _related_ - """ - assert isinstance(self._config, PredictConfig) - # assert self._config.label - - for network in range(self.maps_manager.num_networks): - data_test = return_dataset( - group_parameters["caps_directory"], - group_df, - self.maps_manager.preprocessing_dict, - transforms_config=transforms, - 
multi_cohort=group_parameters["multi_cohort"], - label_presence=self._config.use_labels, - label=self._config.label, - label_code=( - self.maps_manager.label_code - if label_code == "default" - else label_code - ), - cnn_index=network, - ) - test_loader = DataLoader( - data_test, - batch_size=( - self._config.batch_size - if self._config.batch_size is not None - else self.maps_manager.batch_size - ), - shuffle=False, - sampler=DistributedSampler( - data_test, - num_replicas=cluster.world_size, - rank=cluster.rank, - shuffle=False, - ), - num_workers=self._config.n_proc - if self._config.n_proc is not None - else self.maps_manager.n_proc, - ) - self.validator._test_loader( - maps_manager=self.maps_manager, - dataloader=test_loader, - criterion=criterion, - data_group=self._config.data_group, - split=split, - selection_metrics=split_selection_metrics, - use_labels=self._config.use_labels, - gpu=self._config.gpu, - amp=self._config.amp, - network=network, - ) - if self._config.save_tensor: - logger.debug("Saving tensors") - self.validator._compute_output_tensors( + self._ensemble_prediction( self.maps_manager, - data_test, - self._config.data_group, + self._config.maps_manager.data_group, split, - self._config.selection_metrics, - gpu=self._config.gpu, - network=network, - ) - if self._config.save_nifti: - self._compute_output_nifti( - data_test, - split, - network=network, - ) - if self._config.save_latent_tensor: - self._compute_latent_tensors( - dataset=data_test, - split=split, - network=network, + self._config.validation.selection_metrics, + self._config.data.use_labels, + self._config.validation.skip_leak_check, ) def _predict_single( @@ -335,78 +145,31 @@ def _predict_single( criterion, split, split_selection_metrics, + network: Optional[int] = None, ): - """_summary_ - Parameters - ---------- - group_parameters : _type_ - _description_ - group_df : _type_ - _description_ - all_transforms : _type_ - _description_ - use_labels : _type_ - _description_ - label : _type_ - _description_ - label_code : _type_ - _description_ - batch_size : _type_ - _description_ - n_proc : _type_ - _description_ - criterion : _type_ - _description_ - data_group : _type_ - _description_ - split : _type_ - _description_ - split_selection_metrics : _type_ - _description_ - gpu : _type_ - _description_ - amp : _type_ - _description_ - save_tensor : _type_ - _description_ - save_latent_tensor : _type_ - _description_ - save_nifti : _type_ - _description_ - selection_metrics : _type_ - _description_ - Examples - -------- - >>> _input_ - _output_ - Notes - ----- - _notes_ - See Also - -------- - - _related_ - """ + """_summary_""" assert isinstance(self._config, PredictConfig) - # assert self._config.label + # assert self._config.data.label data_test = return_dataset( group_parameters["caps_directory"], group_df, self.maps_manager.preprocessing_dict, - transforms_config=transforms, + transforms_config=self._config.transforms, multi_cohort=group_parameters["multi_cohort"], - label_presence=self._config.use_labels, - label=self._config.label, + label_presence=self._config.data.use_labels, + label=self._config.data.label, label_code=( self.maps_manager.label_code if label_code == "default" else label_code ), + cnn_index=network, ) test_loader = DataLoader( data_test, batch_size=( - self._config.batch_size - if self._config.batch_size is not None + self._config.dataloader.batch_size + if self._config.dataloader.batch_size is not None else self.maps_manager.batch_size ), shuffle=False, @@ -416,40 +179,44 @@ def 
_predict_single( rank=cluster.rank, shuffle=False, ), - num_workers=self._config.n_proc - if self._config.n_proc is not None + num_workers=self._config.dataloader.n_proc + if self._config.dataloader.n_proc is not None else self.maps_manager.n_proc, ) - self.validator._test_loader( - self.maps_manager, - test_loader, - criterion, - self._config.data_group, - split, - split_selection_metrics, - use_labels=self._config.use_labels, - gpu=self._config.gpu, - amp=self._config.amp, + self._test_loader( + maps_manager=self.maps_manager, + dataloader=test_loader, + criterion=criterion, + data_group=self._config.maps_manager.data_group, + split=split, + selection_metrics=split_selection_metrics, + use_labels=self._config.data.use_labels, + gpu=self._config.computational.gpu, + amp=self._config.computational.amp, + network=network, ) - if self._config.save_tensor: + if self._config.maps_manager.save_tensor: logger.debug("Saving tensors") - self.validator._compute_output_tensors( - self.maps_manager, - data_test, - self._config.data_group, - split, - self._config.selection_metrics, - gpu=self._config.gpu, + self._compute_output_tensors( + maps_manager=self.maps_manager, + dataset=data_test, + data_group=self._config.maps_manager.data_group, + split=split, + selection_metrics=self._config.validation.selection_metrics, + gpu=self._config.computational.gpu, + network=network, ) - if self._config.save_nifti: + if self._config.maps_manager.save_nifti: self._compute_output_nifti( - data_test, - split, + dataset=data_test, + split=split, + network=network, ) - if self._config.save_latent_tensor: + if self._config.maps_manager.save_latent_tensor: self._compute_latent_tensors( dataset=data_test, split=split, + network=network, ) def _compute_latent_tensors( @@ -478,13 +245,13 @@ def _compute_latent_tensors( network : _type_ (optional, default=None) Index of the network tested (only used in multi-network setting). """ - for selection_metric in self._config.selection_metrics: + for selection_metric in self._config.validation.selection_metrics: # load the best trained model during the training model, _ = self.maps_manager._init_model( transfer_path=self.maps_manager.maps_path, split=split, transfer_selection=selection_metric, - gpu=self._config.gpu, + gpu=self._config.computational.gpu, network=network, nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, ) @@ -498,7 +265,7 @@ def _compute_latent_tensors( self.maps_manager.maps_path / f"split-{split}" / f"best-{selection_metric}" - / self._config.data_group + / self._config.maps_manager.data_group / "latent_tensors" ) if cluster.master: @@ -555,13 +322,13 @@ def _compute_output_nifti( import nibabel as nib from numpy import eye - for selection_metric in self._config.selection_metrics: + for selection_metric in self._config.validation.selection_metrics: # load the best trained model during the training model, _ = self.maps_manager._init_model( transfer_path=self.maps_manager.maps_path, split=split, transfer_selection=selection_metric, - gpu=self._config.gpu, + gpu=self._config.computational.gpu, network=network, nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, ) @@ -575,7 +342,7 @@ def _compute_output_nifti( self.maps_manager.maps_path / f"split-{split}" / f"best-{selection_metric}" - / self._config.data_group + / self._config.maps_manager.data_group / "nifti_images" ) if cluster.master: @@ -608,77 +375,10 @@ def _compute_output_nifti( def interpret(self): """Performs the interpretation task on a subset of caps_directory defined in a TSV file. 
The mean interpretation is always saved, to save the individual interpretations set save_individual to True. - Parameters - ---------- - data_group : str - Name of the data group interpreted. - name : str - Name of the interpretation procedure. - method : str - Method used for extraction (ex: gradients, grad-cam...). - caps_directory : Path (optional, default=None) - Path to the CAPS folder. For more information please refer to - [clinica documentation](https://aramislab.paris.inria.fr/clinica/docs/public/latest/CAPS/Introduction/). - Default will load the value of an existing data group. - tsv_path : Path (optional, default=None) - Path to a TSV file containing the list of participants and sessions to test. - Default will load the DataFrame of an existing data group. - split_list : list[int] (optional, default=None) - List of splits to interpret. Default perform interpretation on all splits available. - selection_metrics : list[str] (optional, default=None) - List of selection metrics to interpret. - Default performs the interpretation on all selection metrics available. - multi_cohort : bool (optional, default=False) - If True considers that tsv_path is the path to a multi-cohort TSV. - diagnoses : list[str] (optional, default=()) - List of diagnoses to load if tsv_path is a split_directory. - Default uses the same as in training step. - target_node : int (optional, default=0) - Node from which the interpretation is computed. - save_individual : bool (optional, default=False) - If True saves the individual map of each participant / session couple. - batch_size : int (optional, default=None) - If given, sets the value of batch_size, else use the same as in training step. - n_proc : int (optional, default=None) - If given, sets the value of num_workers, else use the same as in training step. - gpu : bool (optional, default=None) - If given, a new value for the device of the model will be computed. - amp : bool (optional, default=False) - If enabled, uses Automatic Mixed Precision (requires GPU usage). - overwrite : bool (optional, default=False) - If True erase the occurrences of data_group. - overwrite_name : bool (optional, default=False) - If True erase the occurrences of name. - level : int (optional, default=None) - Layer number in the convolutional part after which the feature map is chosen. - save_nifti : bool (optional, default=False) - If True, save the interpretation map in nifti format. - Raises - ------ - NotImplementedError - If the method is not implemented - NotImplementedError - If the interpretaion of multi network is asked - MAPSError - If the interpretation has already been determined. 
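The parameter list removed above does not disappear; it is regrouped into the nested InterpretConfig, with interpretation-specific options under the interpret sub-config (an InterpretBaseConfig) and the rest under the shared sections. A hedged usage sketch, in which the argument values (and the exact method string accepted by the InterpretationMethod enum) are illustrative assumptions rather than documented defaults:

from clinicadl.interpret.config import InterpretConfig
from clinicadl.predictor.predictor import Predictor

config = InterpretConfig(
    maps_dir="maps",       # routed to the maps_manager sub-config
    data_group="test",
    name="gradients-run",  # routed to the interpret sub-config
    method="gradients",    # resolved to a Gradients subclass by get_method()
)
Predictor(config).interpret()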
""" assert isinstance(self._config, InterpretConfig) - self._config.diagnoses = ( - self.maps_manager.diagnoses - if self._config.diagnoses is None or len(self._config.diagnoses) == 0 - else self._config.diagnoses - ) - self._config.batch_size = ( - self.maps_manager.batch_size - if not self._config.batch_size - else self._config.batch_size - ) - self._config.n_proc = ( - self.maps_manager.n_proc if not self._config.n_proc else self._config.n_proc - ) - - self._config.adapt_cross_val_with_maps_manager_info(self.maps_manager) + self._config.adapt_with_maps_manager_info(self.maps_manager) if self.maps_manager.multi_network: raise NotImplementedError( @@ -690,14 +390,13 @@ def interpret(self): size_reduction=self.maps_manager.size_reduction, size_reduction_factor=self.maps_manager.size_reduction_factor, ) - group_df = self._config.create_groupe_df() + group_df = self._config.data.create_groupe_df() self._check_data_group(group_df) - assert self._config.split - for split in self._config.split: + for split in self.splitter.split_iterator(): logger.info(f"Interpretation of split {split}") df_group, parameters_group = self.get_group_info( - self._config.data_group, split + self._config.maps_manager.data_group, split ) data_test = return_dataset( parameters_group["caps_directory"], @@ -711,30 +410,30 @@ def interpret(self): ) test_loader = DataLoader( data_test, - batch_size=self._config.batch_size, + batch_size=self._config.dataloader.batch_size, shuffle=False, - num_workers=self._config.n_proc, + num_workers=self._config.dataloader.n_proc, ) - if not self._config.selection_metrics: - self._config.selection_metrics = find_selection_metrics( + if not self._config.validation.selection_metrics: + self._config.validation.selection_metrics = find_selection_metrics( self.maps_manager.maps_path, split, ) - for selection_metric in self._config.selection_metrics: + for selection_metric in self._config.validation.selection_metrics: logger.info(f"Interpretation of metric {selection_metric}") results_path = ( self.maps_manager.maps_path / f"split-{split}" / f"best-{selection_metric}" - / self._config.data_group - / f"interpret-{self._config.name}" + / self._config.maps_manager.data_group + / f"interpret-{self._config.interpret.name}" ) if (results_path).is_dir(): - if self._config.overwrite_name: + if self._config.interpret.overwrite_name: shutil.rmtree(results_path) else: raise MAPSError( - f"Interpretation name {self._config.name} is already written. " + f"Interpretation name {self._config.interpret.name} is already written. " f"Please choose another name or set overwrite_name to True." 
) results_path.mkdir(parents=True) @@ -742,28 +441,28 @@ def interpret(self): transfer_path=self.maps_manager.maps_path, split=split, transfer_selection=selection_metric, - gpu=self._config.gpu, + gpu=self._config.computational.gpu, ) - interpreter = self._config.get_method()(model) + interpreter = self._config.interpret.get_method()(model) cum_maps = [0] * data_test.elem_per_image for data in test_loader: images = data["image"].to(model.device) map_pt = interpreter.generate_gradients( images, - self._config.target_node, - level=self._config.level, - amp=self._config.amp, + self._config.interpret.target_node, + level=self._config.interpret.level, + amp=self._config.computational.amp, ) for i in range(len(data["participant_id"])): mode_id = data[f"{self.maps_manager.mode}_id"][i] cum_maps[mode_id] += map_pt[i] - if self._config.save_individual: + if self._config.interpret.save_individual: single_path = ( results_path / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.pt" ) torch.save(map_pt[i], single_path) - if self._config.save_nifti: + if self._config.maps_manager.save_nifti: import nibabel as nib from numpy import eye @@ -781,7 +480,7 @@ def interpret(self): mode_map, results_path / f"mean_{self.maps_manager.mode}-{i}_map.pt", ) - if self._config.save_nifti: + if self._config.maps_manager.save_nifti: import nibabel as nib from numpy import eye @@ -801,22 +500,6 @@ def _check_data_group( Parameters ---------- - data_group : str - name of the data group - caps_directory : str (optional, default=None) - input CAPS directory - df : pd.DataFrame (optional, default=None) - Table of participant_id / session_id of the data group - multi_cohort : bool (optional, default=False) - indicates if the input data comes from several CAPS - overwrite : bool (optional, default=False) - If True former definition of data group is erased - label : str (optional, default=None) - label name if applicable - split_list : list[int] (optional, default=None) - _description_ - skip_leak_check : bool (optional, default=False) - _description_ Raises ------ @@ -828,17 +511,21 @@ def _check_data_group( when caps_directory or df are not given and data group does not exist """ - group_dir = self.maps_manager.maps_path / "groups" / self._config.data_group + group_dir = ( + self.maps_manager.maps_path + / "groups" + / self._config.maps_manager.data_group + ) logger.debug(f"Group path {group_dir}") if group_dir.is_dir(): # Data group already exists - if self._config.overwrite: - if self._config.data_group in ["train", "validation"]: + if self._config.maps_manager.overwrite: + if self._config.maps_manager.data_group in ["train", "validation"]: raise MAPSError("Cannot overwrite train or validation data group.") else: - # if not split_list: - # split_list = self.maps_manager.find_splits() + if not self._config.split.split: + self._config.split.split = self.maps_manager.find_splits() assert self._config.split - for split in self._config.split: + for split in self._config.split.split: selection_metrics = find_selection_metrics( self.maps_manager.maps_path, split, @@ -848,40 +535,40 @@ def _check_data_group( self.maps_manager.maps_path / f"split-{split}" / f"best-{selection}" - / self._config.data_group + / self._config.maps_manager.data_group ) if results_path.is_dir(): shutil.rmtree(results_path) elif df is not None or ( - self._config.caps_directory is not None - and self._config.caps_directory != Path("") + self._config.data.caps_directory is not None 
+ and self._config.data.caps_directory != Path("") ): raise ClinicaDLArgumentError( - f"Data group {self._config.data_group} is already defined. " + f"Data group {self._config.maps_manager.data_group} is already defined. " f"Please do not give any caps_directory, tsv_path or multi_cohort to use it. " - f"To erase {self._config.data_group} please set overwrite to True." + f"To erase {self._config.maps_manager.data_group} please set overwrite to True." ) elif not group_dir.is_dir() and ( - self._config.caps_directory is None or df is None + self._config.data.caps_directory is None or df is None ): # Data group does not exist yet / was overwritten + missing data raise ClinicaDLArgumentError( - f"The data group {self._config.data_group} does not already exist. " + f"The data group {self._config.maps_manager.data_group} does not already exist. " f"Please specify a caps_directory and a tsv_path to create this data group." ) elif ( not group_dir.is_dir() ): # Data group does not exist yet / was overwritten + all data is provided - if self._config.skip_leak_check: + if self._config.validation.skip_leak_check: logger.info("Skipping data leakage check") else: - self._check_leakage(self._config.data_group, df) + self._check_leakage(self._config.maps_manager.data_group, df) self._write_data_group( - self._config.data_group, + self._config.maps_manager.data_group, df, - self._config.caps_directory, - self._config.multi_cohort, - label=self._config.label, + self._config.data.caps_directory, + self._config.data.multi_cohort, + label=self._config.data.label, ) def get_group_info( @@ -997,8 +684,8 @@ def _write_data_group( group_path.mkdir(parents=True) columns = ["participant_id", "session_id", "cohort"] - if self._config.label in df.columns.values: - columns += [self._config.label] + if self._config.data.label in df.columns.values: + columns += [self._config.data.label] if label is not None and label in df.columns.values: columns += [label] @@ -1088,3 +775,379 @@ def get_interpretation( weights_only=True, ) return map_pt + + def test( + self, + mode: str, + metrics_module: MetricModule, + n_classes: int, + network_task, + model: Network, + dataloader: DataLoader, + criterion: _Loss, + use_labels: bool = True, + amp: bool = False, + report_ci=False, + ) -> Tuple[pd.DataFrame, Dict[str, float]]: + """ + Computes the predictions and evaluation metrics. + + Parameters + ---------- + model: Network + The model trained. + dataloader: DataLoader + Wrapper of a CapsDataset. + criterion: _Loss + Function to calculate the loss. + use_labels: bool + If True the true_label will be written in output DataFrame + and metrics dict will be created. + amp: bool + If True, enables Pytorch's automatic mixed precision. + + Returns + ------- + the results and metrics on the image level. 
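Two behaviours of test() worth flagging for callers: under torch.distributed the per-rank DataFrames are gathered and concatenated on rank 0 before metrics are computed, and when use_labels is False the metrics dict comes back as None. A call sketch reusing the parameter names of this method (predictor, model, test_loader, metrics_module and criterion are assumed to exist):

results_df, metrics = predictor.test(
    mode="image",
    metrics_module=metrics_module,   # a MetricModule
    n_classes=2,
    network_task="classification",
    model=model,                     # a clinicadl Network
    dataloader=test_loader,
    criterion=criterion,
    use_labels=True,                 # False -> metrics is None
)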
+ """ + model.eval() + dataloader.dataset.eval() + + results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) + total_loss = {} + with torch.no_grad(): + for i, data in enumerate(dataloader): + # initialize the loss list to save the loss components + with autocast("cuda", enabled=amp): + outputs, loss_dict = model(data, criterion, use_labels=use_labels) + + if i == 0: + for loss_component in loss_dict.keys(): + total_loss[loss_component] = 0 + for loss_component in total_loss.keys(): + total_loss[loss_component] += loss_dict[loss_component].float() + + # Generate detailed DataFrame + for idx in range(len(data["participant_id"])): + row = generate_test_row( + network_task, + mode, + metrics_module, + n_classes, + idx, + data, + outputs.float(), + ) + row_df = pd.DataFrame( + row, columns=columns(network_task, mode, n_classes) + ) + results_df = pd.concat([results_df, row_df]) + + del outputs, loss_dict + dataframes = [None] * dist.get_world_size() + dist.gather_object( + results_df, dataframes if dist.get_rank() == 0 else None, dst=0 + ) + if dist.get_rank() == 0: + results_df = pd.concat(dataframes) + del dataframes + results_df.reset_index(inplace=True, drop=True) + + if not use_labels: + metrics_dict = None + else: + metrics_dict = compute_metrics( + network_task, results_df, metrics_module, report_ci=report_ci + ) + for loss_component in total_loss.keys(): + dist.reduce(total_loss[loss_component], dst=0) + loss_value = total_loss[loss_component].item() / cluster.world_size + + if report_ci: + metrics_dict["Metric_names"].append(loss_component) + metrics_dict["Metric_values"].append(loss_value) + metrics_dict["Lower_CI"].append("N/A") + metrics_dict["Upper_CI"].append("N/A") + metrics_dict["SE"].append("N/A") + + else: + metrics_dict[loss_component] = loss_value + + torch.cuda.empty_cache() + + return results_df, metrics_dict + + def test_da( + self, + mode: str, + metrics_module: MetricModule, + n_classes: int, + network_task: Union[str, Task], + model: Network, + dataloader: DataLoader, + criterion: _Loss, + alpha: float = 0, + use_labels: bool = True, + target: bool = True, + report_ci=False, + ) -> Tuple[pd.DataFrame, Dict[str, float]]: + """ + Computes the predictions and evaluation metrics. + + Args: + model: the model trained. + dataloader: wrapper of a CapsDataset. + criterion: function to calculate the loss. + use_labels: If True the true_label will be written in output DataFrame + and metrics dict will be created. + Returns: + the results and metrics on the image level. 
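One subtlety shared by test() and test_da(): report_ci flips the layout of the returned metrics dict between parallel lists and a flat name-to-value mapping. A small sketch of reading the loss back out under both layouts (metrics and report_ci assumed bound):

if report_ci:
    # parallel lists: Metric_names / Metric_values / Lower_CI / Upper_CI / SE
    loss = metrics["Metric_values"][metrics["Metric_names"].index("loss")]
else:
    loss = metrics["loss"]

_test_loader below leans on exactly this when it logs metrics["Metric_values"][-1] if report_ci else metrics["loss"].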
+ """ + model.eval() + dataloader.dataset.eval() + results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) + total_loss = 0 + with torch.no_grad(): + for i, data in enumerate(dataloader): + outputs, loss_dict = model.compute_outputs_and_loss_test( + data, criterion, alpha, target + ) + total_loss += loss_dict["loss"].item() + + # Generate detailed DataFrame + for idx in range(len(data["participant_id"])): + row = generate_test_row( + network_task, + mode, + metrics_module, + n_classes, + idx, + data, + outputs, + ) + row_df = pd.DataFrame( + row, columns=columns(network_task, mode, n_classes) + ) + results_df = pd.concat([results_df, row_df]) + + del outputs, loss_dict + results_df.reset_index(inplace=True, drop=True) + + if not use_labels: + metrics_dict = None + else: + metrics_dict = compute_metrics( + network_task, results_df, metrics_module, report_ci=report_ci + ) + if report_ci: + metrics_dict["Metric_names"].append("loss") + metrics_dict["Metric_values"].append(total_loss) + metrics_dict["Lower_CI"].append("N/A") + metrics_dict["Upper_CI"].append("N/A") + metrics_dict["SE"].append("N/A") + + else: + metrics_dict["loss"] = total_loss + + torch.cuda.empty_cache() + + return results_df, metrics_dict + + def _test_loader( + self, + maps_manager: MapsManager, + dataloader, + criterion, + data_group: str, + split: int, + selection_metrics, + use_labels=True, + gpu=None, + amp=False, + network=None, + report_ci=True, + ): + """ + Launches the testing task on a dataset wrapped by a DataLoader and writes prediction TSV files. + + Args: + dataloader (torch.utils.data.DataLoader): DataLoader wrapping the test CapsDataset. + criterion (torch.nn.modules.loss._Loss): optimization criterion used during training. + data_group (str): name of the data group used for the testing task. + split (int): Index of the split used to train the model tested. + selection_metrics (list[str]): List of metrics used to select the best models which are tested. + use_labels (bool): If True, the labels must exist in test meta-data and metrics are computed. + gpu (bool): If given, a new value for the device of the model will be computed. + amp (bool): If enabled, uses Automatic Mixed Precision (requires GPU usage). + network (int): Index of the network tested (only used in multi-network setting). 
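The selection-metric loop in this method also fixes where every artefact lands on disk; the same path scheme recurs in _compute_output_tensors, _compute_output_nifti and _compute_latent_tensors. A small runnable illustration with hypothetical values:

from pathlib import Path

maps_path, split, selection, data_group = Path("maps"), 0, "loss", "test"
log_dir = maps_path / f"split-{split}" / f"best-{selection}" / data_group
print(log_dir)  # maps/split-0/best-loss/test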
+ """ + for selection_metric in selection_metrics: + if cluster.master: + log_dir = ( + maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / data_group + ) + maps_manager.write_description_log( + log_dir, + data_group, + dataloader.dataset.config.data.caps_dict, + dataloader.dataset.config.data.data_df, + ) + + # load the best trained model during the training + model, _ = maps_manager._init_model( + transfer_path=maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=gpu, + network=network, + ) + model = DDP( + model, + fsdp=maps_manager.fully_sharded_data_parallel, + amp=maps_manager.amp, + ) + + prediction_df, metrics = self.test( + mode=maps_manager.mode, + metrics_module=maps_manager.metrics_module, + n_classes=maps_manager.n_classes, + network_task=maps_manager.network_task, + model=model, + dataloader=dataloader, + criterion=criterion, + use_labels=use_labels, + amp=amp, + report_ci=report_ci, + ) + if use_labels: + if network is not None: + metrics[f"{maps_manager.mode}_id"] = network + + loss_to_log = ( + metrics["Metric_values"][-1] if report_ci else metrics["loss"] + ) + + logger.info( + f"{maps_manager.mode} level {data_group} loss is {loss_to_log} for model selected on {selection_metric}" + ) + + if cluster.master: + # Replace here + maps_manager._mode_level_to_tsv( + prediction_df, + metrics, + split, + selection_metric, + data_group=data_group, + ) + + @torch.no_grad() + def _compute_output_tensors( + self, + maps_manager: MapsManager, + dataset, + data_group, + split, + selection_metrics, + nb_images=None, + gpu=None, + network=None, + ): + """ + Compute the output tensors and saves them in the MAPS. + + Args: + dataset (clinicadl.caps_dataset.data.CapsDataset): wrapper of the data set. + data_group (str): name of the data group used for the task. + split (int): split number. + selection_metrics (list[str]): metrics used for model selection. + nb_images (int): number of full images to write. Default computes the outputs of the whole data set. + gpu (bool): If given, a new value for the device of the model will be computed. + network (int): Index of the network tested (only used in multi-network setting). 
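The index loop below shards the dataset across distributed ranks with a strided range, then, on one reading of the second range, pads the ranks that received fewer items by re-processing index 0 so that every rank runs the same number of iterations. A standalone sketch of the arithmetic:

world_size, nb_modes = 3, 8
for rank in range(world_size):
    indices = [
        *range(rank, nb_modes, world_size),          # strided shard
        *range(int(nb_modes % world_size <= rank)),  # pad short ranks with 0
    ]
    print(rank, indices)
# 0 [0, 3, 6]
# 1 [1, 4, 7]
# 2 [2, 5, 0]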
+ """ + for selection_metric in selection_metrics: + # load the best trained model during the training + model, _ = maps_manager._init_model( + transfer_path=maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=gpu, + network=network, + nb_unfrozen_layer=maps_manager.nb_unfrozen_layer, + ) + model = DDP( + model, + fsdp=maps_manager.fully_sharded_data_parallel, + amp=maps_manager.amp, + ) + model.eval() + + tensor_path = ( + maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / data_group + / "tensors" + ) + if cluster.master: + tensor_path.mkdir(parents=True, exist_ok=True) + dist.barrier() + + if nb_images is None: # Compute outputs for the whole data set + nb_modes = len(dataset) + else: + nb_modes = nb_images * dataset.elem_per_image + + for i in [ + *range(cluster.rank, nb_modes, cluster.world_size), + *range(int(nb_modes % cluster.world_size <= cluster.rank)), + ]: + data = dataset[i] + image = data["image"] + x = image.unsqueeze(0).to(model.device) + with autocast("cuda", enabled=maps_manager.std_amp): + output = model(x) + output = output.squeeze(0).cpu().float() + participant_id = data["participant_id"] + session_id = data["session_id"] + mode_id = data[f"{maps_manager.mode}_id"] + input_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_input.pt" + output_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_output.pt" + torch.save(image, tensor_path / input_filename) + torch.save(output, tensor_path / output_filename) + logger.debug(f"File saved at {[input_filename, output_filename]}") + + def _ensemble_prediction( + self, + maps_manager: MapsManager, + data_group, + split, + selection_metrics, + use_labels=True, + skip_leak_check=False, + ): + """Computes the results on the image-level.""" + + if not selection_metrics: + selection_metrics = find_selection_metrics(maps_manager.maps_path, split) + + for selection_metric in selection_metrics: + ##################### + # Soft voting + if maps_manager.num_networks > 1 and not skip_leak_check: + maps_manager._ensemble_to_tsv( + split, + selection=selection_metric, + data_group=data_group, + use_labels=use_labels, + ) + elif maps_manager.mode != "image" and not skip_leak_check: + maps_manager._mode_to_image_tsv( + split, + selection=selection_metric, + data_group=data_group, + use_labels=use_labels, + ) diff --git a/clinicadl/predict/utils.py b/clinicadl/predictor/utils.py similarity index 100% rename from clinicadl/predict/utils.py rename to clinicadl/predictor/utils.py diff --git a/clinicadl/splitter/validation.py b/clinicadl/predictor/validation.py similarity index 100% rename from clinicadl/splitter/validation.py rename to clinicadl/predictor/validation.py diff --git a/clinicadl/random_search/random_search_utils.py b/clinicadl/random_search/random_search_utils.py index ed164ea0c..f8f3bca9a 100644 --- a/clinicadl/random_search/random_search_utils.py +++ b/clinicadl/random_search/random_search_utils.py @@ -124,7 +124,6 @@ def random_sampling(rs_options: Dict[str, Any]) -> Dict[str, Any]: "mode": "fixed", "multi_cohort": "fixed", "multi_network": "choice", - "ssda_netork": "fixed", "n_fcblocks": "randint", "n_splits": "fixed", "n_proc": "fixed", diff --git a/clinicadl/resources/config/train_config.toml b/clinicadl/resources/config/train_config.toml index f4f2afe30..9e5f54657 100644 --- a/clinicadl/resources/config/train_config.toml +++ b/clinicadl/resources/config/train_config.toml @@ -4,7 +4,6 @@ [Model] architecture = "default" # ex : 
Conv5_FC3 multi_network = false -ssda_network = false [Architecture] # CNN diff --git a/clinicadl/splitter/config.py b/clinicadl/splitter/config.py index 53413fdda..59fdbaad8 100644 --- a/clinicadl/splitter/config.py +++ b/clinicadl/splitter/config.py @@ -7,8 +7,8 @@ from pydantic.types import NonNegativeInt from clinicadl.caps_dataset.data_config import DataConfig +from clinicadl.predictor.validation import ValidationConfig from clinicadl.splitter.split_utils import find_splits -from clinicadl.splitter.validation import ValidationConfig logger = getLogger("clinicadl.split_config") diff --git a/clinicadl/splitter/splitter.py b/clinicadl/splitter/splitter.py index 3bbdde461..d39b14a5b 100644 --- a/clinicadl/splitter/splitter.py +++ b/clinicadl/splitter/splitter.py @@ -1,4 +1,5 @@ import abc +import shutil from logging import getLogger from pathlib import Path from typing import Dict, List, Optional, Tuple @@ -6,6 +7,8 @@ import pandas as pd from clinicadl.splitter.config import SplitterConfig +from clinicadl.utils import cluster +from clinicadl.utils.exceptions import MAPSError logger = getLogger("clinicadl.split_manager") @@ -14,7 +17,7 @@ class Splitter: def __init__( self, config: SplitterConfig, - split_list: Optional[List[int]] = None, + # split_list: Optional[List[int]] = None, ): """_summary_ @@ -29,19 +32,19 @@ def __init__( """ self.config = config - self.split_list = split_list + # self.config.split.split = split_list - self.caps_dict = self.config.data.caps_dict # TODO : check if useful ? + # self.caps_dict = self.config.data.caps_dict # TODO : check if useful ? def max_length(self) -> int: """Maximum number of splits""" return self.config.split.n_splits def __len__(self): - if not self.split_list: + if not self.config.split.split: return self.config.split.n_splits else: - return len(self.split_list) + return len(self.config.split.split) @property def allowed_splits_list(self): @@ -203,13 +206,32 @@ def _get_tsv_paths(self, cohort_path, *args) -> Tuple[Path, Path]: def split_iterator(self): """Returns an iterable to iterate on all splits wanted.""" - if not self.split_list: + + if not self.config.split.split: return range(self.config.split.n_splits) else: - return self.split_list + return self.config.split.split def _check_item(self, item): if item not in self.allowed_splits_list: raise IndexError( f"Split index {item} out of allowed splits {self.allowed_splits_list}." ) + + def check_split_list(self, maps_path, overwrite): + existing_splits = [] + for split in self.split_iterator(): + split_path = maps_path / f"split-{split}" + if split_path.is_dir(): + if overwrite: + if cluster.master: + shutil.rmtree(split_path) + else: + existing_splits.append(split) + + if len(existing_splits) > 0: + raise MAPSError( + f"Splits {existing_splits} already exist. Please " + f"specify a list of splits not intersecting the previous list, " + f"or use overwrite to erase previously trained splits." + ) diff --git a/clinicadl/maps_manager/tmp_config.py b/clinicadl/tmp_config.py similarity index 97% rename from clinicadl/maps_manager/tmp_config.py rename to clinicadl/tmp_config.py index a31af7edb..620db133e 100644 --- a/clinicadl/maps_manager/tmp_config.py +++ b/clinicadl/tmp_config.py @@ -58,6 +58,7 @@ class TmpConfig(BaseModel): arguments needed : caps_directory, maps_path, loss """ + # ??? 
output_size: Optional[int] = None n_classes: Optional[int] = None network_task: Optional[str] = None @@ -70,18 +71,21 @@ class TmpConfig(BaseModel): std_amp: Optional[bool] = None preprocessing_dict: Optional[dict] = None + # CALLBACKS emissions_calculator: bool = False track_exp: Optional[ExperimentTracking] = None + # COMPUTATIONAL amp: bool = False fully_sharded_data_parallel: bool = False gpu: bool = True + # SPLIT n_splits: NonNegativeInt = 0 split: Optional[Tuple[NonNegativeInt, ...]] = None tsv_path: Optional[Path] = None # not needed in predict ? - # DataConfig + # DATA caps_directory: Path baseline: bool = False diagnoses: Tuple[str, ...] = ("AD", "CN") @@ -94,55 +98,68 @@ class TmpConfig(BaseModel): data_tsv: Optional[Path] = None n_subjects: int = 300 + # DATALOADER batch_size: PositiveInt = 8 n_proc: PositiveInt = 2 sampler: Sampler = Sampler.RANDOM + # EARLY STOPPING patience: NonNegativeInt = 0 tolerance: NonNegativeFloat = 0.0 + patience_epochs: NonNegativeInt = 0 + # LEARNING RATE adaptive_learning_rate: bool = False + # MAPS MANAGER maps_path: Path data_group: Optional[str] = None overwrite: bool = False save_nifti: bool = False + # NETWORK architecture: str = "default" dropout: NonNegativeFloat = 0.0 loss: str multi_network: bool = False + # OPTIMIZATION accumulation_steps: PositiveInt = 1 epochs: PositiveInt = 20 profiler: bool = False + # OPTIMIZER learning_rate: PositiveFloat = 1e-4 optimizer: Optimizer = Optimizer.ADAM weight_decay: NonNegativeFloat = 1e-4 + # REPRODUCIBILITY compensation: Compensation = Compensation.MEMORY deterministic: bool = False save_all_models: bool = False seed: int = 0 config_file: Optional[Path] = None + # SSDA caps_target: Path = Path("") preprocessing_json_target: Path = Path("") ssda_network: bool = False tsv_target_lab: Path = Path("") tsv_target_unlab: Path = Path("") + # TRANSFER LEARNING nb_unfrozen_layer: NonNegativeInt = 0 transfer_path: Optional[Path] = None transfer_selection_metric: str = "loss" + # TRANSFORMS data_augmentation: Tuple[Transform, ...] = () train_transformations: Optional[Tuple[Transform, ...]] = None normalize: bool = True size_reduction: bool = False size_reduction_factor: SizeReductionFactor = SizeReductionFactor.TWO + # VALIDATION evaluation_steps: NonNegativeInt = 0 selection_metrics: Tuple[str, ...] 
= () valid_longitudinal: bool = False @@ -282,7 +299,7 @@ def adapt_cross_val_with_maps_manager_info( ): # maps_manager is of type MapsManager but need to be in a MapsConfig type in the future # TEMPORARY if not self.split: - self.split = find_splits(maps_manager.maps_path, maps_manager.split_name) + self.split = find_splits(maps_manager.maps_path) logger.debug(f"List of splits {self.split}") def create_groupe_df(self): diff --git a/clinicadl/trainer/config/classification.py b/clinicadl/trainer/config/classification.py index 5e71d032e..f09021559 100644 --- a/clinicadl/trainer/config/classification.py +++ b/clinicadl/trainer/config/classification.py @@ -5,7 +5,7 @@ from clinicadl.caps_dataset.data_config import DataConfig as BaseDataConfig from clinicadl.network.config import NetworkConfig as BaseNetworkConfig -from clinicadl.splitter.validation import ValidationConfig as BaseValidationConfig +from clinicadl.predictor.validation import ValidationConfig as BaseValidationConfig from clinicadl.trainer.config.train import TrainConfig from clinicadl.utils.enum import ClassificationLoss, ClassificationMetric, Task diff --git a/clinicadl/trainer/config/reconstruction.py b/clinicadl/trainer/config/reconstruction.py index bf39886d4..d4b90ee2d 100644 --- a/clinicadl/trainer/config/reconstruction.py +++ b/clinicadl/trainer/config/reconstruction.py @@ -4,7 +4,7 @@ from pydantic import PositiveFloat, PositiveInt, computed_field, field_validator from clinicadl.network.config import NetworkConfig as BaseNetworkConfig -from clinicadl.splitter.validation import ValidationConfig as BaseValidationConfig +from clinicadl.predictor.validation import ValidationConfig as BaseValidationConfig from clinicadl.trainer.config.train import TrainConfig from clinicadl.utils.enum import ( Normalization, diff --git a/clinicadl/trainer/config/regression.py b/clinicadl/trainer/config/regression.py index 37e690f01..f094d5552 100644 --- a/clinicadl/trainer/config/regression.py +++ b/clinicadl/trainer/config/regression.py @@ -5,7 +5,7 @@ from clinicadl.caps_dataset.data_config import DataConfig as BaseDataConfig from clinicadl.network.config import NetworkConfig as BaseNetworkConfig -from clinicadl.splitter.validation import ValidationConfig as BaseValidationConfig +from clinicadl.predictor.validation import ValidationConfig as BaseValidationConfig from clinicadl.trainer.config.train import TrainConfig from clinicadl.utils.enum import RegressionLoss, RegressionMetric, Task diff --git a/clinicadl/trainer/config/train.py b/clinicadl/trainer/config/train.py index c44febe6b..30a92c92a 100644 --- a/clinicadl/trainer/config/train.py +++ b/clinicadl/trainer/config/train.py @@ -14,13 +14,12 @@ from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig from clinicadl.config.config.lr_scheduler import LRschedulerConfig from clinicadl.config.config.reproducibility import ReproducibilityConfig -from clinicadl.config.config.ssda import SSDAConfig from clinicadl.maps_manager.config import MapsManagerConfig from clinicadl.network.config import NetworkConfig from clinicadl.optimizer.optimization import OptimizationConfig from clinicadl.optimizer.optimizer import OptimizerConfig +from clinicadl.predictor.validation import ValidationConfig from clinicadl.splitter.config import SplitConfig -from clinicadl.splitter.validation import ValidationConfig from clinicadl.trainer.transfer_learning import TransferLearningConfig from clinicadl.transforms.config import TransformsConfig from clinicadl.utils.computational.computational import 
ComputationalConfig @@ -50,7 +49,6 @@ class TrainConfig(BaseModel, ABC): optimizer: OptimizerConfig reproducibility: ReproducibilityConfig split: SplitConfig - ssda: SSDAConfig transfer_learning: TransferLearningConfig transforms: TransformsConfig validation: ValidationConfig @@ -77,7 +75,6 @@ def __init__(self, **kwargs): optimizer=kwargs, reproducibility=kwargs, split=kwargs, - ssda=kwargs, transfer_learning=kwargs, transforms=kwargs, validation=kwargs, @@ -97,7 +94,6 @@ def _update(self, config_dict: Dict[str, Any]) -> None: self.optimizer.__dict__.update(config_dict) self.reproducibility.__dict__.update(config_dict) self.split.__dict__.update(config_dict) - self.ssda.__dict__.update(config_dict) self.transfer_learning.__dict__.update(config_dict) self.transforms.__dict__.update(config_dict) self.validation.__dict__.update(config_dict) diff --git a/clinicadl/trainer/tasks_utils.py b/clinicadl/trainer/tasks_utils.py index dc28d0acd..e3946790c 100644 --- a/clinicadl/trainer/tasks_utils.py +++ b/clinicadl/trainer/tasks_utils.py @@ -603,6 +603,7 @@ def generate_sampler( network_task: Union[str, Task], dataset: CapsDataset, sampler_option: str = "random", + label_code: Optional[dict] = None, n_bins: int = 5, dp_degree: Optional[int] = None, rank: Optional[int] = None, @@ -622,7 +623,7 @@ def generate_sampler( def calculate_weights_classification(df): labels = df[dataset.config.data.label].unique() - codes = {dataset.config.data.label_code[label] for label in labels} + codes = {label_code[label] for label in labels} count = np.zeros(len(codes)) for idx in df.index: diff --git a/clinicadl/trainer/trainer.py b/clinicadl/trainer/trainer.py index 66ceb0dd1..775ecd2c6 100644 --- a/clinicadl/trainer/trainer.py +++ b/clinicadl/trainer/trainer.py @@ -1,6 +1,6 @@ from __future__ import annotations # noqa: I001 -import shutil + from contextlib import nullcontext from datetime import datetime from logging import getLogger @@ -33,7 +33,8 @@ patch_to_read_json, ) from clinicadl.trainer.tasks_utils import create_training_config -from clinicadl.validator.validator import Validator +from clinicadl.predictor.predictor import Predictor +from clinicadl.predictor.config import PredictConfig from clinicadl.splitter.splitter import Splitter from clinicadl.splitter.config import SplitterConfig from clinicadl.transforms.config import TransformsConfig @@ -67,7 +68,12 @@ def __init__( self.config = config self.maps_manager = self._init_maps_manager(config) - self.validator = Validator() + predict_config = PredictConfig(**config.get_dict()) + self.validator = Predictor(predict_config) + + # test + splitter_config = SplitterConfig(**self.config.get_dict()) + self.splitter = Splitter(splitter_config) self._check_args() def _init_maps_manager(self, config) -> MapsManager: @@ -86,7 +92,12 @@ def _init_maps_manager(self, config) -> MapsManager: ) # TODO : precise which parameters in config are useful @classmethod - def from_json(cls, config_file: str | Path, maps_path: str | Path) -> Trainer: + def from_json( + cls, + config_file: str | Path, + maps_path: str | Path, + split: Optional[list[int]] = None, + ) -> Trainer: """ Creates a Trainer from a json configuration file. 
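The refactor above replaces the ad-hoc `Validator` with a `Predictor` built from the same config, hands the `Trainer` its own `Splitter`, and lets `from_json` carry the split list into the config. A minimal usage sketch of the resulting call pattern (hypothetical paths; assumes the remaining arguments of `train` keep their defaults):

    from pathlib import Path

    from clinicadl.trainer.trainer import Trainer

    # The split list now travels through the config (config_dict["split"]);
    # an empty value means "iterate over all splits of the cross-validation".
    trainer = Trainer.from_json(
        config_file=Path("maps/maps.json"),  # hypothetical existing MAPS json
        maps_path=Path("maps_retrained"),
        split=[0],
    )
    trainer.train()  # iterates over trainer.splitter.split_iterator()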
@@ -113,6 +124,7 @@ def from_json(cls, config_file: str | Path, maps_path: str | Path) -> Trainer:
             raise FileNotFoundError(f"No file found at {str(config_file)}.")
         config_dict = patch_to_read_json(read_json(config_file))  # TODO : remove patch
         config_dict["maps_dir"] = maps_path
+        config_dict["split"] = split if split else ()
         config_object = create_training_config(config_dict["network_task"])(
             **config_dict
         )
@@ -147,7 +159,7 @@ def from_maps(cls, maps_path: str | Path) -> Trainer:
         )
         return cls.from_json(maps_path / "maps.json", maps_path)
 
-    def resume(self, splits: List[int]) -> None:
+    def resume(self) -> None:
         """
         Resume a prematurely stopped training.
 
@@ -157,13 +169,13 @@
             The splits that must be resumed.
         """
         stopped_splits = set(find_stopped_splits(self.config.maps_manager.maps_dir))
-        finished_splits = set(find_finished_splits(self.maps_manager.maps_path))
-        # TODO : check these two lines. Why do we need a split_manager?
+        finished_splits = set(find_finished_splits(self.config.maps_manager.maps_dir))
+        # TODO : check these two lines. Why do we need a self.splitter?
         splitter_config = SplitterConfig(**self.config.get_dict())
-        split_manager = Splitter(splitter_config, split_list=splits)
+        self.splitter = Splitter(splitter_config)
 
-        split_iterator = split_manager.split_iterator()
+        split_iterator = self.splitter.split_iterator()
         ###
         absent_splits = set(split_iterator) - stopped_splits - finished_splits
 
@@ -184,9 +196,20 @@
     def _check_args(self):
         self.config.reproducibility.seed = get_seed(self.config.reproducibility.seed)
-        # if (len(self.config.data.label_code) == 0):
+        # if len(self.config.data.label_code) == 0:
        #     self.config.data.label_code = self.maps_manager.label_code
        # TODO: deal with label_code and replace self.maps_manager.label_code
+        from clinicadl.trainer.tasks_utils import generate_label_code
+
+        if (
+            "label_code" not in self.config.data.model_dump()
+            or self.config.data.label_code is None
+            or len(self.config.data.label_code) == 0
+        ):  # allows custom label code in TOML (None is checked before len() to avoid a TypeError)
+            train_df = self.splitter[0]["train"]
+            self.config.data.label_code = generate_label_code(
+                self.config.network_task, train_df, self.config.data.label
+            )
 
     def train(
         self,
@@ -211,53 +234,51 @@
             If splits specified in input already exist and overwrite is False.
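+
+        Examples
+        --------
+        Retraining over splits that already exist in the MAPS requires
+        ``overwrite`` (a minimal sketch; the other arguments are assumed to
+        keep their defaults)::
+
+            trainer.train(overwrite=True)
+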
""" - self.check_split_list(split_list=split_list, overwrite=overwrite) - - if self.config.ssda.ssda_network: - self._train_ssda(split_list, resume=False) - - else: - splitter_config = SplitterConfig(**self.config.get_dict()) - split_manager = Splitter(splitter_config, split_list=split_list) - - for split in split_manager.split_iterator(): - logger.info(f"Training split {split}") - seed_everything( - self.config.reproducibility.seed, - self.config.reproducibility.deterministic, - self.config.reproducibility.compensation, - ) - - split_df_dict = split_manager[split] + # splitter_config = SplitterConfig(**self.config.get_dict()) + # self.splitter = Splitter(splitter_config) + # self.splitter.check_split_list(self.config.maps_manager.maps_dir, self.config.maps_manager.overwrite) + self.splitter.check_split_list( + self.config.maps_manager.maps_dir, + overwrite, # overwrite change so careful it is not the maps manager overwrite parameters here + ) + for split in self.splitter.split_iterator(): + logger.info(f"Training split {split}") + seed_everything( + self.config.reproducibility.seed, + self.config.reproducibility.deterministic, + self.config.reproducibility.compensation, + ) - if self.config.model.multi_network: - resume, first_network = self.init_first_network(False, split) - for network in range(first_network, self.maps_manager.num_networks): - self._train_single( - split, split_df_dict, network=network, resume=resume - ) - else: - self._train_single(split, split_df_dict, resume=False) + split_df_dict = self.splitter[split] - def check_split_list(self, split_list, overwrite): - existing_splits = [] - splitter_config = SplitterConfig(**self.config.get_dict()) - split_manager = Splitter(splitter_config, split_list=split_list) - for split in split_manager.split_iterator(): - split_path = self.maps_manager.maps_path / f"split-{split}" - if split_path.is_dir(): - if overwrite: - if cluster.master: - shutil.rmtree(split_path) - else: - existing_splits.append(split) - - if len(existing_splits) > 0: - raise MAPSError( - f"Splits {existing_splits} already exist. Please " - f"specify a list of splits not intersecting the previous list, " - f"or use overwrite to erase previously trained splits." - ) + if self.config.model.multi_network: + resume, first_network = self.init_first_network(False, split) + for network in range(first_network, self.maps_manager.num_networks): + self._train_single( + split, split_df_dict, network=network, resume=resume + ) + else: + self._train_single(split, split_df_dict, resume=False) + + # def check_split_list(self, split_list, overwrite): + # existing_splits = [] + # splitter_config = SplitterConfig(**self.config.get_dict()) + # self.splitter = Splitter(splitter_config) + # for split in self.splitter.split_iterator(): + # split_path = self.maps_manager.maps_path / f"split-{split}" + # if split_path.is_dir(): + # if overwrite: + # if cluster.master: + # shutil.rmtree(split_path) + # else: + # existing_splits.append(split) + + # if len(existing_splits) > 0: + # raise MAPSError( + # f"Splits {existing_splits} already exist. Please " + # f"specify a list of splits not intersecting the previous list, " + # f"or use overwrite to erase previously trained splits." 
+ # ) def _resume( self, @@ -279,8 +300,8 @@ def _resume( """ missing_splits = [] splitter_config = SplitterConfig(**self.config.get_dict()) - split_manager = Splitter(splitter_config, split_list=split_list) - for split in split_manager.split_iterator(): + self.splitter = Splitter(splitter_config) + for split in self.splitter.split_iterator(): if not (self.maps_manager.maps_path / f"split-{split}" / "tmp").is_dir(): missing_splits.append(split) @@ -290,26 +311,23 @@ def _resume( f"Please try train command on these splits and resume only others." ) - if self.config.ssda.ssda_network: - self._train_ssda(split_list, resume=True) - else: - for split in split_manager.split_iterator(): - logger.info(f"Training split {split}") - seed_everything( - self.config.reproducibility.seed, - self.config.reproducibility.deterministic, - self.config.reproducibility.compensation, - ) + for split in self.splitter.split_iterator(): + logger.info(f"Training split {split}") + seed_everything( + self.config.reproducibility.seed, + self.config.reproducibility.deterministic, + self.config.reproducibility.compensation, + ) - split_df_dict = split_manager[split] - if self.config.model.multi_network: - resume, first_network = self.init_first_network(True, split) - for network in range(first_network, self.maps_manager.num_networks): - self._train_single( - split, split_df_dict, network=network, resume=resume - ) - else: - self._train_single(split, split_df_dict, resume=True) + split_df_dict = self.splitter[split] + if self.config.model.multi_network: + resume, first_network = self.init_first_network(True, split) + for network in range(first_network, self.maps_manager.num_networks): + self._train_single( + split, split_df_dict, network=network, resume=resume + ) + else: + self._train_single(split, split_df_dict, resume=True) def init_first_network(self, resume: bool, split: int): first_network = 0 @@ -347,7 +365,7 @@ def get_dataloader( transforms_config=self.config.transforms, multi_cohort=self.config.data.multi_cohort, label=self.config.data.label, - label_code=self.maps_manager.label_code, + label_code=self.config.data.label_code, cnn_index=cnn_index, ) if homemade_sampler: @@ -355,6 +373,7 @@ def get_dataloader( network_task=self.maps_manager.network_task, dataset=dataset, sampler_option=sampler_option, + label_code=self.config.data.label_code, dp_degree=dp_degree, rank=rank, ) @@ -452,218 +471,6 @@ def _train_single( self.maps_manager._erase_tmp(split) - def _train_ssda( - self, - split_list: Optional[List[int]] = None, - resume: bool = False, - ) -> None: - """ - Trains a single CNN for a source and target domain using semi-supervised domain adaptation. - - Parameters - ---------- - split_list : Optional[List[int]] (optional, default=None) - List of splits on which the training task is performed. - If None, performs training on all splits of the cross-validation. - resume : bool (optional, default=False) - If True, the job is resumed from checkpoint. 
- """ - - splitter_config = SplitterConfig(**self.config.get_dict()) - - split_manager = Splitter(splitter_config, split_list=split_list) - split_manager_target_lab = Splitter(splitter_config, split_list=split_list) - - for split in split_manager.split_iterator(): - logger.info(f"Training split {split}") - seed_everything( - self.config.reproducibility.seed, - self.config.reproducibility.deterministic, - self.config.reproducibility.compensation, - ) - - split_df_dict = split_manager[split] - split_df_dict_target_lab = split_manager_target_lab[split] - - logger.debug("Loading source training data...") - data_train_source = return_dataset( - self.config.data.caps_directory, - split_df_dict["train"], - self.config.data.preprocessing_dict, - transforms_config=self.config.transforms, - multi_cohort=self.config.data.multi_cohort, - label=self.config.data.label, - label_code=self.maps_manager.label_code, - ) - - logger.debug("Loading target labelled training data...") - data_train_target_labeled = return_dataset( - Path(self.config.ssda.caps_target), # TO CHECK - split_df_dict_target_lab["train"], - self.config.ssda.preprocessing_dict_target, - transforms_config=self.config.transforms, - multi_cohort=False, # A checker - label=self.config.data.label, - label_code=self.maps_manager.label_code, - ) - from torch.utils.data import ConcatDataset - - combined_dataset = ConcatDataset( - [data_train_source, data_train_target_labeled] - ) - - logger.debug("Loading target unlabelled training data...") - data_target_unlabeled = return_dataset( - Path(self.config.ssda.caps_target), - pd.read_csv(self.config.ssda.tsv_target_unlab, sep="\t"), - self.config.ssda.preprocessing_dict_target, - transforms_config=self.config.transforms, - multi_cohort=False, # A checker - label=self.config.data.label, - label_code=self.maps_manager.label_code, - ) - - logger.debug("Loading validation source data...") - data_valid_source = return_dataset( - self.config.data.caps_directory, - split_df_dict["validation"], - self.config.data.preprocessing_dict, - transforms_config=self.config.transforms, - multi_cohort=self.config.data.multi_cohort, - label=self.config.data.label, - label_code=self.maps_manager.label_code, - ) - logger.debug("Loading validation target labelled data...") - data_valid_target_labeled = return_dataset( - Path(self.config.ssda.caps_target), - split_df_dict_target_lab["validation"], - self.config.ssda.preprocessing_dict_target, - transforms_config=self.config.transforms, - multi_cohort=False, - label=self.config.data.label, - label_code=self.maps_manager.label_code, - ) - train_source_sampler = generate_sampler( - self.maps_manager.network_task, - data_train_source, - self.config.dataloader.sampler, - ) - - logger.info( - f"Getting train and validation loader with batch size {self.config.dataloader.batch_size}" - ) - - ## Oversampling of the target dataset - from torch.utils.data import SubsetRandomSampler - - # Create index lists for target labeled dataset - labeled_indices = list(range(len(data_train_target_labeled))) - - # Oversample the indices for the target labelled dataset to match the size of the labeled source dataset - data_train_source_size = ( - len(data_train_source) // self.config.dataloader.batch_size - ) - labeled_oversampled_indices = labeled_indices * ( - data_train_source_size // len(labeled_indices) - ) - - # Append remaining indices to match the size of the largest dataset - labeled_oversampled_indices += labeled_indices[ - : data_train_source_size % len(labeled_indices) - ] - - # Create 
SubsetRandomSamplers using the oversampled indices - labeled_sampler = SubsetRandomSampler(labeled_oversampled_indices) - - train_source_loader = DataLoader( - data_train_source, - batch_size=self.config.dataloader.batch_size, - sampler=train_source_sampler, - # shuffle=True, # len(data_train_source) < len(data_train_target_labeled), - num_workers=self.config.dataloader.n_proc, - worker_init_fn=pl_worker_init_function, - drop_last=True, - ) - logger.info( - f"Train source loader size is {len(train_source_loader)*self.config.dataloader.batch_size}" - ) - train_target_loader = DataLoader( - data_train_target_labeled, - batch_size=1, # To limit the need of oversampling - # sampler=train_target_sampler, - sampler=labeled_sampler, - num_workers=self.config.dataloader.n_proc, - worker_init_fn=pl_worker_init_function, - # shuffle=True, # len(data_train_target_labeled) < len(data_train_source), - drop_last=True, - ) - logger.info( - f"Train target labeled loader size oversample is {len(train_target_loader)}" - ) - - data_train_target_labeled.df = data_train_target_labeled.df[ - ["participant_id", "session_id", "diagnosis", "cohort", "domain"] - ] - - train_target_unl_loader = DataLoader( - data_target_unlabeled, - batch_size=self.config.dataloader.batch_size, - num_workers=self.config.dataloader.n_proc, - # sampler=unlabeled_sampler, - worker_init_fn=pl_worker_init_function, - shuffle=True, - drop_last=True, - ) - - logger.info( - f"Train target unlabeled loader size is {len(train_target_unl_loader)*self.config.dataloader.batch_size}" - ) - - valid_loader_source = DataLoader( - data_valid_source, - batch_size=self.config.dataloader.batch_size, - shuffle=False, - num_workers=self.config.dataloader.n_proc, - ) - logger.info( - f"Validation loader source size is {len(valid_loader_source)*self.config.dataloader.batch_size}" - ) - - valid_loader_target = DataLoader( - data_valid_target_labeled, - batch_size=self.config.dataloader.batch_size, # To check - shuffle=False, - num_workers=self.config.dataloader.n_proc, - ) - logger.info( - f"Validation loader target size is {len(valid_loader_target)*self.config.dataloader.batch_size}" - ) - - self._train_ssdann( - train_source_loader, - train_target_loader, - train_target_unl_loader, - valid_loader_target, - valid_loader_source, - split, - resume=resume, - ) - - self.validator._ensemble_prediction( - self.maps_manager, - "train", - split, - self.config.validation.selection_metrics, - ) - self.validator._ensemble_prediction( - self.maps_manager, - "validation", - split, - self.config.validation.selection_metrics, - ) - - self.maps_manager._erase_tmp(split) - def _train( self, train_loader: DataLoader, @@ -985,412 +792,6 @@ def _train( self.callback_handler.on_train_end(parameters=self.maps_manager.parameters) - def _train_ssdann( - self, - train_source_loader: DataLoader, - train_target_loader: DataLoader, - train_target_unl_loader: DataLoader, - valid_loader: DataLoader, - valid_source_loader: DataLoader, - split: int, - network: Optional[Any] = None, - resume: bool = False, - evaluate_source: bool = True, # TO MODIFY - ): - """ - _summary_ - - Parameters - ---------- - train_source_loader : torch.utils.data.DataLoader - _description_ - train_target_loader : torch.utils.data.DataLoader - _description_ - train_target_unl_loader : torch.utils.data.DataLoader - _description_ - valid_loader : torch.utils.data.DataLoader - _description_ - valid_source_loader : torch.utils.data.DataLoader - _description_ - split : int - _description_ - network : Optional[Any] 
(optional, default=None) - _description_ - resume : bool (optional, default=False) - _description_ - evaluate_source : bool (optional, default=True) - _description_ - - Raises - ------ - Exception - _description_ - """ - model, beginning_epoch = self.maps_manager._init_model( - split=split, - resume=resume, - transfer_path=self.config.transfer_learning.transfer_path, - transfer_selection=self.config.transfer_learning.transfer_selection_metric, - ) - - criterion = get_criterion( - self.maps_manager.network_task, self.config.model.loss - ) - logger.debug(f"Criterion for {self.config.network_task} is {criterion}") - optimizer = self._init_optimizer(model, split=split, resume=resume) - - logger.debug(f"Optimizer used for training is optimizer") - - model.train() - train_source_loader.dataset.train() - train_target_loader.dataset.train() - train_target_unl_loader.dataset.train() - - early_stopping = EarlyStopping( - "min", - min_delta=self.config.early_stopping.tolerance, - patience=self.config.early_stopping.patience, - ) - - metrics_valid_target = {"loss": None} - metrics_valid_source = {"loss": None} - - log_writer = LogWriter( - self.maps_manager.maps_path, - evaluation_metrics(self.maps_manager.network_task) + ["loss"], - split, - resume=resume, - beginning_epoch=beginning_epoch, - network=network, - ) - epoch = log_writer.beginning_epoch - - retain_best = RetainBest( - selection_metrics=list(self.config.validation.selection_metrics) - ) - import numpy as np - - while epoch < self.config.optimization.epochs and not early_stopping.step( - metrics_valid_target["loss"] - ): - logger.info(f"Beginning epoch {epoch}.") - - model.zero_grad() - evaluation_flag, step_flag = True, True - - for i, (data_source, data_target, data_target_unl) in enumerate( - zip(train_source_loader, train_target_loader, train_target_unl_loader) - ): - p = ( - float(epoch * len(train_target_loader)) - / 10 - / len(train_target_loader) - ) - alpha = 2.0 / (1.0 + np.exp(-10 * p)) - 1 - # alpha = 0 - _, _, loss_dict = model.compute_outputs_and_loss( - data_source, data_target, data_target_unl, criterion, alpha - ) # TO CHECK - logger.debug(f"Train loss dictionary {loss_dict}") - loss = loss_dict["loss"] - loss.backward() - if (i + 1) % self.config.optimization.accumulation_steps == 0: - step_flag = False - optimizer.step() - optimizer.zero_grad() - - del loss - - # Evaluate the model only when no gradients are accumulated - if ( - self.config.validation.evaluation_steps != 0 - and (i + 1) % self.config.validation.evaluation_steps == 0 - ): - evaluation_flag = False - - # Evaluate on target data - logger.info("Evaluation on target data") - ( - _, - metrics_train_target, - ) = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=train_target_loader, - criterion=criterion, - alpha=alpha, - target=True, - ) # TO CHECK - - ( - _, - metrics_valid_target, - ) = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=valid_loader, - criterion=criterion, - alpha=alpha, - target=True, - ) - - model.train() - train_target_loader.dataset.train() - - log_writer.step( - epoch, - i, - metrics_train_target, - metrics_valid_target, - len(train_target_loader), - "training_target.tsv", - ) - logger.info( - f"{self.config.data.mode} level 
training loss for target data is {metrics_train_target['loss']} " - f"at the end of iteration {i}" - ) - logger.info( - f"{self.config.data.mode} level validation loss for target data is {metrics_valid_target['loss']} " - f"at the end of iteration {i}" - ) - - # Evaluate on source data - logger.info("Evaluation on source data") - ( - _, - metrics_train_source, - ) = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=train_source_loader, - criterion=criterion, - alpha=alpha, - ) - ( - _, - metrics_valid_source, - ) = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=valid_source_loader, - criterion=criterion, - alpha=alpha, - ) - - model.train() - train_source_loader.dataset.train() - - log_writer.step( - epoch, - i, - metrics_train_source, - metrics_valid_source, - len(train_source_loader), - ) - logger.info( - f"{self.config.data.mode} level training loss for source data is {metrics_train_source['loss']} " - f"at the end of iteration {i}" - ) - logger.info( - f"{self.config.data.mode} level validation loss for source data is {metrics_valid_source['loss']} " - f"at the end of iteration {i}" - ) - - # If no step has been performed, raise Exception - if step_flag: - raise Exception( - "The model has not been updated once in the epoch. The accumulation step may be too large." - ) - - # If no evaluation has been performed, warn the user - elif evaluation_flag and self.config.validation.evaluation_steps != 0: - logger.warning( - f"Your evaluation steps {self.config.validation.evaluation_steps} are too big " - f"compared to the size of the dataset. " - f"The model is evaluated only once at the end epochs." - ) - - # Update weights one last time if gradients were computed without update - if (i + 1) % self.config.optimization.accumulation_steps != 0: - optimizer.step() - optimizer.zero_grad() - # Always test the results and save them once at the end of the epoch - model.zero_grad() - logger.debug(f"Last checkpoint at the end of the epoch {epoch}") - - if evaluate_source: - logger.info( - f"Evaluate source data at the end of the epoch {epoch} with alpha: {alpha}." 
- ) - _, metrics_train_source = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=train_source_loader, - criterion=criterion, - alpha=alpha, - target=True, - report_ci=False, - ) - _, metrics_valid_source = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=valid_source_loader, - criterion=criterion, - alpha=alpha, - target=True, - report_ci=False, - ) - - log_writer.step( - epoch, - i, - metrics_train_source, - metrics_valid_source, - len(train_source_loader), - ) - - logger.info( - f"{self.config.data.mode} level training loss for source data is {metrics_train_source['loss']} " - f"at the end of iteration {i}" - ) - logger.info( - f"{self.config.data.mode} level validation loss for source data is {metrics_valid_source['loss']} " - f"at the end of iteration {i}" - ) - - _, metrics_train_target = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=train_target_loader, - criterion=criterion, - alpha=alpha, - target=True, - ) - _, metrics_valid_target = test_da( - mode=self.maps_manager.mode, - n_classes=self.maps_manager.n_classes, - metrics_module=self.maps_manager.metrics_module, - network_task=self.maps_manager.network_task, - model=model, - dataloader=valid_loader, - criterion=criterion, - alpha=alpha, - target=True, - ) - - model.train() - train_source_loader.dataset.train() - train_target_loader.dataset.train() - - log_writer.step( - epoch, - i, - metrics_train_target, - metrics_valid_target, - len(train_target_loader), - "training_target.tsv", - ) - - logger.info( - f"{self.config.data.mode} level training loss for target data is {metrics_train_target['loss']} " - f"at the end of iteration {i}" - ) - logger.info( - f"{self.config.data.mode} level validation loss for target data is {metrics_valid_target['loss']} " - f"at the end of iteration {i}" - ) - - # Save checkpoints and best models - best_dict = retain_best.step(metrics_valid_target) - self.maps_manager._write_weights( - { - "model": model.state_dict(), - "epoch": epoch, - "name": self.config.model.architecture, - }, - best_dict, - split, - network=network, - save_all_models=False, - ) - self.maps_manager._write_weights( - { - "optimizer": optimizer.state_dict(), # TO MODIFY - "epoch": epoch, - "name": self.config.optimizer, - }, - None, - split, - filename="optimizer.pth.tar", - save_all_models=False, - ) - - epoch += 1 - - self.validator._test_loader_ssda( - self.maps_manager, - train_target_loader, - criterion, - data_group="train", - split=split, - selection_metrics=self.config.validation.selection_metrics, - network=network, - target=True, - alpha=0, - ) - self.validator._test_loader_ssda( - self.maps_manager, - valid_loader, - criterion, - data_group="validation", - split=split, - selection_metrics=self.config.validation.selection_metrics, - network=network, - target=True, - alpha=0, - ) - - if save_outputs(self.maps_manager.network_task): - self.validator._compute_output_tensors( - self.maps_manager, - train_target_loader.dataset, - "train", - split, - self.config.validation.selection_metrics, - nb_images=1, - network=network, - ) - 
self.validator._compute_output_tensors(
-                self.maps_manager,
-                train_target_loader.dataset,
-                "validation",
-                split,
-                self.config.validation.selection_metrics,
-                nb_images=1,
-                network=network,
-            )
-
     def _init_callbacks(self) -> None:
         """
         Initializes training callbacks.
diff --git a/clinicadl/utils/cli_param/option.py b/clinicadl/utils/cli_param/option.py
index 6ff86cda2..75438ceda 100644
--- a/clinicadl/utils/cli_param/option.py
+++ b/clinicadl/utils/cli_param/option.py
@@ -58,13 +58,6 @@
     multiple=True,
     default=None,
 )
-ssda_network = click.option(
-    "--ssda_network",
-    type=bool,
-    default=False,
-    show_default=True,
-    help="ssda training.",
-)
 valid_longitudinal = click.option(
     "--valid_longitudinal/--valid_baseline",
     type=bool,
diff --git a/clinicadl/utils/iotools/train_utils.py b/clinicadl/utils/iotools/train_utils.py
index e4347de3b..71595811d 100644
--- a/clinicadl/utils/iotools/train_utils.py
+++ b/clinicadl/utils/iotools/train_utils.py
@@ -198,3 +198,65 @@ def merge_cli_and_config_file_options(task: Task, **kwargs) -> Dict[str, Any]:
             pass
     ###
     return options
+
+
+def merge_cli_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str, Any]:
+    """
+    Merges options passed by the user via the CLI with options stored in a
+    MAPS json file.
+
+    Priority is given to options passed by the user via the CLI: an option is
+    taken from the MAPS json only if it was not set on the command line.
+
+    Parameters
+    ----------
+    maps_json : Path
+        The path to the MAPS json file.
+    **kwargs
+        The options passed via the CLI.
+
+    Returns
+    -------
+    Dict[str, Any]
+        A dictionary with training options.
+    """
+    from clinicadl.caps_dataset.caps_dataset_utils import read_json
+
+    options = read_json(maps_json)
+    for arg in kwargs:
+        if (
+            click.get_current_context().get_parameter_source(arg)
+            == ParameterSource.COMMANDLINE
+        ):
+            options[arg] = kwargs[arg]
+
+    return options
+
+
+def merge_options_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str, Any]:
+    """
+    Merges options passed as keyword arguments with options stored in a
+    MAPS json file.
+
+    Unlike merge_cli_and_maps_json_options, priority is always given to the
+    keyword arguments: every option in kwargs overrides the value read from
+    the MAPS json.
+
+    Parameters
+    ----------
+    maps_json : Path
+        The path to the MAPS json file.
+    **kwargs
+        The options to merge in.
+
+    Returns
+    -------
+    Dict[str, Any]
+        A dictionary with training options.
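+
+    Examples
+    --------
+    A minimal sketch with a hypothetical MAPS json::
+
+        options = merge_options_and_maps_json_options(
+            Path("maps/maps.json"), gpu=False
+        )
+        assert options["gpu"] is False  # kwargs always take precedence here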
+ """ + from clinicadl.caps_dataset.caps_dataset_utils import read_json + + options = read_json(maps_json) + for arg in kwargs: + options[arg] = kwargs[arg] + + return options diff --git a/clinicadl/utils/iotools/trainer_utils.py b/clinicadl/utils/iotools/trainer_utils.py index b77229ea6..ac1b6a3bf 100644 --- a/clinicadl/utils/iotools/trainer_utils.py +++ b/clinicadl/utils/iotools/trainer_utils.py @@ -19,8 +19,7 @@ def create_parameters_dict(config): parameters["transfer_path"] = False if parameters["data_augmentation"] == (): parameters["data_augmentation"] = False - parameters["preprocessing_dict_target"] = parameters["preprocessing_json_target"] - del parameters["preprocessing_json_target"] + del parameters["preprocessing_json"] # if "tsv_path" in parameters: # parameters["tsv_path"] = parameters["tsv_path"] diff --git a/clinicadl/validator/config.py b/clinicadl/validator/config.py deleted file mode 100644 index 2f8c8a30a..000000000 --- a/clinicadl/validator/config.py +++ /dev/null @@ -1,47 +0,0 @@ -from abc import ABC, abstractmethod -from pathlib import Path -from typing import Optional, Union - -from pydantic import ( - BaseModel, - ConfigDict, - computed_field, - field_validator, -) - -from clinicadl.utils.factories import DefaultFromLibrary - - -class ValidatorConfig(BaseModel): - """Base config class to configure the validator.""" - - maps_path: Path - mode: str - network_task: str - num_networks: Optional[int] = None - fsdp: Optional[bool] = None - amp: Optional[bool] = None - metrics_module: Optional = None - n_classes: Optional[int] = None - nb_unfrozen_layers: Optional[int] = None - std_amp: Optional[bool] = None - - # pydantic config - model_config = ConfigDict( - validate_assignment=True, - use_enum_values=True, - validate_default=True, - ) - - @computed_field - @property - @abstractmethod - def metric(self) -> str: - """The name of the metric.""" - - @field_validator("get_not_nans", mode="after") - @classmethod - def validator_get_not_nans(cls, v): - assert not v, "get_not_nans not supported in ClinicaDL. Please set to False." 
- - return v diff --git a/clinicadl/validator/validator.py b/clinicadl/validator/validator.py deleted file mode 100644 index c8f5e9451..000000000 --- a/clinicadl/validator/validator.py +++ /dev/null @@ -1,496 +0,0 @@ -from logging import getLogger -from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union - -import numpy as np -import pandas as pd -import torch -import torch.distributed as dist -from torch.amp import autocast -from torch.nn.modules.loss import _Loss -from torch.utils.data import DataLoader - -from clinicadl.maps_manager.maps_manager import MapsManager -from clinicadl.metrics.metric_module import MetricModule -from clinicadl.metrics.utils import find_selection_metrics -from clinicadl.network.network import Network -from clinicadl.trainer.tasks_utils import columns, compute_metrics, generate_test_row -from clinicadl.utils import cluster -from clinicadl.utils.computational.ddp import DDP, init_ddp -from clinicadl.utils.enum import ( - ClassificationLoss, - ClassificationMetric, - ReconstructionLoss, - ReconstructionMetric, - RegressionLoss, - RegressionMetric, - Task, -) -from clinicadl.utils.exceptions import ( - ClinicaDLArgumentError, - ClinicaDLConfigurationError, - MAPSError, -) - -logger = getLogger("clinicadl.maps_manager") -level_list: List[str] = ["warning", "info", "debug"] - - -# TODO save weights on CPU for better compatibility - - -class Validator: - def test( - self, - mode: str, - metrics_module: MetricModule, - n_classes: int, - network_task, - model: Network, - dataloader: DataLoader, - criterion: _Loss, - use_labels: bool = True, - amp: bool = False, - report_ci=False, - ) -> Tuple[pd.DataFrame, Dict[str, float]]: - """ - Computes the predictions and evaluation metrics. - - Parameters - ---------- - model: Network - The model trained. - dataloader: DataLoader - Wrapper of a CapsDataset. - criterion: _Loss - Function to calculate the loss. - use_labels: bool - If True the true_label will be written in output DataFrame - and metrics dict will be created. - amp: bool - If True, enables Pytorch's automatic mixed precision. - - Returns - ------- - the results and metrics on the image level. 
- """ - model.eval() - dataloader.dataset.eval() - - results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) - total_loss = {} - with torch.no_grad(): - for i, data in enumerate(dataloader): - # initialize the loss list to save the loss components - with autocast("cuda", enabled=amp): - outputs, loss_dict = model(data, criterion, use_labels=use_labels) - - if i == 0: - for loss_component in loss_dict.keys(): - total_loss[loss_component] = 0 - for loss_component in total_loss.keys(): - total_loss[loss_component] += loss_dict[loss_component].float() - - # Generate detailed DataFrame - for idx in range(len(data["participant_id"])): - row = generate_test_row( - network_task, - mode, - metrics_module, - n_classes, - idx, - data, - outputs.float(), - ) - row_df = pd.DataFrame( - row, columns=columns(network_task, mode, n_classes) - ) - results_df = pd.concat([results_df, row_df]) - - del outputs, loss_dict - dataframes = [None] * dist.get_world_size() - dist.gather_object( - results_df, dataframes if dist.get_rank() == 0 else None, dst=0 - ) - if dist.get_rank() == 0: - results_df = pd.concat(dataframes) - del dataframes - results_df.reset_index(inplace=True, drop=True) - - if not use_labels: - metrics_dict = None - else: - metrics_dict = compute_metrics( - network_task, results_df, metrics_module, report_ci=report_ci - ) - for loss_component in total_loss.keys(): - dist.reduce(total_loss[loss_component], dst=0) - loss_value = total_loss[loss_component].item() / cluster.world_size - - if report_ci: - metrics_dict["Metric_names"].append(loss_component) - metrics_dict["Metric_values"].append(loss_value) - metrics_dict["Lower_CI"].append("N/A") - metrics_dict["Upper_CI"].append("N/A") - metrics_dict["SE"].append("N/A") - - else: - metrics_dict[loss_component] = loss_value - - torch.cuda.empty_cache() - - return results_df, metrics_dict - - def test_da( - self, - mode: str, - metrics_module: MetricModule, - n_classes: int, - network_task: Union[str, Task], - model: Network, - dataloader: DataLoader, - criterion: _Loss, - alpha: float = 0, - use_labels: bool = True, - target: bool = True, - report_ci=False, - ) -> Tuple[pd.DataFrame, Dict[str, float]]: - """ - Computes the predictions and evaluation metrics. - - Args: - model: the model trained. - dataloader: wrapper of a CapsDataset. - criterion: function to calculate the loss. - use_labels: If True the true_label will be written in output DataFrame - and metrics dict will be created. - Returns: - the results and metrics on the image level. 
- """ - model.eval() - dataloader.dataset.eval() - results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) - total_loss = 0 - with torch.no_grad(): - for i, data in enumerate(dataloader): - outputs, loss_dict = model.compute_outputs_and_loss_test( - data, criterion, alpha, target - ) - total_loss += loss_dict["loss"].item() - - # Generate detailed DataFrame - for idx in range(len(data["participant_id"])): - row = generate_test_row( - network_task, - mode, - metrics_module, - n_classes, - idx, - data, - outputs, - ) - row_df = pd.DataFrame( - row, columns=columns(network_task, mode, n_classes) - ) - results_df = pd.concat([results_df, row_df]) - - del outputs, loss_dict - results_df.reset_index(inplace=True, drop=True) - - if not use_labels: - metrics_dict = None - else: - metrics_dict = compute_metrics( - network_task, results_df, metrics_module, report_ci=report_ci - ) - if report_ci: - metrics_dict["Metric_names"].append("loss") - metrics_dict["Metric_values"].append(total_loss) - metrics_dict["Lower_CI"].append("N/A") - metrics_dict["Upper_CI"].append("N/A") - metrics_dict["SE"].append("N/A") - - else: - metrics_dict["loss"] = total_loss - - torch.cuda.empty_cache() - - return results_df, metrics_dict - - def _test_loader( - self, - maps_manager: MapsManager, - dataloader, - criterion, - data_group: str, - split: int, - selection_metrics, - use_labels=True, - gpu=None, - amp=False, - network=None, - report_ci=True, - ): - """ - Launches the testing task on a dataset wrapped by a DataLoader and writes prediction TSV files. - - Args: - dataloader (torch.utils.data.DataLoader): DataLoader wrapping the test CapsDataset. - criterion (torch.nn.modules.loss._Loss): optimization criterion used during training. - data_group (str): name of the data group used for the testing task. - split (int): Index of the split used to train the model tested. - selection_metrics (list[str]): List of metrics used to select the best models which are tested. - use_labels (bool): If True, the labels must exist in test meta-data and metrics are computed. - gpu (bool): If given, a new value for the device of the model will be computed. - amp (bool): If enabled, uses Automatic Mixed Precision (requires GPU usage). - network (int): Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in selection_metrics: - if cluster.master: - log_dir = ( - maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - ) - maps_manager.write_description_log( - log_dir, - data_group, - dataloader.dataset.config.data.caps_dict, - dataloader.dataset.config.data.data_df, - ) - - # load the best trained model during the training - model, _ = maps_manager._init_model( - transfer_path=maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=gpu, - network=network, - ) - model = DDP( - model, - fsdp=maps_manager.fully_sharded_data_parallel, - amp=maps_manager.amp, - ) - - prediction_df, metrics = self.test( - mode=maps_manager.mode, - metrics_module=maps_manager.metrics_module, - n_classes=maps_manager.n_classes, - network_task=maps_manager.network_task, - model=model, - dataloader=dataloader, - criterion=criterion, - use_labels=use_labels, - amp=amp, - report_ci=report_ci, - ) - if use_labels: - if network is not None: - metrics[f"{maps_manager.mode}_id"] = network - - loss_to_log = ( - metrics["Metric_values"][-1] if report_ci else metrics["loss"] - ) - - logger.info( - f"{maps_manager.mode} level {data_group} loss is {loss_to_log} for model selected on {selection_metric}" - ) - - if cluster.master: - # Replace here - maps_manager._mode_level_to_tsv( - prediction_df, - metrics, - split, - selection_metric, - data_group=data_group, - ) - - def _test_loader_ssda( - self, - maps_manager: MapsManager, - dataloader, - criterion, - alpha, - data_group, - split, - selection_metrics, - use_labels=True, - gpu=None, - network=None, - target=False, - report_ci=True, - ): - """ - Launches the testing task on a dataset wrapped by a DataLoader and writes prediction TSV files. - - Args: - dataloader (torch.utils.data.DataLoader): DataLoader wrapping the test CapsDataset. - criterion (torch.nn.modules.loss._Loss): optimization criterion used during training. - data_group (str): name of the data group used for the testing task. - split (int): Index of the split used to train the model tested. - selection_metrics (list[str]): List of metrics used to select the best models which are tested. - use_labels (bool): If True, the labels must exist in test meta-data and metrics are computed. - gpu (bool): If given, a new value for the device of the model will be computed. - network (int): Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in selection_metrics: - log_dir = ( - maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - ) - maps_manager.write_description_log( - log_dir, - data_group, - dataloader.dataset.caps_dict, - dataloader.dataset.df, - ) - - # load the best trained model during the training - model, _ = maps_manager._init_model( - transfer_path=maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=gpu, - network=network, - ) - prediction_df, metrics = self.test_da( - network_task=maps_manager.network_task, - model=model, - dataloader=dataloader, - criterion=criterion, - target=target, - report_ci=report_ci, - mode=maps_manager.mode, - metrics_module=maps_manager.metrics_module, - n_classes=maps_manager.n_classes, - ) - if use_labels: - if network is not None: - metrics[f"{maps_manager.mode}_id"] = network - - if report_ci: - loss_to_log = metrics["Metric_values"][-1] - else: - loss_to_log = metrics["loss"] - - logger.info( - f"{maps_manager.mode} level {data_group} loss is {loss_to_log} for model selected on {selection_metric}" - ) - - # Replace here - maps_manager._mode_level_to_tsv( - prediction_df, metrics, split, selection_metric, data_group=data_group - ) - - @torch.no_grad() - def _compute_output_tensors( - self, - maps_manager: MapsManager, - dataset, - data_group, - split, - selection_metrics, - nb_images=None, - gpu=None, - network=None, - ): - """ - Compute the output tensors and saves them in the MAPS. - - Args: - dataset (clinicadl.caps_dataset.data.CapsDataset): wrapper of the data set. - data_group (str): name of the data group used for the task. - split (int): split number. - selection_metrics (list[str]): metrics used for model selection. - nb_images (int): number of full images to write. Default computes the outputs of the whole data set. - gpu (bool): If given, a new value for the device of the model will be computed. - network (int): Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in selection_metrics: - # load the best trained model during the training - model, _ = maps_manager._init_model( - transfer_path=maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=gpu, - network=network, - nb_unfrozen_layer=maps_manager.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=maps_manager.fully_sharded_data_parallel, - amp=maps_manager.amp, - ) - model.eval() - - tensor_path = ( - maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - / "tensors" - ) - if cluster.master: - tensor_path.mkdir(parents=True, exist_ok=True) - dist.barrier() - - if nb_images is None: # Compute outputs for the whole data set - nb_modes = len(dataset) - else: - nb_modes = nb_images * dataset.elem_per_image - - for i in [ - *range(cluster.rank, nb_modes, cluster.world_size), - *range(int(nb_modes % cluster.world_size <= cluster.rank)), - ]: - data = dataset[i] - image = data["image"] - x = image.unsqueeze(0).to(model.device) - with autocast("cuda", enabled=maps_manager.std_amp): - output = model(x) - output = output.squeeze(0).cpu().float() - participant_id = data["participant_id"] - session_id = data["session_id"] - mode_id = data[f"{maps_manager.mode}_id"] - input_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_input.pt" - output_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_output.pt" - torch.save(image, tensor_path / input_filename) - torch.save(output, tensor_path / output_filename) - logger.debug(f"File saved at {[input_filename, output_filename]}") - - def _ensemble_prediction( - self, - maps_manager: MapsManager, - data_group, - split, - selection_metrics, - use_labels=True, - skip_leak_check=False, - ): - """Computes the results on the image-level.""" - - if not selection_metrics: - selection_metrics = find_selection_metrics(maps_manager.maps_path, split) - - for selection_metric in selection_metrics: - ##################### - # Soft voting - if maps_manager.num_networks > 1 and not skip_leak_check: - maps_manager._ensemble_to_tsv( - split, - selection=selection_metric, - data_group=data_group, - use_labels=use_labels, - ) - elif maps_manager.mode != "image" and not skip_leak_check: - maps_manager._mode_to_image_tsv( - split, - selection=selection_metric, - data_group=data_group, - use_labels=use_labels, - ) diff --git a/tests/test_interpret.py b/tests/test_interpret.py index 7b4c9358b..ef6f394f8 100644 --- a/tests/test_interpret.py +++ b/tests/test_interpret.py @@ -7,7 +7,7 @@ import pytest from clinicadl.interpret.config import InterpretConfig -from clinicadl.predict.predict_manager import PredictManager +from clinicadl.predictor.predictor import Predictor @pytest.fixture(params=["classification", "regression"]) @@ -77,14 +77,21 @@ def run_interpret(cnn_input, tmp_out_dir, ref_dir): assert train_error for method in list(InterpretationMethod): - interpret_config = InterpretConfig( - maps_dir=maps_path, - data_group="train", - name=f"test-{method}", - method_cls=method, + from clinicadl.utils.iotools.train_utils import ( + merge_options_and_maps_json_options, ) - interpret_manager = PredictManager(interpret_config) + + dict_ = { + "maps_dir": maps_path, + "data_group": "train", + "name": f"test-{method}", + "method_cls": method, + } + # options = merge_options_and_maps_json_options(maps_path / "maps.json", **dict_) + interpret_config = InterpretConfig(**dict_) + + interpret_manager = Predictor(interpret_config) interpret_manager.interpret() 
interpret_map = interpret_manager.get_interpretation( - "train", f"test-{interpret_config.method}" + "train", f"test-{interpret_config.interpret.method}" ) diff --git a/tests/test_predict.py b/tests/test_predict.py index 849f0e20d..e515ef41c 100644 --- a/tests/test_predict.py +++ b/tests/test_predict.py @@ -7,8 +7,8 @@ import pytest from clinicadl.metrics.utils import get_metrics -from clinicadl.predict.predict_manager import PredictManager -from clinicadl.predict.utils import get_prediction +from clinicadl.predictor.predictor import Predictor +from clinicadl.predictor.utils import get_prediction from .testing_tools import compare_folders, modify_maps @@ -101,7 +101,7 @@ def test_predict(cmdopt, tmp_path, test_name): # with open(json_path, "w") as f: # f.write(json_data) - from clinicadl.predict.config import PredictConfig + from clinicadl.predictor.config import PredictConfig predict_config = PredictConfig( maps_dir=model_folder, @@ -113,7 +113,7 @@ def test_predict(cmdopt, tmp_path, test_name): overwrite=True, diagnoses=["CN"], ) - predict_manager = PredictManager(predict_config) + predict_manager = Predictor(predict_config) predict_manager.predict() for mode in modes: diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py index c7fbcb276..d4611e188 100644 --- a/tests/test_train_ae.py +++ b/tests/test_train_ae.py @@ -107,6 +107,11 @@ def test_train_ae(cmdopt, tmp_path, test_name): no_gpu=cmdopt["no-gpu"], adapt_base_dir=cmdopt["adapt-base-dir"], ) + json_data_out = modify_maps( + maps=json_data_out, + base_dir=base_dir, + ssda=True, + ) assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( diff --git a/tests/test_train_cnn.py b/tests/test_train_cnn.py index 761fedbee..2a29a3166 100644 --- a/tests/test_train_cnn.py +++ b/tests/test_train_cnn.py @@ -125,7 +125,13 @@ def test_train_cnn(cmdopt, tmp_path, test_name): base_dir=base_dir, no_gpu=cmdopt["no-gpu"], adapt_base_dir=cmdopt["adapt-base-dir"], + ssda=True, ) + json_data_out = modify_maps( + maps=json_data_out, + base_dir=base_dir, + ssda=True, + ) assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( diff --git a/tests/test_train_from_json.py b/tests/test_train_from_json.py index 06b307b0f..f1bdaff01 100644 --- a/tests/test_train_from_json.py +++ b/tests/test_train_from_json.py @@ -74,6 +74,7 @@ def test_determinism(cmdopt, tmp_path): # Reproduce experiment (train from json) config_json = tmp_out_dir / "maps_roi_cnn/maps.json" + flag_error = not system( f"clinicadl train from_json {str(config_json)} {str(reproduced_maps_dir)} -s 0" ) diff --git a/tests/test_transfer_learning.py b/tests/test_transfer_learning.py index d49cbd61f..6a7850f9b 100644 --- a/tests/test_transfer_learning.py +++ b/tests/test_transfer_learning.py @@ -169,6 +169,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): json_data_ref["gpu"] = json_data_out["gpu"] json_data_ref["transfer_path"] = json_data_out["transfer_path"] json_data_ref["tsv_path"] = json_data_out["tsv_path"] + json_data_out["ssda_network"] = json_data_ref["ssda_network"] ### assert json_data_out == json_data_ref # ["mode"] == mode diff --git a/tests/testing_tools.py b/tests/testing_tools.py index 4044d1022..885096374 100644 --- a/tests/testing_tools.py +++ b/tests/testing_tools.py @@ -174,6 +174,8 @@ def modify_maps( base_dir: Path, no_gpu: bool = False, adapt_base_dir: bool = False, + modify_split: bool = False, + ssda: bool = False, ) -> Dict[str, Any]: """ Modifies a MAPS dictionary if the user passed --no-gpu or 
--adapt-base-dir flags. @@ -208,6 +210,12 @@ def modify_maps( ) except KeyError: # maps with only caps directory pass + + if modify_split: + maps["split"] = (0,) + + if ssda: + maps["ssda_network"] = False return maps diff --git a/tests/unittests/nn/networks/test_ssda.py b/tests/unittests/nn/networks/test_ssda.py deleted file mode 100644 index 06da85ff2..000000000 --- a/tests/unittests/nn/networks/test_ssda.py +++ /dev/null @@ -1,11 +0,0 @@ -import torch - -from clinicadl.nn.networks.ssda import Conv5_FC3_SSDA - - -def test_UNet(): - input_ = torch.randn(2, 1, 64, 63, 62) - network = Conv5_FC3_SSDA(input_size=(1, 64, 63, 62), output_size=3) - output = network(input_) - for out in output: - assert out.shape == torch.Size((2, 3)) diff --git a/tests/unittests/train/test_utils.py b/tests/unittests/train/test_utils.py index 6b33787eb..2914d2d9b 100644 --- a/tests/unittests/train/test_utils.py +++ b/tests/unittests/train/test_utils.py @@ -7,7 +7,6 @@ expected_classification = { "architecture": "default", "multi_network": False, - "ssda_network": False, "dropout": 0.0, "latent_space_size": 128, "feature_size": 1024, @@ -65,7 +64,6 @@ expected_regression = { "architecture": "default", "multi_network": False, - "ssda_network": False, "dropout": 0.0, "latent_space_size": 128, "feature_size": 1024, @@ -121,7 +119,6 @@ expected_reconstruction = { "architecture": "default", "multi_network": False, - "ssda_network": False, "dropout": 0.0, "latent_space_size": 128, "feature_size": 1024, diff --git a/tests/unittests/train/trainer/test_training_config.py b/tests/unittests/train/trainer/test_training_config.py index 503b88ddf..c6b130cb8 100644 --- a/tests/unittests/train/trainer/test_training_config.py +++ b/tests/unittests/train/trainer/test_training_config.py @@ -5,9 +5,8 @@ from clinicadl.caps_dataset.data_config import DataConfig from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig -from clinicadl.config.config.ssda import SSDAConfig from clinicadl.network.config import NetworkConfig -from clinicadl.splitter.validation import ValidationConfig +from clinicadl.predictor.validation import ValidationConfig from clinicadl.trainer.transfer_learning import TransferLearningConfig from clinicadl.transforms.config import TransformsConfig @@ -70,31 +69,6 @@ def test_model_config(): ) -def test_ssda_config(caps_example): - preprocessing_json_target = ( - caps_example / "tensor_extraction" / "preprocessing.json" - ) - c = SSDAConfig( - ssda_network=True, - preprocessing_json_target=preprocessing_json_target, - ) - expected_preprocessing_dict = { - "preprocessing": "t1-linear", - "mode": "image", - "use_uncropped_image": False, - "prepare_dl": False, - "extract_json": "t1-linear_mode-image.json", - "file_type": { - "pattern": "*space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w.nii.gz", - "description": "T1W Image registered using t1-linear and cropped (matrix size 169\u00d7208\u00d7179, 1 mm isotropic voxels)", - "needed_pipeline": "t1-linear", - }, - } - assert c.preprocessing_dict_target == expected_preprocessing_dict - c = SSDAConfig() - assert c.preprocessing_dict_target == {} - - def test_transferlearning_config(): c = TransferLearningConfig(transfer_path=False) assert c.transfer_path is None From 109ee64ab9267e1f0f83b0b57f2ba4e0150904be Mon Sep 17 00:00:00 2001 From: camillebrianceau <57992134+camillebrianceau@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:02:15 +0100 Subject: [PATCH 09/16] Base for v2 (#676) * base for clinicadl v2 --- .github/workflows/lint.yaml | 4 +- 
.github/workflows/test.yml | 4 +- clinicadl/API_test.py | 294 +++-- clinicadl/commandline/modules_options/data.py | 2 +- .../commandline/modules_options/dataloader.py | 2 +- .../commandline/modules_options/extraction.py | 6 +- .../modules_options/maps_manager.py | 2 +- .../commandline/modules_options/network.py | 2 +- .../modules_options/optimization.py | 2 +- .../commandline/modules_options/optimizer.py | 2 +- .../modules_options/preprocessing.py | 6 +- .../pipelines/generate/artifacts/cli.py | 4 +- .../pipelines/generate/hypometabolic/cli.py | 4 +- .../pipelines/generate/random/cli.py | 4 +- .../pipelines/generate/trivial/cli.py | 4 +- .../commandline/pipelines/interpret/cli.py | 2 +- .../commandline/pipelines/predict/cli.py | 2 +- .../prepare_data/prepare_data_cli.py | 4 +- .../prepare_data_from_bids_cli.py | 4 +- .../pipelines/quality_check/pet_linear/cli.py | 2 +- .../pipelines/quality_check/t1_linear/cli.py | 2 +- .../pipelines/train/classification/cli.py | 2 +- .../pipelines/train/from_json/cli.py | 2 +- .../pipelines/train/reconstruction/cli.py | 2 +- .../pipelines/train/regression/cli.py | 2 +- .../commandline/pipelines/train/resume/cli.py | 2 +- .../{caps_dataset => dataset}/__init__.py | 0 .../data.py => dataset/caps_dataset.py} | 6 +- .../caps_dataset_config.py | 10 +- .../caps_dataset_utils.py | 8 +- clinicadl/dataset/caps_reader.py | 62 + clinicadl/dataset/concat.py | 6 + .../config}/__init__.py | 0 .../config/extraction.py} | 0 .../config/preprocessing.py} | 0 .../{caps_dataset => dataset}/data_config.py | 1 - .../dataloader_config.py | 0 .../prepare_data}/__init__.py | 0 .../prepare_data/prepare_data.py | 6 +- .../prepare_data/prepare_data_utils.py | 0 .../preprocessing => dataset}/utils.py | 32 +- .../__init__.py | 0 .../config.py | 0 .../experiment_manager/experiment_manager.py | 7 + .../maps_manager.py | 14 +- clinicadl/hugging_face/hugging_face.py | 2 +- clinicadl/interpret/config.py | 10 +- clinicadl/interpret/gradients.py | 2 +- .../{monai_metrics => metrics}/__init__.py | 0 .../config/__init__.py | 0 .../{monai_metrics => metrics}/config/base.py | 0 .../config/classification.py | 0 .../{monai_metrics => metrics}/config/enum.py | 0 .../config/factory.py | 0 .../config/generation.py | 0 .../config/reconstruction.py | 0 .../config/regression.py | 0 .../config/segmentation.py | 0 .../{monai_metrics => metrics}/factory.py | 0 .../{ => old_metrics}/metric_module.py | 0 clinicadl/metrics/{ => old_metrics}/utils.py | 0 clinicadl/{network/cnn => model}/__init__.py | 0 clinicadl/model/clinicadl_model.py | 8 + .../{monai_networks => networks}/__init__.py | 0 .../config/__init__.py | 0 .../config/autoencoder.py | 2 +- .../config/base.py | 2 +- .../config/cnn.py | 0 .../config/conv_decoder.py | 2 +- .../config/conv_encoder.py | 2 +- .../config/densenet.py | 2 +- .../config/factory.py | 0 .../config/generator.py | 0 .../config/mlp.py | 2 +- .../config/resnet.py | 4 +- .../config/senet.py | 0 .../config/unet.py | 2 +- .../config/vit.py | 4 +- .../{monai_networks => networks}/factory.py | 2 +- .../nn/__init__.py | 0 .../nn/att_unet.py | 0 .../nn/autoencoder.py | 0 .../{monai_networks => networks}/nn/cnn.py | 0 .../nn/conv_decoder.py | 0 .../nn/conv_encoder.py | 0 .../nn/densenet.py | 0 .../nn/generator.py | 0 .../unet => networks/nn/layers}/__init__.py | 0 .../nn/layers/resnet.py | 0 .../nn/layers/senet.py | 0 .../nn/layers/unet.py | 0 .../nn/layers/unpool.py | 0 .../nn/layers/utils/__init__.py | 0 .../nn/layers/utils/enum.py | 0 .../nn/layers/utils/types.py | 0 .../nn/layers/vit.py 
| 0 .../{monai_networks => networks}/nn/mlp.py | 0 .../{monai_networks => networks}/nn/resnet.py | 0 .../{monai_networks => networks}/nn/senet.py | 0 .../{monai_networks => networks}/nn/unet.py | 0 .../nn/utils/__init__.py | 0 .../nn/utils/checks.py | 0 .../nn/utils/shapes.py | 0 .../{monai_networks => networks}/nn/vae.py | 0 .../{monai_networks => networks}/nn/vit.py | 0 .../old_network}/__init__.py | 0 .../old_network/autoencoder}/__init__.py | 0 .../autoencoder/cnn_transformer.py | 2 +- .../old_network}/autoencoder/models.py | 8 +- .../old_network}/cnn/SECNN.py | 0 .../old_network/cnn}/__init__.py | 0 .../old_network}/cnn/models.py | 10 +- .../old_network}/cnn/random.py | 4 +- .../old_network}/cnn/resnet.py | 0 .../old_network}/cnn/resnet3D.py | 0 .../old_network}/config.py | 0 .../old_network}/network.py | 0 .../old_network}/network_utils.py | 0 .../old_network/nn}/__init__.py | 0 .../old_network}/nn/blocks/__init__.py | 0 .../old_network}/nn/blocks/decoder.py | 2 +- .../old_network}/nn/blocks/encoder.py | 2 +- .../old_network}/nn/blocks/residual.py | 0 .../old_network}/nn/blocks/se.py | 0 .../old_network}/nn/blocks/unet.py | 0 .../old_network}/nn/layers/__init__.py | 0 .../nn/layers/factory/__init__.py | 0 .../old_network}/nn/layers/factory/conv.py | 0 .../old_network}/nn/layers/factory/norm.py | 0 .../old_network}/nn/layers/factory/pool.py | 0 .../old_network}/nn/layers/pool.py | 0 .../old_network}/nn/layers/reverse.py | 0 .../old_network}/nn/layers/unflatten.py | 0 .../old_network}/nn/layers/unpool.py | 0 .../old_network}/nn/networks/__init__.py | 0 .../old_network}/nn/networks/ae.py | 10 +- .../old_network}/nn/networks/cnn.py | 2 +- .../nn/networks/factory/__init__.py | 0 .../old_network}/nn/networks/factory/ae.py | 4 +- .../nn/networks/factory/resnet.py | 2 +- .../old_network}/nn/networks/factory/secnn.py | 2 +- .../old_network}/nn/networks/random.py | 4 +- .../old_network}/nn/networks/ssda.py | 2 +- .../old_network}/nn/networks/unet.py | 2 +- .../old_network}/nn/networks/vae.py | 6 +- .../{ => networks/old_network}/nn/utils.py | 0 .../old_network}/sub_network.py | 4 +- .../networks/old_network/unet/__init__.py | 0 .../old_network}/unet/unet.py | 2 +- .../networks/old_network/vae/__init__.py | 0 .../old_network}/vae/advanced_CVAE.py | 4 +- .../old_network}/vae/base_vae.py | 2 +- .../old_network}/vae/convolutional_VAE.py | 4 +- .../old_network}/vae/vae_layers.py | 2 +- .../old_network}/vae/vae_utils.py | 0 .../old_network}/vae/vanilla_vae.py | 4 +- clinicadl/{optim => optimization}/__init__.py | 0 clinicadl/{optim => optimization}/config.py | 0 .../early_stopping/__init__.py | 0 .../early_stopping/config.py | 0 .../early_stopping/early_stopper.py | 0 .../lr_scheduler/__init__.py | 0 .../lr_scheduler/config.py | 0 .../lr_scheduler/enum.py | 0 .../lr_scheduler/factory.py | 0 .../optimizer/__init__.py | 0 .../optimizer/config.py | 0 .../{optim => optimization}/optimizer/enum.py | 0 .../optimizer/factory.py | 0 .../optimizer/utils.py | 0 clinicadl/optimizer/optimization.py | 16 - clinicadl/optimizer/optimizer.py | 18 - clinicadl/predictor/config.py | 8 +- clinicadl/predictor/old_predictor.py | 1153 ++++++++++++++++ clinicadl/predictor/predictor.py | 1157 +---------------- clinicadl/predictor/utils.py | 2 +- .../quality_check/pet_linear/quality_check.py | 4 +- clinicadl/quality_check/pet_linear/utils.py | 2 +- .../quality_check/t1_linear/quality_check.py | 2 +- clinicadl/quality_check/t1_linear/utils.py | 8 +- clinicadl/random_search/random_search.py | 2 +- 
.../random_search/random_search_config.py | 2 +- clinicadl/splitter/config.py | 2 +- clinicadl/splitter/kfold.py | 24 + .../splitter/{splitter.py => old_splitter.py} | 0 clinicadl/splitter/split.py | 18 + clinicadl/tmp_config.py | 32 +- clinicadl/trainer/config/classification.py | 4 +- clinicadl/trainer/config/reconstruction.py | 2 +- clinicadl/trainer/config/regression.py | 4 +- clinicadl/trainer/config/train.py | 15 +- clinicadl/trainer/old_trainer.py | 901 +++++++++++++ clinicadl/trainer/tasks_utils.py | 8 +- clinicadl/trainer/trainer.py | 905 +------------ clinicadl/transforms/config.py | 26 +- clinicadl/transforms/factory.py | 228 ++++ clinicadl/transforms/transforms.py | 236 +--- clinicadl/utils/iotools/train_utils.py | 6 +- clinicadl/utils/meta_maps/getter.py | 4 +- tests/conftest.py | 42 - tests/test_cli.py | 200 --- tests/test_generate.py | 119 -- tests/test_interpret.py | 97 -- tests/test_predict.py | 136 -- tests/test_prepare_data.py | 209 --- tests/test_qc.py | 94 -- tests/test_random_search.py | 66 - tests/test_resume.py | 75 -- tests/test_train_ae.py | 126 -- tests/test_train_cnn.py | 148 --- tests/test_train_from_json.py | 86 -- tests/test_transfer_learning.py | 185 --- tests/test_tsvtools.py | 284 ---- tests/testing_tools.py | 263 ---- .../config/test_classification.py | 4 +- .../monai_metrics/config/test_factory.py | 2 +- .../monai_metrics/config/test_generation.py | 2 +- .../config/test_reconstruction.py | 2 +- .../monai_metrics/config/test_regression.py | 2 +- .../monai_metrics/config/test_segmentation.py | 2 +- tests/unittests/monai_metrics/test_factory.py | 6 +- .../monai_networks/config/test_config.py | 34 +- .../monai_networks/config/test_factory.py | 2 +- .../monai_networks/nn/test_att_unet.py | 4 +- .../monai_networks/nn/test_autoencoder.py | 4 +- tests/unittests/monai_networks/nn/test_cnn.py | 2 +- .../monai_networks/nn/test_conv_decoder.py | 4 +- .../monai_networks/nn/test_conv_encoder.py | 4 +- .../monai_networks/nn/test_densenet.py | 6 +- .../monai_networks/nn/test_generator.py | 2 +- tests/unittests/monai_networks/nn/test_mlp.py | 4 +- .../monai_networks/nn/test_resnet.py | 8 +- .../unittests/monai_networks/nn/test_senet.py | 8 +- .../unittests/monai_networks/nn/test_unet.py | 4 +- tests/unittests/monai_networks/nn/test_vae.py | 2 +- tests/unittests/monai_networks/nn/test_vit.py | 6 +- .../monai_networks/nn/utils/test_checks.py | 2 +- .../monai_networks/nn/utils/test_shapes.py | 2 +- .../unittests/monai_networks/test_factory.py | 8 +- tests/unittests/nn/blocks/test_decoder.py | 2 +- tests/unittests/nn/blocks/test_encoder.py | 2 +- tests/unittests/nn/blocks/test_residual.py | 2 +- tests/unittests/nn/blocks/test_se.py | 4 +- tests/unittests/nn/blocks/test_unet.py | 6 +- .../nn/layers/factory/test_factories.py | 8 +- tests/unittests/nn/layers/test_layers.py | 2 +- .../nn/networks/factory/test_ae_factory.py | 6 +- .../networks/factory/test_resnet_factory.py | 4 +- .../nn/networks/factory/test_secnn_factory.py | 2 +- tests/unittests/nn/networks/test_ae.py | 2 +- tests/unittests/nn/networks/test_cnn.py | 2 +- tests/unittests/nn/networks/test_unet.py | 2 +- tests/unittests/nn/networks/test_vae.py | 2 +- tests/unittests/nn/test_utils.py | 2 +- .../optim/early_stopping/test_config.py | 2 +- .../early_stopping/test_early_stopper.py | 2 +- .../optim/lr_scheduler/test_config.py | 2 +- .../optim/lr_scheduler/test_factory.py | 2 +- .../unittests/optim/optimizer/test_config.py | 2 +- .../unittests/optim/optimizer/test_factory.py | 4 +- 
tests/unittests/optim/optimizer/test_utils.py | 4 +- tests/unittests/optim/test_config.py | 2 +- .../train/trainer/test_training_config.py | 11 +- tests/unittests/utils/test_clinica_utils.py | 4 +- 264 files changed, 2958 insertions(+), 4820 deletions(-) rename clinicadl/{caps_dataset => dataset}/__init__.py (100%) rename clinicadl/{caps_dataset/data.py => dataset/caps_dataset.py} (99%) rename clinicadl/{caps_dataset => dataset}/caps_dataset_config.py (93%) rename clinicadl/{caps_dataset => dataset}/caps_dataset_utils.py (96%) create mode 100644 clinicadl/dataset/caps_reader.py create mode 100644 clinicadl/dataset/concat.py rename clinicadl/{maps_manager => dataset/config}/__init__.py (100%) rename clinicadl/{caps_dataset/extraction/config.py => dataset/config/extraction.py} (100%) rename clinicadl/{caps_dataset/preprocessing/config.py => dataset/config/preprocessing.py} (100%) rename clinicadl/{caps_dataset => dataset}/data_config.py (98%) rename clinicadl/{caps_dataset => dataset}/dataloader_config.py (100%) rename clinicadl/{monai_networks/nn/layers => dataset/prepare_data}/__init__.py (100%) rename clinicadl/{ => dataset}/prepare_data/prepare_data.py (97%) rename clinicadl/{ => dataset}/prepare_data/prepare_data_utils.py (100%) rename clinicadl/{caps_dataset/preprocessing => dataset}/utils.py (76%) rename clinicadl/{network/autoencoder => experiment_manager}/__init__.py (100%) rename clinicadl/{maps_manager => experiment_manager}/config.py (100%) create mode 100644 clinicadl/experiment_manager/experiment_manager.py rename clinicadl/{maps_manager => experiment_manager}/maps_manager.py (98%) rename clinicadl/{monai_metrics => metrics}/__init__.py (100%) rename clinicadl/{monai_metrics => metrics}/config/__init__.py (100%) rename clinicadl/{monai_metrics => metrics}/config/base.py (100%) rename clinicadl/{monai_metrics => metrics}/config/classification.py (100%) rename clinicadl/{monai_metrics => metrics}/config/enum.py (100%) rename clinicadl/{monai_metrics => metrics}/config/factory.py (100%) rename clinicadl/{monai_metrics => metrics}/config/generation.py (100%) rename clinicadl/{monai_metrics => metrics}/config/reconstruction.py (100%) rename clinicadl/{monai_metrics => metrics}/config/regression.py (100%) rename clinicadl/{monai_metrics => metrics}/config/segmentation.py (100%) rename clinicadl/{monai_metrics => metrics}/factory.py (100%) rename clinicadl/metrics/{ => old_metrics}/metric_module.py (100%) rename clinicadl/metrics/{ => old_metrics}/utils.py (100%) rename clinicadl/{network/cnn => model}/__init__.py (100%) create mode 100644 clinicadl/model/clinicadl_model.py rename clinicadl/{monai_networks => networks}/__init__.py (100%) rename clinicadl/{monai_networks => networks}/config/__init__.py (100%) rename clinicadl/{monai_networks => networks}/config/autoencoder.py (95%) rename clinicadl/{monai_networks => networks}/config/base.py (97%) rename clinicadl/{monai_networks => networks}/config/cnn.py (100%) rename clinicadl/{monai_networks => networks}/config/conv_decoder.py (97%) rename clinicadl/{monai_networks => networks}/config/conv_encoder.py (97%) rename clinicadl/{monai_networks => networks}/config/densenet.py (97%) rename clinicadl/{monai_networks => networks}/config/factory.py (100%) rename clinicadl/{monai_networks => networks}/config/generator.py (100%) rename clinicadl/{monai_networks => networks}/config/mlp.py (96%) rename clinicadl/{monai_networks => networks}/config/resnet.py (95%) rename clinicadl/{monai_networks => networks}/config/senet.py (100%) rename 
clinicadl/{monai_networks => networks}/config/unet.py (93%) rename clinicadl/{monai_networks => networks}/config/vit.py (94%) rename clinicadl/{monai_networks => networks}/factory.py (99%) rename clinicadl/{monai_networks => networks}/nn/__init__.py (100%) rename clinicadl/{monai_networks => networks}/nn/att_unet.py (100%) rename clinicadl/{monai_networks => networks}/nn/autoencoder.py (100%) rename clinicadl/{monai_networks => networks}/nn/cnn.py (100%) rename clinicadl/{monai_networks => networks}/nn/conv_decoder.py (100%) rename clinicadl/{monai_networks => networks}/nn/conv_encoder.py (100%) rename clinicadl/{monai_networks => networks}/nn/densenet.py (100%) rename clinicadl/{monai_networks => networks}/nn/generator.py (100%) rename clinicadl/{network/unet => networks/nn/layers}/__init__.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/resnet.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/senet.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/unet.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/unpool.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/utils/__init__.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/utils/enum.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/utils/types.py (100%) rename clinicadl/{monai_networks => networks}/nn/layers/vit.py (100%) rename clinicadl/{monai_networks => networks}/nn/mlp.py (100%) rename clinicadl/{monai_networks => networks}/nn/resnet.py (100%) rename clinicadl/{monai_networks => networks}/nn/senet.py (100%) rename clinicadl/{monai_networks => networks}/nn/unet.py (100%) rename clinicadl/{monai_networks => networks}/nn/utils/__init__.py (100%) rename clinicadl/{monai_networks => networks}/nn/utils/checks.py (100%) rename clinicadl/{monai_networks => networks}/nn/utils/shapes.py (100%) rename clinicadl/{monai_networks => networks}/nn/vae.py (100%) rename clinicadl/{monai_networks => networks}/nn/vit.py (100%) rename clinicadl/{network => networks/old_network}/__init__.py (100%) rename clinicadl/{network/vae => networks/old_network/autoencoder}/__init__.py (100%) rename clinicadl/{network => networks/old_network}/autoencoder/cnn_transformer.py (98%) rename clinicadl/{network => networks/old_network}/autoencoder/models.py (89%) rename clinicadl/{network => networks/old_network}/cnn/SECNN.py (100%) rename clinicadl/{nn => networks/old_network/cnn}/__init__.py (100%) rename clinicadl/{network => networks/old_network}/cnn/models.py (97%) rename clinicadl/{network => networks/old_network}/cnn/random.py (98%) rename clinicadl/{network => networks/old_network}/cnn/resnet.py (100%) rename clinicadl/{network => networks/old_network}/cnn/resnet3D.py (100%) rename clinicadl/{network => networks/old_network}/config.py (100%) rename clinicadl/{network => networks/old_network}/network.py (100%) rename clinicadl/{network => networks/old_network}/network_utils.py (100%) rename clinicadl/{prepare_data => networks/old_network/nn}/__init__.py (100%) rename clinicadl/{ => networks/old_network}/nn/blocks/__init__.py (100%) rename clinicadl/{ => networks/old_network}/nn/blocks/decoder.py (98%) rename clinicadl/{ => networks/old_network}/nn/blocks/encoder.py (98%) rename clinicadl/{ => networks/old_network}/nn/blocks/residual.py (100%) rename clinicadl/{ => networks/old_network}/nn/blocks/se.py (100%) rename clinicadl/{ => networks/old_network}/nn/blocks/unet.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/__init__.py (100%) rename 
clinicadl/{ => networks/old_network}/nn/layers/factory/__init__.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/factory/conv.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/factory/norm.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/factory/pool.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/pool.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/reverse.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/unflatten.py (100%) rename clinicadl/{ => networks/old_network}/nn/layers/unpool.py (100%) rename clinicadl/{ => networks/old_network}/nn/networks/__init__.py (100%) rename clinicadl/{ => networks/old_network}/nn/networks/ae.py (92%) rename clinicadl/{ => networks/old_network}/nn/networks/cnn.py (99%) rename clinicadl/{ => networks/old_network}/nn/networks/factory/__init__.py (100%) rename clinicadl/{ => networks/old_network}/nn/networks/factory/ae.py (97%) rename clinicadl/{ => networks/old_network}/nn/networks/factory/resnet.py (98%) rename clinicadl/{ => networks/old_network}/nn/networks/factory/secnn.py (96%) rename clinicadl/{ => networks/old_network}/nn/networks/random.py (98%) rename clinicadl/{ => networks/old_network}/nn/networks/ssda.py (98%) rename clinicadl/{ => networks/old_network}/nn/networks/unet.py (90%) rename clinicadl/{ => networks/old_network}/nn/networks/vae.py (99%) rename clinicadl/{ => networks/old_network}/nn/utils.py (100%) rename clinicadl/{network => networks/old_network}/sub_network.py (98%) create mode 100644 clinicadl/networks/old_network/unet/__init__.py rename clinicadl/{network => networks/old_network}/unet/unet.py (98%) create mode 100644 clinicadl/networks/old_network/vae/__init__.py rename clinicadl/{network => networks/old_network}/vae/advanced_CVAE.py (97%) rename clinicadl/{network => networks/old_network}/vae/base_vae.py (97%) rename clinicadl/{network => networks/old_network}/vae/convolutional_VAE.py (98%) rename clinicadl/{network => networks/old_network}/vae/vae_layers.py (99%) rename clinicadl/{network => networks/old_network}/vae/vae_utils.py (100%) rename clinicadl/{network => networks/old_network}/vae/vanilla_vae.py (99%) rename clinicadl/{optim => optimization}/__init__.py (100%) rename clinicadl/{optim => optimization}/config.py (100%) rename clinicadl/{optim => optimization}/early_stopping/__init__.py (100%) rename clinicadl/{optim => optimization}/early_stopping/config.py (100%) rename clinicadl/{optim => optimization}/early_stopping/early_stopper.py (100%) rename clinicadl/{optim => optimization}/lr_scheduler/__init__.py (100%) rename clinicadl/{optim => optimization}/lr_scheduler/config.py (100%) rename clinicadl/{optim => optimization}/lr_scheduler/enum.py (100%) rename clinicadl/{optim => optimization}/lr_scheduler/factory.py (100%) rename clinicadl/{optim => optimization}/optimizer/__init__.py (100%) rename clinicadl/{optim => optimization}/optimizer/config.py (100%) rename clinicadl/{optim => optimization}/optimizer/enum.py (100%) rename clinicadl/{optim => optimization}/optimizer/factory.py (100%) rename clinicadl/{optim => optimization}/optimizer/utils.py (100%) delete mode 100644 clinicadl/optimizer/optimization.py delete mode 100644 clinicadl/optimizer/optimizer.py create mode 100644 clinicadl/predictor/old_predictor.py create mode 100644 clinicadl/splitter/kfold.py rename clinicadl/splitter/{splitter.py => old_splitter.py} (100%) create mode 100644 clinicadl/splitter/split.py create mode 100644 clinicadl/trainer/old_trainer.py create 
mode 100644 clinicadl/transforms/factory.py delete mode 100644 tests/conftest.py delete mode 100644 tests/test_cli.py delete mode 100644 tests/test_generate.py delete mode 100644 tests/test_interpret.py delete mode 100644 tests/test_predict.py delete mode 100644 tests/test_prepare_data.py delete mode 100644 tests/test_qc.py delete mode 100644 tests/test_random_search.py delete mode 100644 tests/test_resume.py delete mode 100644 tests/test_train_ae.py delete mode 100644 tests/test_train_cnn.py delete mode 100644 tests/test_train_from_json.py delete mode 100644 tests/test_transfer_learning.py delete mode 100644 tests/test_tsvtools.py delete mode 100644 tests/testing_tools.py diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 1a2b6f7d9..949d9efff 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -2,9 +2,9 @@ name: 'Lint codebase' on: pull_request: - branches: [ "dev", "refactoring" ] + branches: [ "dev", "refactoring", "clinicadl_v2" ] push: - branches: [ "dev", "refactoring" ] + branches: [ "dev", "refactoring", "clinicadl_v2" ] permissions: contents: read diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 219e86c2b..21f71cd43 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -2,9 +2,9 @@ name: Test on: push: - branches: ["dev", "refactoring"] + branches: ["dev", "refactoring", "clinicadl_v2"] pull_request: - branches: ["dev", "refactoring"] + branches: ["dev", "refactoring", "clinicadl_v2"] permissions: contents: read diff --git a/clinicadl/API_test.py b/clinicadl/API_test.py index d144c1597..a6eb9fa72 100644 --- a/clinicadl/API_test.py +++ b/clinicadl/API_test.py @@ -1,87 +1,225 @@ from pathlib import Path -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.data import return_dataset -from clinicadl.predictor.config import PredictConfig +import torchio + +from clinicadl.dataset.caps_dataset import ( + CapsDatasetPatch, + CapsDatasetRoi, + CapsDatasetSlice, +) +from clinicadl.dataset.caps_reader import CapsReader +from clinicadl.dataset.concat import ConcatDataset +from clinicadl.dataset.config.extraction import ExtractionConfig +from clinicadl.dataset.config.preprocessing import ( + PreprocessingConfig, + T1PreprocessingConfig, +) +from clinicadl.experiment_manager.experiment_manager import ExperimentManager +from clinicadl.losses.config import CrossEntropyLossConfig +from clinicadl.losses.factory import get_loss_function +from clinicadl.model.clinicadl_model import ClinicaDLModel +from clinicadl.networks.config import ImplementedNetworks +from clinicadl.networks.factory import ( + ConvEncoderOptions, + create_network_config, + get_network_from_config, +) +from clinicadl.optimization.optimizer.config import AdamConfig, OptimizerConfig +from clinicadl.optimization.optimizer.factory import get_optimizer from clinicadl.predictor.predictor import Predictor -from clinicadl.prepare_data.prepare_data import DeepLearningPrepareData -from clinicadl.splitter.config import SplitterConfig -from clinicadl.splitter.splitter import Splitter -from clinicadl.trainer.config.classification import ClassificationConfig +from clinicadl.splitter.kfold import KFolder +from clinicadl.splitter.split import get_single_split, split_tsv from clinicadl.trainer.trainer import Trainer -from clinicadl.utils.enum import ExtractionMethod, Preprocessing, Task -from clinicadl.utils.iotools.train_utils import merge_cli_and_config_file_options +from 
clinicadl.transforms.transforms import Transforms
+
+# Create the Maps Manager / Read/write manager /
+maps_path = Path("/")
+manager = ExperimentManager(maps_path, overwrite=False)
+
+caps_directory = Path("caps_directory")  # output of clinica pipelines
+caps_reader = CapsReader(caps_directory, manager=manager)
+
+preprocessing_1 = caps_reader.get_preprocessing("t1-linear")
+extraction_1 = caps_reader.extract_slice(preprocessing=preprocessing_1, arg_slice=2)
+transforms_1 = Transforms(
+    data_augmentation=[torchio.t1, torchio.t2],
+    image_transforms=[torchio.t1, torchio.t2],
+    object_transforms=[torchio.t1, torchio.t2],
+)  # not mandatory
+
+preprocessing_2 = caps_reader.get_preprocessing("pet-linear")
+extraction_2 = caps_reader.extract_patch(preprocessing=preprocessing_2, arg_patch=2)
+transforms_2 = Transforms(
+    data_augmentation=[torchio.t2],
+    image_transforms=[torchio.t1],
+    object_transforms=[torchio.t1, torchio.t2],
+)
+
+sub_ses_tsv = Path("")
+split_dir = split_tsv(sub_ses_tsv)  # -> creates a test.tsv and a train.tsv
+
+dataset_t1_roi = caps_reader.get_dataset(
+    extraction=extraction_1,
+    preprocessing=preprocessing_1,
+    sub_ses_tsv=split_dir / "train.tsv",
+    transforms=transforms_1,
+)  # do we give config or object for transforms?
+dataset_pet_patch = caps_reader.get_dataset(
+    extraction=extraction_2,
+    preprocessing=preprocessing_2,
+    sub_ses_tsv=split_dir / "train.tsv",
+    transforms=transforms_2,
+)
+
+dataset_multi_modality_multi_extract = ConcatDataset(
+    [dataset_t1_roi, dataset_pet_patch]
+)  # two train.tsv files as input that must be concatenated; the same care is needed for the transforms
+
+config_file = Path("config_file")
+trainer = Trainer.from_json(config_file=config_file, manager=manager)
+
+# CROSS-VALIDATION CASE
+splitter = KFolder(
+    n_splits=3, caps_dataset=dataset_multi_modality_multi_extract, manager=manager
+)
+
+for split in splitter.split_iterator(split_list=[0, 1]):
+    # clearly define what the split object contains
+
+    loss, loss_config = get_loss_function(CrossEntropyLossConfig())
+    network_config = create_network_config(ImplementedNetworks.CNN)(
+        in_shape=[2, 2, 2],
+        num_outputs=1,
+        conv_args=ConvEncoderOptions(channels=[3, 2, 2]),
+    )
+    network, _ = get_network_from_config(network_config)
+    optimizer, _ = get_optimizer(network, AdamConfig())
+    model = ClinicaDLModel(network=network, loss=loss, optimizer=optimizer)
+
+    trainer.train(model, split)
+    # the trainer will instantiate a predictor/validator in train or in __init__
+
+
+# SINGLE-SPLIT CASE
+split = get_single_split(
+    n_subject_validation=0,
+    caps_dataset=dataset_multi_modality_multi_extract,
+    manager=manager,
+)
+
+loss, loss_config = get_loss_function(CrossEntropyLossConfig())
+network_config = create_network_config(ImplementedNetworks.CNN)(
+    in_shape=[2, 2, 2], num_outputs=1, conv_args=ConvEncoderOptions(channels=[3, 2, 2])
+)
+network, _ = get_network_from_config(network_config)
+optimizer, _ = get_optimizer(network, AdamConfig())
+model = ClinicaDLModel(network=network, loss=loss, optimizer=optimizer)
+
+trainer.train(model, split)
+# the trainer will instantiate a predictor/validator in train or in __init__
+
-
-image_config = CapsDatasetConfig.from_preprocessing_and_extraction_method(
-    extraction=ExtractionMethod.IMAGE,
-    preprocessing_type=Preprocessing.T1_LINEAR,
+# TEST
+
+preprocessing_test: PreprocessingConfig = caps_reader.get_preprocessing("pet-linear")
+extraction_test: ExtractionConfig = caps_reader.extract_patch(
+    preprocessing=preprocessing_2, arg_patch=2
+)
+transforms_test = Transforms(
+    data_augmentation=[torchio.t2],
+    image_transforms=[torchio.t1],
+    object_transforms=[torchio.t1, torchio.t2],
+)
+
+dataset_test = caps_reader.get_dataset(
+    extraction=extraction_test,
+    preprocessing=preprocessing_test,
+    sub_ses_tsv=split_dir / "test.tsv",
+    transforms=transforms_test,
 )
-DeepLearningPrepareData(image_config)
-
-dataset = return_dataset(
-    input_dir,
-    data_df,
-    preprocessing_dict,
-    transforms_config,
-    label,
-    label_code,
-    cnn_index,
-    label_presence,
-    multi_cohort,
+predictor = Predictor(manager=manager)
+predictor.predict(dataset_test=dataset_test, split=2)
+
+
+# SIMPLE EXPERIMENT
+
+
+maps_path = Path("/")
+manager = ExperimentManager(maps_path, overwrite=False)
+
+caps_directory = Path("caps_directory")  # output of clinica pipelines
+caps_reader = CapsReader(caps_directory, manager=manager)
+
+extraction_1 = caps_reader.extract_image(preprocessing=T1PreprocessingConfig())
+transforms_1 = Transforms(
+    data_augmentation=[torchio.transforms.RandomMotion]
+)  # not mandatory
+
+sub_ses_tsv = Path("")
+split_dir = split_tsv(sub_ses_tsv)  # -> creates a test.tsv and a train.tsv
+
+dataset_t1_image = caps_reader.get_dataset(
+    extraction=extraction_1,
+    preprocessing=T1PreprocessingConfig(),
+    sub_ses_tsv=split_dir / "train.tsv",
+    transforms=transforms_1,
+)  # do we give config or object for transforms?
+
+
+config_file = Path("config_file")
+trainer = Trainer.from_json(config_file=config_file, manager=manager)
+
+# CROSS-VALIDATION CASE
+splitter = KFolder(n_splits=3, caps_dataset=dataset_t1_image, manager=manager)
+
+for split in splitter.split_iterator(split_list=[0, 1]):
+    # clearly define what the split object contains
+
+    loss, loss_config = get_loss_function(CrossEntropyLossConfig())
+    network_config = create_network_config(ImplementedNetworks.CNN)(
+        in_shape=[2, 2, 2],
+        num_outputs=1,
+        conv_args=ConvEncoderOptions(channels=[3, 2, 2]),
+    )
+    network, _ = get_network_from_config(network_config)
+    optimizer, _ = get_optimizer(network, AdamConfig())
+    model = ClinicaDLModel(network=network, loss=loss, optimizer=optimizer)
+
+    trainer.train(model, split)
+    # the trainer will instantiate a predictor/validator in train or in __init__
+
+
+# SIMPLE EXPERIMENT WITH AN ALREADY EXISTING CAPS
+
+maps_path = Path("/")
+manager = ExperimentManager(maps_path, overwrite=False)
+
+# sub_ses_tsv = Path("")
+# split_dir = split_tsv(sub_ses_tsv)  # -> creates a test.tsv and a train.tsv
+
+dataset_t1_image = CapsDatasetPatch.from_json(
+    extraction=extract_json,
+    sub_ses_tsv=split_dir / "train.tsv",
 )
+config_file = Path("config_file")
+trainer = Trainer.from_json(config_file=config_file, manager=manager)
+
+# CROSS-VALIDATION CASE
+splitter = KFolder(n_splits=3, caps_dataset=dataset_t1_image, manager=manager)
+
+for split in splitter.split_iterator(split_list=[0, 1]):
+    # clearly define what the split object contains
+
+    loss, loss_config = get_loss_function(CrossEntropyLossConfig())
+    network_config = create_network_config(ImplementedNetworks.CNN)(
+        in_shape=[2, 2, 2],
+        num_outputs=1,
+        conv_args=ConvEncoderOptions(channels=[3, 2, 2]),
+    )
+    network, _ = get_network_from_config(network_config)
+    optimizer, _ = get_optimizer(network, AdamConfig())
+    model = ClinicaDLModel(network=network, loss=loss, optimizer=optimizer)
-split_config = SplitterConfig()
-splitter = Splitter(split_config)
-
-validator_config = PredictConfig()
-validator = Predictor(validator_config)
-
-train_config = ClassificationConfig()
-trainer = Trainer(train_config, validator)
-
-for split in splitter.split_iterator():
-    for network in range(
-        first_network, self.maps_manager.num_networks
-    ):  # for multi_network
-        ###### actual _train_single method of the Trainer ############
-        train_loader = trainer.get_dataloader(dataset, split, network, "train", config)
-        valid_loader = validator.get_dataloader(
-            dataset, split, network, "valid", config
-        )  # ?? validatior, trainer ?
-
-        trainer._train(
-            train_loader,
-            valid_loader,
-            split=split,
-            network=network,
-            resume=resume,  # in a config class
-            callbacks=[CodeCarbonTracker],  # in a config class ?
-        )
-
-        validator._ensemble_prediction(
-            self.maps_manager,
-            "train",
-            split,
-            self.config.validation.selection_metrics,
-        )
-        validator._ensemble_prediction(
-            self.maps_manager,
-            "validation",
-            split,
-            self.config.validation.selection_metrics,
-        )
-        ###### end ############
-
-
-for split in splitter.split_iterator():
-    for network in range(
-        first_network, self.maps_manager.num_networks
-    ):  # for multi_network
-        ###### actual _train_single method of the Trainer ############
-        test_loader = trainer.get_dataloader(dataset, split, network, "test", config)
-        validator.predict(test_loader)
-
-interpret_config = PredictConfig(**kwargs)
-predict_manager = Predictor(interpret_config)
-predict_manager.interpret()
+    trainer.train(model, split)
+    # the trainer will instantiate a predictor/validator in train or in __init__
diff --git a/clinicadl/commandline/modules_options/data.py b/clinicadl/commandline/modules_options/data.py
index 569cbab6c..a881440c3 100644
--- a/clinicadl/commandline/modules_options/data.py
+++ b/clinicadl/commandline/modules_options/data.py
@@ -1,8 +1,8 @@
 import click

-from clinicadl.caps_dataset.data_config import DataConfig
 from clinicadl.config.config_utils import get_default_from_config_class as get_default
 from clinicadl.config.config_utils import get_type_from_config_class as get_type
+from clinicadl.dataset.data_config import DataConfig

 # Data
 baseline = click.option(
diff --git a/clinicadl/commandline/modules_options/dataloader.py b/clinicadl/commandline/modules_options/dataloader.py
index dcaa66aa9..bf4d4c781 100644
--- a/clinicadl/commandline/modules_options/dataloader.py
+++ b/clinicadl/commandline/modules_options/dataloader.py
@@ -1,8 +1,8 @@
 import click

-from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig
 from clinicadl.config.config_utils import get_default_from_config_class as get_default
 from clinicadl.config.config_utils import get_type_from_config_class as get_type
+from clinicadl.dataset.dataloader_config import DataLoaderConfig

 # DataLoader
 batch_size = click.option(
diff --git a/clinicadl/commandline/modules_options/extraction.py b/clinicadl/commandline/modules_options/extraction.py
index fc0db1f98..e382eecc2 100644
--- a/clinicadl/commandline/modules_options/extraction.py
+++ b/clinicadl/commandline/modules_options/extraction.py
@@ -1,14 +1,14 @@
 import click

-from clinicadl.caps_dataset.extraction.config import (
+from clinicadl.config.config_utils import get_default_from_config_class as get_default
+from clinicadl.config.config_utils import get_type_from_config_class as get_type
+from clinicadl.dataset.config.extraction import (
     ExtractionConfig,
     ExtractionImageConfig,
     ExtractionPatchConfig,
     ExtractionROIConfig,
     ExtractionSliceConfig,
 )
-from clinicadl.config.config_utils import get_default_from_config_class as get_default
-from clinicadl.config.config_utils import get_type_from_config_class as get_type

 extract_json = click.option(
     "-ej",
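The three modules_options diffs above all instantiate the same pattern: every CLI option is derived from a pydantic config class through get_default_from_config_class / get_type_from_config_class, so the CLI never hard-codes a default that the config already knows. A minimal sketch of that pattern, with a stand-in DataLoaderConfig and the two helpers re-implemented as assumptions (the real helpers live in clinicadl.config.config_utils and may behave differently):

import click
from pydantic import BaseModel


class DataLoaderConfig(BaseModel):
    """Stand-in for clinicadl's real DataLoaderConfig."""

    batch_size: int = 8
    n_proc: int = 2


def get_default(name: str, config_class: type[BaseModel]):
    # Assumption about what get_default_from_config_class does:
    # read the default value stored on the pydantic field.
    return config_class.model_fields[name].default


def get_type(name: str, config_class: type[BaseModel]):
    # Likewise: expose the field annotation so click can cast the value.
    return config_class.model_fields[name].annotation


# Built exactly like the options in the diffs above: the config class
# is the single source of truth for type and default.
batch_size = click.option(
    "--batch_size",
    type=get_type("batch_size", DataLoaderConfig),
    default=get_default("batch_size", DataLoaderConfig),
    show_default=True,
    help="Batch size used by the DataLoader.",
)

diff --git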
a/clinicadl/commandline/modules_options/maps_manager.py b/clinicadl/commandline/modules_options/maps_manager.py index f973f441a..69574d42c 100644 --- a/clinicadl/commandline/modules_options/maps_manager.py +++ b/clinicadl/commandline/modules_options/maps_manager.py @@ -1,7 +1,7 @@ import click from clinicadl.config.config_utils import get_type_from_config_class as get_type -from clinicadl.maps_manager.config import MapsManagerConfig +from clinicadl.experiment_manager.config import MapsManagerConfig maps_dir = click.argument("maps_dir", type=get_type("maps_dir", MapsManagerConfig)) data_group = click.option("data_group", type=get_type("data_group", MapsManagerConfig)) diff --git a/clinicadl/commandline/modules_options/network.py b/clinicadl/commandline/modules_options/network.py index 995ea6ccc..c0b8716e1 100644 --- a/clinicadl/commandline/modules_options/network.py +++ b/clinicadl/commandline/modules_options/network.py @@ -2,7 +2,7 @@ from clinicadl.config.config_utils import get_default_from_config_class as get_default from clinicadl.config.config_utils import get_type_from_config_class as get_type -from clinicadl.network.config import NetworkConfig +from clinicadl.networks.old_network.config import NetworkConfig # Model multi_network = click.option( diff --git a/clinicadl/commandline/modules_options/optimization.py b/clinicadl/commandline/modules_options/optimization.py index fd88dc06e..66bedebd0 100644 --- a/clinicadl/commandline/modules_options/optimization.py +++ b/clinicadl/commandline/modules_options/optimization.py @@ -2,7 +2,7 @@ from clinicadl.config.config_utils import get_default_from_config_class as get_default from clinicadl.config.config_utils import get_type_from_config_class as get_type -from clinicadl.optimizer.optimization import OptimizationConfig +from clinicadl.optimization.config import OptimizationConfig # Optimization accumulation_steps = click.option( diff --git a/clinicadl/commandline/modules_options/optimizer.py b/clinicadl/commandline/modules_options/optimizer.py index 57e3903e3..1012adfe0 100644 --- a/clinicadl/commandline/modules_options/optimizer.py +++ b/clinicadl/commandline/modules_options/optimizer.py @@ -2,7 +2,7 @@ from clinicadl.config.config_utils import get_default_from_config_class as get_default from clinicadl.config.config_utils import get_type_from_config_class as get_type -from clinicadl.optimizer.optimizer import OptimizerConfig +from clinicadl.optimization.optimizer import OptimizerConfig # Optimizer learning_rate = click.option( diff --git a/clinicadl/commandline/modules_options/preprocessing.py b/clinicadl/commandline/modules_options/preprocessing.py index 641e91518..131ba5324 100644 --- a/clinicadl/commandline/modules_options/preprocessing.py +++ b/clinicadl/commandline/modules_options/preprocessing.py @@ -1,13 +1,13 @@ import click -from clinicadl.caps_dataset.preprocessing.config import ( +from clinicadl.config.config_utils import get_default_from_config_class as get_default +from clinicadl.config.config_utils import get_type_from_config_class as get_type +from clinicadl.dataset.config.preprocessing import ( CustomPreprocessingConfig, DTIPreprocessingConfig, PETPreprocessingConfig, PreprocessingConfig, ) -from clinicadl.config.config_utils import get_default_from_config_class as get_default -from clinicadl.config.config_utils import get_type_from_config_class as get_type tracer = click.option( "--tracer", diff --git a/clinicadl/commandline/pipelines/generate/artifacts/cli.py b/clinicadl/commandline/pipelines/generate/artifacts/cli.py 
index b4a98b40a..68d1ec869 100644 --- a/clinicadl/commandline/pipelines/generate/artifacts/cli.py +++ b/clinicadl/commandline/pipelines/generate/artifacts/cli.py @@ -6,8 +6,6 @@ import torchio as tio from joblib import Parallel, delayed -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.caps_dataset_utils import find_file_type from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import ( data, @@ -15,6 +13,8 @@ preprocessing, ) from clinicadl.commandline.pipelines.generate.artifacts import options as artifacts +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.caps_dataset_utils import find_file_type from clinicadl.generate.generate_config import GenerateArtifactsConfig from clinicadl.generate.generate_utils import ( load_and_check_tsv, diff --git a/clinicadl/commandline/pipelines/generate/hypometabolic/cli.py b/clinicadl/commandline/pipelines/generate/hypometabolic/cli.py index cb68269ca..82c4e5cb3 100644 --- a/clinicadl/commandline/pipelines/generate/hypometabolic/cli.py +++ b/clinicadl/commandline/pipelines/generate/hypometabolic/cli.py @@ -7,13 +7,13 @@ from joblib import Parallel, delayed from nilearn.image import resample_to_img -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.caps_dataset_utils import find_file_type from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import data, dataloader, preprocessing from clinicadl.commandline.pipelines.generate.hypometabolic import ( options as hypometabolic, ) +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.caps_dataset_utils import find_file_type from clinicadl.generate.generate_config import GenerateHypometabolicConfig from clinicadl.generate.generate_utils import ( load_and_check_tsv, diff --git a/clinicadl/commandline/pipelines/generate/random/cli.py b/clinicadl/commandline/pipelines/generate/random/cli.py index cf8e8d9e8..8ea26a5d0 100644 --- a/clinicadl/commandline/pipelines/generate/random/cli.py +++ b/clinicadl/commandline/pipelines/generate/random/cli.py @@ -7,8 +7,6 @@ import pandas as pd from joblib import Parallel, delayed -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.caps_dataset_utils import find_file_type from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import ( data, @@ -16,6 +14,8 @@ preprocessing, ) from clinicadl.commandline.pipelines.generate.random import options as random +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.caps_dataset_utils import find_file_type from clinicadl.generate.generate_config import GenerateRandomConfig from clinicadl.generate.generate_utils import ( load_and_check_tsv, diff --git a/clinicadl/commandline/pipelines/generate/trivial/cli.py b/clinicadl/commandline/pipelines/generate/trivial/cli.py index 4798dc904..d683865f2 100644 --- a/clinicadl/commandline/pipelines/generate/trivial/cli.py +++ b/clinicadl/commandline/pipelines/generate/trivial/cli.py @@ -6,8 +6,6 @@ import pandas as pd from joblib import Parallel, delayed -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.caps_dataset_utils import find_file_type from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import ( data, @@ -15,6 +13,8 @@ preprocessing, ) from 
clinicadl.commandline.pipelines.generate.trivial import options as trivial +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.caps_dataset_utils import find_file_type from clinicadl.generate.generate_config import GenerateTrivialConfig from clinicadl.generate.generate_utils import ( im_loss_roi_gaussian_distribution, diff --git a/clinicadl/commandline/pipelines/interpret/cli.py b/clinicadl/commandline/pipelines/interpret/cli.py index db92f06e2..9f4fb8a87 100644 --- a/clinicadl/commandline/pipelines/interpret/cli.py +++ b/clinicadl/commandline/pipelines/interpret/cli.py @@ -12,7 +12,7 @@ ) from clinicadl.commandline.pipelines.interpret import options from clinicadl.interpret.config import InterpretConfig -from clinicadl.predictor.predictor import Predictor +from clinicadl.predictor.old_predictor import Predictor @click.command("interpret", no_args_is_help=True) diff --git a/clinicadl/commandline/pipelines/predict/cli.py b/clinicadl/commandline/pipelines/predict/cli.py index 184f46ad7..119c12678 100644 --- a/clinicadl/commandline/pipelines/predict/cli.py +++ b/clinicadl/commandline/pipelines/predict/cli.py @@ -11,7 +11,7 @@ ) from clinicadl.commandline.pipelines.predict import options from clinicadl.predictor.config import PredictConfig -from clinicadl.predictor.predictor import Predictor +from clinicadl.predictor.old_predictor import Predictor @click.command(name="predict", no_args_is_help=True) diff --git a/clinicadl/commandline/pipelines/prepare_data/prepare_data_cli.py b/clinicadl/commandline/pipelines/prepare_data/prepare_data_cli.py index c9630c507..d162dcf97 100644 --- a/clinicadl/commandline/pipelines/prepare_data/prepare_data_cli.py +++ b/clinicadl/commandline/pipelines/prepare_data/prepare_data_cli.py @@ -1,6 +1,5 @@ import click -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import ( data, @@ -8,7 +7,8 @@ extraction, preprocessing, ) -from clinicadl.prepare_data.prepare_data import DeepLearningPrepareData +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.prepare_data.prepare_data import DeepLearningPrepareData from clinicadl.utils.enum import ExtractionMethod diff --git a/clinicadl/commandline/pipelines/prepare_data/prepare_data_from_bids_cli.py b/clinicadl/commandline/pipelines/prepare_data/prepare_data_from_bids_cli.py index 4c43df851..f4f888a71 100644 --- a/clinicadl/commandline/pipelines/prepare_data/prepare_data_from_bids_cli.py +++ b/clinicadl/commandline/pipelines/prepare_data/prepare_data_from_bids_cli.py @@ -1,6 +1,5 @@ import click -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import ( data, @@ -8,7 +7,8 @@ extraction, preprocessing, ) -from clinicadl.prepare_data.prepare_data import DeepLearningPrepareData +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.prepare_data.prepare_data import DeepLearningPrepareData from clinicadl.utils.enum import ExtractionMethod diff --git a/clinicadl/commandline/pipelines/quality_check/pet_linear/cli.py b/clinicadl/commandline/pipelines/quality_check/pet_linear/cli.py index 455bb5299..938895b82 100644 --- a/clinicadl/commandline/pipelines/quality_check/pet_linear/cli.py +++ b/clinicadl/commandline/pipelines/quality_check/pet_linear/cli.py @@ -44,7 +44,7 @@ def cli( SUVR_REFERENCE_REGION 
is the reference region used to perform intensity normalization {pons|cerebellumPons|pons2|cerebellumPons2}. """ - from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig + from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig from .....quality_check.pet_linear.quality_check import ( quality_check as pet_linear_qc, diff --git a/clinicadl/commandline/pipelines/quality_check/t1_linear/cli.py b/clinicadl/commandline/pipelines/quality_check/t1_linear/cli.py index f73971a63..6c55b3586 100755 --- a/clinicadl/commandline/pipelines/quality_check/t1_linear/cli.py +++ b/clinicadl/commandline/pipelines/quality_check/t1_linear/cli.py @@ -1,8 +1,8 @@ import click -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import computational, data, dataloader +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig from clinicadl.utils.computational.computational import ComputationalConfig from clinicadl.utils.enum import ExtractionMethod, Preprocessing diff --git a/clinicadl/commandline/pipelines/train/classification/cli.py b/clinicadl/commandline/pipelines/train/classification/cli.py index 8ac287402..21d57f365 100644 --- a/clinicadl/commandline/pipelines/train/classification/cli.py +++ b/clinicadl/commandline/pipelines/train/classification/cli.py @@ -23,7 +23,7 @@ options as transfer_learning, ) from clinicadl.trainer.config.classification import ClassificationConfig -from clinicadl.trainer.trainer import Trainer +from clinicadl.trainer.old_trainer import Trainer from clinicadl.utils.enum import Task from clinicadl.utils.iotools.train_utils import merge_cli_and_config_file_options diff --git a/clinicadl/commandline/pipelines/train/from_json/cli.py b/clinicadl/commandline/pipelines/train/from_json/cli.py index 517ec8fa5..5e6771258 100644 --- a/clinicadl/commandline/pipelines/train/from_json/cli.py +++ b/clinicadl/commandline/pipelines/train/from_json/cli.py @@ -6,7 +6,7 @@ from clinicadl.commandline.modules_options import ( split, ) -from clinicadl.trainer.trainer import Trainer +from clinicadl.trainer.old_trainer import Trainer @click.command(name="from_json", no_args_is_help=True) diff --git a/clinicadl/commandline/pipelines/train/reconstruction/cli.py b/clinicadl/commandline/pipelines/train/reconstruction/cli.py index fc39ef54e..1bad88443 100644 --- a/clinicadl/commandline/pipelines/train/reconstruction/cli.py +++ b/clinicadl/commandline/pipelines/train/reconstruction/cli.py @@ -23,7 +23,7 @@ options as transfer_learning, ) from clinicadl.trainer.config.reconstruction import ReconstructionConfig -from clinicadl.trainer.trainer import Trainer +from clinicadl.trainer.old_trainer import Trainer from clinicadl.utils.enum import Task from clinicadl.utils.iotools.train_utils import merge_cli_and_config_file_options diff --git a/clinicadl/commandline/pipelines/train/regression/cli.py b/clinicadl/commandline/pipelines/train/regression/cli.py index 59e816192..95a623604 100644 --- a/clinicadl/commandline/pipelines/train/regression/cli.py +++ b/clinicadl/commandline/pipelines/train/regression/cli.py @@ -21,7 +21,7 @@ options as transfer_learning, ) from clinicadl.trainer.config.regression import RegressionConfig -from clinicadl.trainer.trainer import Trainer +from clinicadl.trainer.old_trainer import Trainer from clinicadl.utils.enum import Task from clinicadl.utils.iotools.train_utils import merge_cli_and_config_file_options diff --git 
a/clinicadl/commandline/pipelines/train/resume/cli.py b/clinicadl/commandline/pipelines/train/resume/cli.py index 12451d18a..90efa4244 100644 --- a/clinicadl/commandline/pipelines/train/resume/cli.py +++ b/clinicadl/commandline/pipelines/train/resume/cli.py @@ -4,7 +4,7 @@ from clinicadl.commandline.modules_options import ( split, ) -from clinicadl.trainer.trainer import Trainer +from clinicadl.trainer.old_trainer import Trainer @click.command(name="resume", no_args_is_help=True) diff --git a/clinicadl/caps_dataset/__init__.py b/clinicadl/dataset/__init__.py similarity index 100% rename from clinicadl/caps_dataset/__init__.py rename to clinicadl/dataset/__init__.py diff --git a/clinicadl/caps_dataset/data.py b/clinicadl/dataset/caps_dataset.py similarity index 99% rename from clinicadl/caps_dataset/data.py rename to clinicadl/dataset/caps_dataset.py index 638f49e9d..d45dc5aa6 100644 --- a/clinicadl/caps_dataset/data.py +++ b/clinicadl/dataset/caps_dataset.py @@ -10,14 +10,14 @@ import torch from torch.utils.data import Dataset -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.extraction.config import ( +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.config.extraction import ( ExtractionImageConfig, ExtractionPatchConfig, ExtractionROIConfig, ExtractionSliceConfig, ) -from clinicadl.prepare_data.prepare_data_utils import ( +from clinicadl.dataset.prepare_data.prepare_data_utils import ( compute_discarded_slices, extract_patch_path, extract_patch_tensor, diff --git a/clinicadl/caps_dataset/caps_dataset_config.py b/clinicadl/dataset/caps_dataset_config.py similarity index 93% rename from clinicadl/caps_dataset/caps_dataset_config.py rename to clinicadl/dataset/caps_dataset_config.py index b7086944c..0eac3ffd3 100644 --- a/clinicadl/caps_dataset/caps_dataset_config.py +++ b/clinicadl/dataset/caps_dataset_config.py @@ -3,10 +3,8 @@ from pydantic import BaseModel, ConfigDict -from clinicadl.caps_dataset.data_config import DataConfig -from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig -from clinicadl.caps_dataset.extraction import config as extraction -from clinicadl.caps_dataset.preprocessing.config import ( +from clinicadl.dataset.config import extraction +from clinicadl.dataset.config.preprocessing import ( CustomPreprocessingConfig, DTIPreprocessingConfig, FlairPreprocessingConfig, @@ -14,7 +12,9 @@ PreprocessingConfig, T1PreprocessingConfig, ) -from clinicadl.caps_dataset.preprocessing.utils import ( +from clinicadl.dataset.data_config import DataConfig +from clinicadl.dataset.dataloader_config import DataLoaderConfig +from clinicadl.dataset.utils import ( bids_nii, dwi_dti, linear_nii, diff --git a/clinicadl/caps_dataset/caps_dataset_utils.py b/clinicadl/dataset/caps_dataset_utils.py similarity index 96% rename from clinicadl/caps_dataset/caps_dataset_utils.py rename to clinicadl/dataset/caps_dataset_utils.py index b87c6ed22..b54ba373d 100644 --- a/clinicadl/caps_dataset/caps_dataset_utils.py +++ b/clinicadl/dataset/caps_dataset_utils.py @@ -2,15 +2,15 @@ from pathlib import Path from typing import Any, Dict, Optional, Tuple -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.preprocessing.config import ( +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.config.preprocessing import ( CustomPreprocessingConfig, DTIPreprocessingConfig, FlairPreprocessingConfig, PETPreprocessingConfig, 
    T1PreprocessingConfig,
 )
-from clinicadl.caps_dataset.preprocessing.utils import (
+from clinicadl.dataset.utils import (
     bids_nii,
     dwi_dti,
     linear_nii,
@@ -179,7 +179,7 @@ def read_json(json_path: Path) -> Dict[str, Any]:
     if "preprocessing" not in parameters:
         parameters["preprocessing"] = parameters["preprocessing_dict"]["preprocessing"]

-    from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
+    from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig

     config = CapsDatasetConfig.from_preprocessing_and_extraction_method(
         extraction=parameters["mode"],
diff --git a/clinicadl/dataset/caps_reader.py b/clinicadl/dataset/caps_reader.py
new file mode 100644
index 000000000..14199616e
--- /dev/null
+++ b/clinicadl/dataset/caps_reader.py
@@ -0,0 +1,62 @@
+from pathlib import Path
+from typing import Optional
+
+from clinicadl.dataset.caps_dataset import CapsDataset
+from clinicadl.dataset.config.extraction import (
+    ExtractionConfig,
+    ExtractionImageConfig,
+    ExtractionPatchConfig,
+    ExtractionROIConfig,
+    ExtractionSliceConfig,
+)
+from clinicadl.dataset.config.preprocessing import PreprocessingConfig
+from clinicadl.experiment_manager.experiment_manager import ExperimentManager
+from clinicadl.transforms.config import TransformsConfig
+
+
+class CapsReader:
+    def __init__(self, caps_directory: Path, manager: ExperimentManager):
+        """TO COMPLETE"""
+        pass
+
+    def get_dataset(
+        self,
+        extraction: ExtractionConfig,
+        preprocessing: PreprocessingConfig,
+        sub_ses_tsv: Path,
+        transforms: TransformsConfig,
+    ) -> CapsDataset:
+        return CapsDataset(extraction, preprocessing, sub_ses_tsv, transforms)
+
+    def get_preprocessing(self, preprocessing: str) -> PreprocessingConfig:
+        """TO COMPLETE"""
+
+        return PreprocessingConfig()
+
+    def extract_slice(
+        self, preprocessing: PreprocessingConfig, arg_slice: Optional[int] = None
+    ) -> ExtractionSliceConfig:
+        """TO COMPLETE"""
+
+        return ExtractionSliceConfig()
+
+    def extract_patch(
+        self, preprocessing: PreprocessingConfig, arg_patch: Optional[int] = None
+    ) -> ExtractionPatchConfig:
+        """TO COMPLETE"""
+
+        return ExtractionPatchConfig()
+
+    def extract_roi(
+        self, preprocessing: PreprocessingConfig, arg_roi: Optional[int] = None
+    ) -> ExtractionROIConfig:
+        """TO COMPLETE"""
+
+        return ExtractionROIConfig()
+
+    def extract_image(
+        self, preprocessing: PreprocessingConfig, arg_image: Optional[int] = None
+    ) -> ExtractionImageConfig:
+        """TO COMPLETE"""
+
+        return ExtractionImageConfig()
diff --git a/clinicadl/dataset/concat.py b/clinicadl/dataset/concat.py
new file mode 100644
index 000000000..f0b420dfe
--- /dev/null
+++ b/clinicadl/dataset/concat.py
@@ -0,0 +1,6 @@
+from clinicadl.dataset.caps_dataset import CapsDataset
+
+
+class ConcatDataset(CapsDataset):
+    def __init__(self, list_: list[CapsDataset]):
+        """TO COMPLETE"""
diff --git a/clinicadl/maps_manager/__init__.py b/clinicadl/dataset/config/__init__.py
similarity index 100%
rename from clinicadl/maps_manager/__init__.py
rename to clinicadl/dataset/config/__init__.py
diff --git a/clinicadl/caps_dataset/extraction/config.py b/clinicadl/dataset/config/extraction.py
similarity index 100%
rename from clinicadl/caps_dataset/extraction/config.py
rename to clinicadl/dataset/config/extraction.py
diff --git a/clinicadl/caps_dataset/preprocessing/config.py b/clinicadl/dataset/config/preprocessing.py
similarity index 100%
rename from clinicadl/caps_dataset/preprocessing/config.py
rename to clinicadl/dataset/config/preprocessing.py
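The ConcatDataset stub above is still to be completed. One plausible behaviour, sketched under the assumption that every CapsDataset acts as a map-style torch Dataset, is plain index dispatch over the wrapped datasets; each wrapped dataset keeps its own tsv and transforms, which is exactly what the warning comment in API_test.py is about:

from torch.utils.data import Dataset


class ConcatDataset(Dataset):
    """Sketch only: index dispatch over several CapsDataset objects."""

    def __init__(self, datasets: list[Dataset]):
        self.datasets = datasets
        # Cumulative sizes, e.g. [120, 200] for datasets of length 120 and 80.
        self.cumulative = []
        total = 0
        for dataset in datasets:
            total += len(dataset)
            self.cumulative.append(total)

    def __len__(self) -> int:
        return self.cumulative[-1] if self.cumulative else 0

    def __getitem__(self, idx: int):
        for dataset_idx, bound in enumerate(self.cumulative):
            if idx < bound:
                offset = bound - len(self.datasets[dataset_idx])
                return self.datasets[dataset_idx][idx - offset]
        raise IndexError(f"index {idx} out of range for {len(self)} samples")

Because dispatch happens per index, samples keep the transforms of the dataset they came from; whether the final API should instead re-apply one shared Transforms is one of the open questions flagged in the script.

diff --git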
a/clinicadl/caps_dataset/data_config.py b/clinicadl/dataset/data_config.py similarity index 98% rename from clinicadl/caps_dataset/data_config.py rename to clinicadl/dataset/data_config.py index 80694fcd0..39e6a6254 100644 --- a/clinicadl/caps_dataset/data_config.py +++ b/clinicadl/dataset/data_config.py @@ -122,7 +122,6 @@ def preprocessing_dict(self) -> Dict[str, Any]: ValueError In case of multi-cohort dataset, if no preprocessing file is found in any CAPS. """ - from clinicadl.caps_dataset.data import CapsDataset if self.preprocessing_json is not None: if not self.multi_cohort: diff --git a/clinicadl/caps_dataset/dataloader_config.py b/clinicadl/dataset/dataloader_config.py similarity index 100% rename from clinicadl/caps_dataset/dataloader_config.py rename to clinicadl/dataset/dataloader_config.py diff --git a/clinicadl/monai_networks/nn/layers/__init__.py b/clinicadl/dataset/prepare_data/__init__.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/__init__.py rename to clinicadl/dataset/prepare_data/__init__.py diff --git a/clinicadl/prepare_data/prepare_data.py b/clinicadl/dataset/prepare_data/prepare_data.py similarity index 97% rename from clinicadl/prepare_data/prepare_data.py rename to clinicadl/dataset/prepare_data/prepare_data.py index e9b7fc073..d9ef1c412 100644 --- a/clinicadl/prepare_data/prepare_data.py +++ b/clinicadl/dataset/prepare_data/prepare_data.py @@ -5,9 +5,9 @@ from joblib import Parallel, delayed from torch import save as save_tensor -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.caps_dataset_utils import compute_folder_and_file_type -from clinicadl.caps_dataset.extraction.config import ( +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.caps_dataset_utils import compute_folder_and_file_type +from clinicadl.dataset.config.extraction import ( ExtractionConfig, ExtractionImageConfig, ExtractionPatchConfig, diff --git a/clinicadl/prepare_data/prepare_data_utils.py b/clinicadl/dataset/prepare_data/prepare_data_utils.py similarity index 100% rename from clinicadl/prepare_data/prepare_data_utils.py rename to clinicadl/dataset/prepare_data/prepare_data_utils.py diff --git a/clinicadl/caps_dataset/preprocessing/utils.py b/clinicadl/dataset/utils.py similarity index 76% rename from clinicadl/caps_dataset/preprocessing/utils.py rename to clinicadl/dataset/utils.py index 0aa93004d..7af1da539 100644 --- a/clinicadl/caps_dataset/preprocessing/utils.py +++ b/clinicadl/dataset/utils.py @@ -1,6 +1,6 @@ from typing import Optional -from clinicadl.caps_dataset.preprocessing import config as preprocessing_config +from clinicadl.dataset.config import preprocessing from clinicadl.utils.enum import ( LinearModality, Preprocessing, @@ -11,7 +11,7 @@ def bids_nii( - config: preprocessing_config.PreprocessingConfig, + config: preprocessing.PreprocessingConfig, reconstruction: Optional[str] = None, ) -> FileType: """Return the query dict required to capture PET scans. 
@@ -41,7 +41,7 @@ def bids_nii(
         f"ClinicaDL is Unable to read this modality ({config.preprocessing}) of images, please chose one from this list: {list[Preprocessing]}"
     )

-    if isinstance(config, preprocessing_config.PETPreprocessingConfig):
+    if isinstance(config, preprocessing.PETPreprocessingConfig):
         trc = "" if config.tracer is None else f"_trc-{Tracer(config.tracer).value}"
         rec = "" if reconstruction is None else f"_rec-{reconstruction}"
         description = "PET data"
@@ -56,13 +56,13 @@ def bids_nii(
         )
         return file_type

-    elif isinstance(config, preprocessing_config.T1PreprocessingConfig):
+    elif isinstance(config, preprocessing.T1PreprocessingConfig):
         return FileType(pattern="anat/sub-*_ses-*_T1w.nii*", description="T1w MRI")

-    elif isinstance(config, preprocessing_config.FlairPreprocessingConfig):
+    elif isinstance(config, preprocessing.FlairPreprocessingConfig):
         return FileType(pattern="sub-*_ses-*_flair.nii*", description="FLAIR T2w MRI")

-    elif isinstance(config, preprocessing_config.DTIPreprocessingConfig):
+    elif isinstance(config, preprocessing.DTIPreprocessingConfig):
         return FileType(pattern="dwi/sub-*_ses-*_dwi.nii*", description="DWI NIfTI")

     else:
@@ -70,15 +70,15 @@ def bids_nii(


 def linear_nii(
-    config: preprocessing_config.PreprocessingConfig,
+    config: preprocessing.PreprocessingConfig,
 ) -> FileType:
-    if isinstance(config, preprocessing_config.T1PreprocessingConfig):
+    if isinstance(config, preprocessing.T1PreprocessingConfig):
         needed_pipeline = Preprocessing.T1_LINEAR
         modality = LinearModality.T1W
-    elif isinstance(config, preprocessing_config.T2PreprocessingConfig):
+    elif isinstance(config, preprocessing.T2PreprocessingConfig):
         needed_pipeline = Preprocessing.T2_LINEAR
         modality = LinearModality.T2W
-    elif isinstance(config, preprocessing_config.FlairPreprocessingConfig):
+    elif isinstance(config, preprocessing.FlairPreprocessingConfig):
         needed_pipeline = Preprocessing.FLAIR_LINEAR
         modality = LinearModality.FLAIR
     else:
@@ -102,7 +102,7 @@ def linear_nii(
     return file_type


-def dwi_dti(config: preprocessing_config.DTIPreprocessingConfig) -> FileType:
+def dwi_dti(config: preprocessing.DTIPreprocessingConfig) -> FileType:
     """Return the query dict required to capture DWI DTI images.
Parameters @@ -113,12 +113,12 @@ def dwi_dti(config: preprocessing_config.DTIPreprocessingConfig) -> FileType: ------- FileType : """ - if isinstance(config, preprocessing_config.DTIPreprocessingConfig): + if isinstance(config, preprocessing.DTIPreprocessingConfig): measure = config.dti_measure space = config.dti_space else: raise ClinicaDLArgumentError( - f"PreprocessingConfig is of type {config} but should be of type{preprocessing_config.DTIPreprocessingConfig}" + f"preprocessing is of type {config} but should be of type {preprocessing.DTIPreprocessingConfig}" ) return FileType( @@ -128,10 +128,10 @@ def dwi_dti(config: preprocessing_config.DTIPreprocessingConfig) -> FileType: ) -def pet_linear_nii(config: preprocessing_config.PETPreprocessingConfig) -> FileType: - if not isinstance(config, preprocessing_config.PETPreprocessingConfig): +def pet_linear_nii(config: preprocessing.PETPreprocessingConfig) -> FileType: + if not isinstance(config, preprocessing.PETPreprocessingConfig): raise ClinicaDLArgumentError( - f"PreprocessingConfig is of type {config} but should be of type{preprocessing_config.PETPreprocessingConfig}" + f"preprocessing is of type {config} but should be of type {preprocessing.PETPreprocessingConfig}" ) if config.use_uncropped_image: diff --git a/clinicadl/network/autoencoder/__init__.py b/clinicadl/experiment_manager/__init__.py similarity index 100% rename from clinicadl/network/autoencoder/__init__.py rename to clinicadl/experiment_manager/__init__.py diff --git a/clinicadl/maps_manager/config.py b/clinicadl/experiment_manager/config.py similarity index 100% rename from clinicadl/maps_manager/config.py rename to clinicadl/experiment_manager/config.py diff --git a/clinicadl/experiment_manager/experiment_manager.py b/clinicadl/experiment_manager/experiment_manager.py new file mode 100644 index 000000000..f3e4aaac8 --- /dev/null +++ b/clinicadl/experiment_manager/experiment_manager.py @@ -0,0 +1,7 @@ +from pathlib import Path + + +class ExperimentManager: + def __init__(self, maps_path: Path, overwrite: bool) -> None: + """TO COMPLETE""" + pass diff --git a/clinicadl/maps_manager/maps_manager.py b/clinicadl/experiment_manager/maps_manager.py similarity index 98% rename from clinicadl/maps_manager/maps_manager.py rename to clinicadl/experiment_manager/maps_manager.py index 10550a021..84c7806ca 100644 --- a/clinicadl/maps_manager/maps_manager.py +++ b/clinicadl/experiment_manager/maps_manager.py @@ -9,17 +9,17 @@ import pandas as pd import torch -from clinicadl.caps_dataset.caps_dataset_utils import read_json -from clinicadl.caps_dataset.data import ( +from clinicadl.dataset.caps_dataset import ( return_dataset, ) -from clinicadl.metrics.metric_module import MetricModule -from clinicadl.metrics.utils import ( +from clinicadl.dataset.caps_dataset_utils import read_json +from clinicadl.metrics.old_metrics.metric_module import MetricModule +from clinicadl.metrics.old_metrics.utils import ( check_selection_metric, ) from clinicadl.predictor.utils import get_prediction from clinicadl.splitter.config import SplitterConfig -from clinicadl.splitter.splitter import Splitter +from clinicadl.splitter.old_splitter import Splitter from clinicadl.trainer.tasks_utils import ( ensemble_prediction, evaluation_metrics, @@ -346,7 +346,7 @@ def _write_information(self): """ from datetime import datetime - import clinicadl.network as network_package + import clinicadl.networks.old_network as network_package model_class = getattr(network_package, self.architecture) args = list( @@ -589,7 +589,7 @@
def _init_model( gpu (bool): If given, a new value for the device of the model will be computed. network (int): Index of the network trained (used in multi-network setting only). """ - import clinicadl.network as network_package + import clinicadl.networks.old_network as network_package logger.debug(f"Initialization of model {self.architecture}") # or choose to implement a dictionary diff --git a/clinicadl/hugging_face/hugging_face.py b/clinicadl/hugging_face/hugging_face.py index 00f729e35..22b6bbb02 100644 --- a/clinicadl/hugging_face/hugging_face.py +++ b/clinicadl/hugging_face/hugging_face.py @@ -5,7 +5,7 @@ import toml -from clinicadl.caps_dataset.caps_dataset_utils import read_json +from clinicadl.dataset.caps_dataset_utils import read_json from clinicadl.utils.exceptions import ClinicaDLArgumentError from clinicadl.utils.iotools.maps_manager_utils import ( remove_unused_tasks, diff --git a/clinicadl/interpret/config.py b/clinicadl/interpret/config.py index 03036d4c9..d679a82e4 100644 --- a/clinicadl/interpret/config.py +++ b/clinicadl/interpret/config.py @@ -4,11 +4,13 @@ from pydantic import BaseModel, field_validator -from clinicadl.caps_dataset.data_config import DataConfig -from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig +from clinicadl.dataset.data_config import DataConfig +from clinicadl.dataset.dataloader_config import DataLoaderConfig +from clinicadl.experiment_manager.config import ( + MapsManagerConfig as MapsManagerConfigBase, +) +from clinicadl.experiment_manager.maps_manager import MapsManager from clinicadl.interpret.gradients import GradCam, Gradients, VanillaBackProp -from clinicadl.maps_manager.config import MapsManagerConfig as MapsManagerConfigBase -from clinicadl.maps_manager.maps_manager import MapsManager from clinicadl.predictor.validation import ValidationConfig from clinicadl.splitter.config import SplitConfig from clinicadl.transforms.config import TransformsConfig diff --git a/clinicadl/interpret/gradients.py b/clinicadl/interpret/gradients.py index b62308f38..d6e11815f 100644 --- a/clinicadl/interpret/gradients.py +++ b/clinicadl/interpret/gradients.py @@ -50,7 +50,7 @@ class GradCam(Gradients): """ def __init__(self, model): - from clinicadl.network.sub_network import CNN + from clinicadl.networks.old_network.sub_network import CNN super().__init__(model=model) if not isinstance(model, CNN): diff --git a/clinicadl/monai_metrics/__init__.py b/clinicadl/metrics/__init__.py similarity index 100% rename from clinicadl/monai_metrics/__init__.py rename to clinicadl/metrics/__init__.py diff --git a/clinicadl/monai_metrics/config/__init__.py b/clinicadl/metrics/config/__init__.py similarity index 100% rename from clinicadl/monai_metrics/config/__init__.py rename to clinicadl/metrics/config/__init__.py diff --git a/clinicadl/monai_metrics/config/base.py b/clinicadl/metrics/config/base.py similarity index 100% rename from clinicadl/monai_metrics/config/base.py rename to clinicadl/metrics/config/base.py diff --git a/clinicadl/monai_metrics/config/classification.py b/clinicadl/metrics/config/classification.py similarity index 100% rename from clinicadl/monai_metrics/config/classification.py rename to clinicadl/metrics/config/classification.py diff --git a/clinicadl/monai_metrics/config/enum.py b/clinicadl/metrics/config/enum.py similarity index 100% rename from clinicadl/monai_metrics/config/enum.py rename to clinicadl/metrics/config/enum.py diff --git a/clinicadl/monai_metrics/config/factory.py b/clinicadl/metrics/config/factory.py similarity 
index 100% rename from clinicadl/monai_metrics/config/factory.py rename to clinicadl/metrics/config/factory.py diff --git a/clinicadl/monai_metrics/config/generation.py b/clinicadl/metrics/config/generation.py similarity index 100% rename from clinicadl/monai_metrics/config/generation.py rename to clinicadl/metrics/config/generation.py diff --git a/clinicadl/monai_metrics/config/reconstruction.py b/clinicadl/metrics/config/reconstruction.py similarity index 100% rename from clinicadl/monai_metrics/config/reconstruction.py rename to clinicadl/metrics/config/reconstruction.py diff --git a/clinicadl/monai_metrics/config/regression.py b/clinicadl/metrics/config/regression.py similarity index 100% rename from clinicadl/monai_metrics/config/regression.py rename to clinicadl/metrics/config/regression.py diff --git a/clinicadl/monai_metrics/config/segmentation.py b/clinicadl/metrics/config/segmentation.py similarity index 100% rename from clinicadl/monai_metrics/config/segmentation.py rename to clinicadl/metrics/config/segmentation.py diff --git a/clinicadl/monai_metrics/factory.py b/clinicadl/metrics/factory.py similarity index 100% rename from clinicadl/monai_metrics/factory.py rename to clinicadl/metrics/factory.py diff --git a/clinicadl/metrics/metric_module.py b/clinicadl/metrics/old_metrics/metric_module.py similarity index 100% rename from clinicadl/metrics/metric_module.py rename to clinicadl/metrics/old_metrics/metric_module.py diff --git a/clinicadl/metrics/utils.py b/clinicadl/metrics/old_metrics/utils.py similarity index 100% rename from clinicadl/metrics/utils.py rename to clinicadl/metrics/old_metrics/utils.py diff --git a/clinicadl/network/cnn/__init__.py b/clinicadl/model/__init__.py similarity index 100% rename from clinicadl/network/cnn/__init__.py rename to clinicadl/model/__init__.py diff --git a/clinicadl/model/clinicadl_model.py b/clinicadl/model/clinicadl_model.py new file mode 100644 index 000000000..8785ed97b --- /dev/null +++ b/clinicadl/model/clinicadl_model.py @@ -0,0 +1,8 @@ +import torch.nn as nn +import torch.optim as optim + + +class ClinicaDLModel: + def __init__(self, network: nn.Module, loss: nn.Module, optimizer: optim.Optimizer): + """TO COMPLETE""" + pass diff --git a/clinicadl/monai_networks/__init__.py b/clinicadl/networks/__init__.py similarity index 100% rename from clinicadl/monai_networks/__init__.py rename to clinicadl/networks/__init__.py diff --git a/clinicadl/monai_networks/config/__init__.py b/clinicadl/networks/config/__init__.py similarity index 100% rename from clinicadl/monai_networks/config/__init__.py rename to clinicadl/networks/config/__init__.py diff --git a/clinicadl/monai_networks/config/autoencoder.py b/clinicadl/networks/config/autoencoder.py similarity index 95% rename from clinicadl/monai_networks/config/autoencoder.py rename to clinicadl/networks/config/autoencoder.py index b19108573..50ef4bcca 100644 --- a/clinicadl/monai_networks/config/autoencoder.py +++ b/clinicadl/networks/config/autoencoder.py @@ -2,7 +2,7 @@ from pydantic import PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ( +from clinicadl.networks.nn.layers.utils import ( ActivationParameters, UnpoolingMode, ) diff --git a/clinicadl/monai_networks/config/base.py b/clinicadl/networks/config/base.py similarity index 97% rename from clinicadl/monai_networks/config/base.py rename to clinicadl/networks/config/base.py index d5c0a6f9b..6d61d16fd 100644 --- a/clinicadl/monai_networks/config/base.py +++ b/clinicadl/networks/config/base.py @@ -4,7
+4,7 @@ from pydantic import BaseModel, ConfigDict, PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ActivationParameters +from clinicadl.networks.nn.layers.utils import ActivationParameters from clinicadl.utils.factories import DefaultFromLibrary diff --git a/clinicadl/monai_networks/config/cnn.py b/clinicadl/networks/config/cnn.py similarity index 100% rename from clinicadl/monai_networks/config/cnn.py rename to clinicadl/networks/config/cnn.py diff --git a/clinicadl/monai_networks/config/conv_decoder.py b/clinicadl/networks/config/conv_decoder.py similarity index 97% rename from clinicadl/monai_networks/config/conv_decoder.py rename to clinicadl/networks/config/conv_decoder.py index 5dc78dfec..91547e052 100644 --- a/clinicadl/monai_networks/config/conv_decoder.py +++ b/clinicadl/networks/config/conv_decoder.py @@ -2,7 +2,7 @@ from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ( +from clinicadl.networks.nn.layers.utils import ( ActivationParameters, ConvNormalizationParameters, ConvParameters, diff --git a/clinicadl/monai_networks/config/conv_encoder.py b/clinicadl/networks/config/conv_encoder.py similarity index 97% rename from clinicadl/monai_networks/config/conv_encoder.py rename to clinicadl/networks/config/conv_encoder.py index 499f69b19..1bddbc947 100644 --- a/clinicadl/monai_networks/config/conv_encoder.py +++ b/clinicadl/networks/config/conv_encoder.py @@ -2,7 +2,7 @@ from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ( +from clinicadl.networks.nn.layers.utils import ( ActivationParameters, ConvNormalizationParameters, ConvParameters, diff --git a/clinicadl/monai_networks/config/densenet.py b/clinicadl/networks/config/densenet.py similarity index 97% rename from clinicadl/monai_networks/config/densenet.py rename to clinicadl/networks/config/densenet.py index 4984d010b..022f26cca 100644 --- a/clinicadl/monai_networks/config/densenet.py +++ b/clinicadl/networks/config/densenet.py @@ -2,7 +2,7 @@ from pydantic import PositiveFloat, PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ActivationParameters +from clinicadl.networks.nn.layers.utils import ActivationParameters from clinicadl.utils.factories import DefaultFromLibrary from .base import ImplementedNetworks, NetworkConfig, NetworkType, PreTrainedConfig diff --git a/clinicadl/monai_networks/config/factory.py b/clinicadl/networks/config/factory.py similarity index 100% rename from clinicadl/monai_networks/config/factory.py rename to clinicadl/networks/config/factory.py diff --git a/clinicadl/monai_networks/config/generator.py b/clinicadl/networks/config/generator.py similarity index 100% rename from clinicadl/monai_networks/config/generator.py rename to clinicadl/networks/config/generator.py diff --git a/clinicadl/monai_networks/config/mlp.py b/clinicadl/networks/config/mlp.py similarity index 96% rename from clinicadl/monai_networks/config/mlp.py rename to clinicadl/networks/config/mlp.py index 5d12f303f..2f72eda88 100644 --- a/clinicadl/monai_networks/config/mlp.py +++ b/clinicadl/networks/config/mlp.py @@ -2,7 +2,7 @@ from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ( +from clinicadl.networks.nn.layers.utils import ( ActivationParameters, NormalizationParameters, ) diff --git 
a/clinicadl/monai_networks/config/resnet.py b/clinicadl/networks/config/resnet.py similarity index 95% rename from clinicadl/monai_networks/config/resnet.py rename to clinicadl/networks/config/resnet.py index 0f3141dd8..ddc53a125 100644 --- a/clinicadl/monai_networks/config/resnet.py +++ b/clinicadl/networks/config/resnet.py @@ -2,8 +2,8 @@ from pydantic import PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ActivationParameters -from clinicadl.monai_networks.nn.resnet import ResNetBlockType +from clinicadl.networks.nn.layers.utils import ActivationParameters +from clinicadl.networks.nn.resnet import ResNetBlockType from clinicadl.utils.factories import DefaultFromLibrary from .base import ImplementedNetworks, NetworkConfig, NetworkType, PreTrainedConfig diff --git a/clinicadl/monai_networks/config/senet.py b/clinicadl/networks/config/senet.py similarity index 100% rename from clinicadl/monai_networks/config/senet.py rename to clinicadl/networks/config/senet.py diff --git a/clinicadl/monai_networks/config/unet.py b/clinicadl/networks/config/unet.py similarity index 93% rename from clinicadl/monai_networks/config/unet.py rename to clinicadl/networks/config/unet.py index abd87817e..b1faf542e 100644 --- a/clinicadl/monai_networks/config/unet.py +++ b/clinicadl/networks/config/unet.py @@ -2,7 +2,7 @@ from pydantic import PositiveFloat, PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ActivationParameters +from clinicadl.networks.nn.layers.utils import ActivationParameters from clinicadl.utils.factories import DefaultFromLibrary from .base import ImplementedNetworks, NetworkConfig diff --git a/clinicadl/monai_networks/config/vit.py b/clinicadl/networks/config/vit.py similarity index 94% rename from clinicadl/monai_networks/config/vit.py rename to clinicadl/networks/config/vit.py index 5059df790..ea4103f5d 100644 --- a/clinicadl/monai_networks/config/vit.py +++ b/clinicadl/networks/config/vit.py @@ -2,8 +2,8 @@ from pydantic import PositiveFloat, PositiveInt, computed_field -from clinicadl.monai_networks.nn.layers.utils import ActivationParameters -from clinicadl.monai_networks.nn.vit import PosEmbedType +from clinicadl.networks.nn.layers.utils import ActivationParameters +from clinicadl.networks.nn.vit import PosEmbedType from clinicadl.utils.factories import DefaultFromLibrary from .base import ImplementedNetworks, NetworkConfig, NetworkType, PreTrainedConfig diff --git a/clinicadl/monai_networks/factory.py b/clinicadl/networks/factory.py similarity index 99% rename from clinicadl/monai_networks/factory.py rename to clinicadl/networks/factory.py index 36a4d1d46..a1822af52 100644 --- a/clinicadl/monai_networks/factory.py +++ b/clinicadl/networks/factory.py @@ -4,7 +4,7 @@ import torch.nn as nn from pydantic import BaseModel -import clinicadl.monai_networks.nn as nets +import clinicadl.networks.nn as nets from clinicadl.utils.factories import DefaultFromLibrary, get_args_and_defaults from .config import ( diff --git a/clinicadl/monai_networks/nn/__init__.py b/clinicadl/networks/nn/__init__.py similarity index 100% rename from clinicadl/monai_networks/nn/__init__.py rename to clinicadl/networks/nn/__init__.py diff --git a/clinicadl/monai_networks/nn/att_unet.py b/clinicadl/networks/nn/att_unet.py similarity index 100% rename from clinicadl/monai_networks/nn/att_unet.py rename to clinicadl/networks/nn/att_unet.py diff --git a/clinicadl/monai_networks/nn/autoencoder.py b/clinicadl/networks/nn/autoencoder.py similarity index 100% 
rename from clinicadl/monai_networks/nn/autoencoder.py rename to clinicadl/networks/nn/autoencoder.py diff --git a/clinicadl/monai_networks/nn/cnn.py b/clinicadl/networks/nn/cnn.py similarity index 100% rename from clinicadl/monai_networks/nn/cnn.py rename to clinicadl/networks/nn/cnn.py diff --git a/clinicadl/monai_networks/nn/conv_decoder.py b/clinicadl/networks/nn/conv_decoder.py similarity index 100% rename from clinicadl/monai_networks/nn/conv_decoder.py rename to clinicadl/networks/nn/conv_decoder.py diff --git a/clinicadl/monai_networks/nn/conv_encoder.py b/clinicadl/networks/nn/conv_encoder.py similarity index 100% rename from clinicadl/monai_networks/nn/conv_encoder.py rename to clinicadl/networks/nn/conv_encoder.py diff --git a/clinicadl/monai_networks/nn/densenet.py b/clinicadl/networks/nn/densenet.py similarity index 100% rename from clinicadl/monai_networks/nn/densenet.py rename to clinicadl/networks/nn/densenet.py diff --git a/clinicadl/monai_networks/nn/generator.py b/clinicadl/networks/nn/generator.py similarity index 100% rename from clinicadl/monai_networks/nn/generator.py rename to clinicadl/networks/nn/generator.py diff --git a/clinicadl/network/unet/__init__.py b/clinicadl/networks/nn/layers/__init__.py similarity index 100% rename from clinicadl/network/unet/__init__.py rename to clinicadl/networks/nn/layers/__init__.py diff --git a/clinicadl/monai_networks/nn/layers/resnet.py b/clinicadl/networks/nn/layers/resnet.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/resnet.py rename to clinicadl/networks/nn/layers/resnet.py diff --git a/clinicadl/monai_networks/nn/layers/senet.py b/clinicadl/networks/nn/layers/senet.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/senet.py rename to clinicadl/networks/nn/layers/senet.py diff --git a/clinicadl/monai_networks/nn/layers/unet.py b/clinicadl/networks/nn/layers/unet.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/unet.py rename to clinicadl/networks/nn/layers/unet.py diff --git a/clinicadl/monai_networks/nn/layers/unpool.py b/clinicadl/networks/nn/layers/unpool.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/unpool.py rename to clinicadl/networks/nn/layers/unpool.py diff --git a/clinicadl/monai_networks/nn/layers/utils/__init__.py b/clinicadl/networks/nn/layers/utils/__init__.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/utils/__init__.py rename to clinicadl/networks/nn/layers/utils/__init__.py diff --git a/clinicadl/monai_networks/nn/layers/utils/enum.py b/clinicadl/networks/nn/layers/utils/enum.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/utils/enum.py rename to clinicadl/networks/nn/layers/utils/enum.py diff --git a/clinicadl/monai_networks/nn/layers/utils/types.py b/clinicadl/networks/nn/layers/utils/types.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/utils/types.py rename to clinicadl/networks/nn/layers/utils/types.py diff --git a/clinicadl/monai_networks/nn/layers/vit.py b/clinicadl/networks/nn/layers/vit.py similarity index 100% rename from clinicadl/monai_networks/nn/layers/vit.py rename to clinicadl/networks/nn/layers/vit.py diff --git a/clinicadl/monai_networks/nn/mlp.py b/clinicadl/networks/nn/mlp.py similarity index 100% rename from clinicadl/monai_networks/nn/mlp.py rename to clinicadl/networks/nn/mlp.py diff --git a/clinicadl/monai_networks/nn/resnet.py b/clinicadl/networks/nn/resnet.py similarity index 100% rename from 
clinicadl/monai_networks/nn/resnet.py rename to clinicadl/networks/nn/resnet.py diff --git a/clinicadl/monai_networks/nn/senet.py b/clinicadl/networks/nn/senet.py similarity index 100% rename from clinicadl/monai_networks/nn/senet.py rename to clinicadl/networks/nn/senet.py diff --git a/clinicadl/monai_networks/nn/unet.py b/clinicadl/networks/nn/unet.py similarity index 100% rename from clinicadl/monai_networks/nn/unet.py rename to clinicadl/networks/nn/unet.py diff --git a/clinicadl/monai_networks/nn/utils/__init__.py b/clinicadl/networks/nn/utils/__init__.py similarity index 100% rename from clinicadl/monai_networks/nn/utils/__init__.py rename to clinicadl/networks/nn/utils/__init__.py diff --git a/clinicadl/monai_networks/nn/utils/checks.py b/clinicadl/networks/nn/utils/checks.py similarity index 100% rename from clinicadl/monai_networks/nn/utils/checks.py rename to clinicadl/networks/nn/utils/checks.py diff --git a/clinicadl/monai_networks/nn/utils/shapes.py b/clinicadl/networks/nn/utils/shapes.py similarity index 100% rename from clinicadl/monai_networks/nn/utils/shapes.py rename to clinicadl/networks/nn/utils/shapes.py diff --git a/clinicadl/monai_networks/nn/vae.py b/clinicadl/networks/nn/vae.py similarity index 100% rename from clinicadl/monai_networks/nn/vae.py rename to clinicadl/networks/nn/vae.py diff --git a/clinicadl/monai_networks/nn/vit.py b/clinicadl/networks/nn/vit.py similarity index 100% rename from clinicadl/monai_networks/nn/vit.py rename to clinicadl/networks/nn/vit.py diff --git a/clinicadl/network/__init__.py b/clinicadl/networks/old_network/__init__.py similarity index 100% rename from clinicadl/network/__init__.py rename to clinicadl/networks/old_network/__init__.py diff --git a/clinicadl/network/vae/__init__.py b/clinicadl/networks/old_network/autoencoder/__init__.py similarity index 100% rename from clinicadl/network/vae/__init__.py rename to clinicadl/networks/old_network/autoencoder/__init__.py diff --git a/clinicadl/network/autoencoder/cnn_transformer.py b/clinicadl/networks/old_network/autoencoder/cnn_transformer.py similarity index 98% rename from clinicadl/network/autoencoder/cnn_transformer.py rename to clinicadl/networks/old_network/autoencoder/cnn_transformer.py index 39270d2a1..d60789879 100644 --- a/clinicadl/network/autoencoder/cnn_transformer.py +++ b/clinicadl/networks/old_network/autoencoder/cnn_transformer.py @@ -2,7 +2,7 @@ from torch import nn -from clinicadl.network.network_utils import ( +from clinicadl.networks.old_network.network_utils import ( CropMaxUnpool2d, CropMaxUnpool3d, PadMaxPool2d, diff --git a/clinicadl/network/autoencoder/models.py b/clinicadl/networks/old_network/autoencoder/models.py similarity index 89% rename from clinicadl/network/autoencoder/models.py rename to clinicadl/networks/old_network/autoencoder/models.py index 8ac40191e..ba0e3928a 100644 --- a/clinicadl/network/autoencoder/models.py +++ b/clinicadl/networks/old_network/autoencoder/models.py @@ -1,9 +1,9 @@ from torch import nn -from clinicadl.network.autoencoder.cnn_transformer import CNN_Transformer -from clinicadl.network.cnn.models import Conv4_FC3, Conv5_FC3 -from clinicadl.network.sub_network import AutoEncoder -from clinicadl.network.vae.vae_layers import ( +from clinicadl.networks.old_network.autoencoder.cnn_transformer import CNN_Transformer +from clinicadl.networks.old_network.cnn.models import Conv4_FC3, Conv5_FC3 +from clinicadl.networks.old_network.sub_network import AutoEncoder +from clinicadl.networks.old_network.vae.vae_layers import ( 
DecoderLayer3D, EncoderLayer3D, Flatten, diff --git a/clinicadl/network/cnn/SECNN.py b/clinicadl/networks/old_network/cnn/SECNN.py similarity index 100% rename from clinicadl/network/cnn/SECNN.py rename to clinicadl/networks/old_network/cnn/SECNN.py diff --git a/clinicadl/nn/__init__.py b/clinicadl/networks/old_network/cnn/__init__.py similarity index 100% rename from clinicadl/nn/__init__.py rename to clinicadl/networks/old_network/cnn/__init__.py diff --git a/clinicadl/network/cnn/models.py b/clinicadl/networks/old_network/cnn/models.py similarity index 97% rename from clinicadl/network/cnn/models.py rename to clinicadl/networks/old_network/cnn/models.py index 87d5e3ce5..af03969e4 100644 --- a/clinicadl/network/cnn/models.py +++ b/clinicadl/networks/old_network/cnn/models.py @@ -4,11 +4,11 @@ from torch import nn from torchvision.models.resnet import BasicBlock -from clinicadl.network.cnn.resnet import ResNetDesigner, model_urls -from clinicadl.network.cnn.resnet3D import ResNetDesigner3D -from clinicadl.network.cnn.SECNN import SECNNDesigner3D -from clinicadl.network.network_utils import PadMaxPool2d, PadMaxPool3d -from clinicadl.network.sub_network import CNN, CNN_SSDA +from clinicadl.networks.old_network.cnn.resnet import ResNetDesigner, model_urls +from clinicadl.networks.old_network.cnn.resnet3D import ResNetDesigner3D +from clinicadl.networks.old_network.cnn.SECNN import SECNNDesigner3D +from clinicadl.networks.old_network.network_utils import PadMaxPool2d, PadMaxPool3d +from clinicadl.networks.old_network.sub_network import CNN, CNN_SSDA def get_layers_fn(input_size): diff --git a/clinicadl/network/cnn/random.py b/clinicadl/networks/old_network/cnn/random.py similarity index 98% rename from clinicadl/network/cnn/random.py rename to clinicadl/networks/old_network/cnn/random.py index 897a014d1..221fee3f5 100644 --- a/clinicadl/network/cnn/random.py +++ b/clinicadl/networks/old_network/cnn/random.py @@ -1,7 +1,7 @@ import numpy as np -from clinicadl.network.network_utils import * -from clinicadl.network.sub_network import CNN +from clinicadl.networks.old_network.network_utils import * +from clinicadl.networks.old_network.sub_network import CNN from clinicadl.utils.exceptions import ClinicaDLNetworksError diff --git a/clinicadl/network/cnn/resnet.py b/clinicadl/networks/old_network/cnn/resnet.py similarity index 100% rename from clinicadl/network/cnn/resnet.py rename to clinicadl/networks/old_network/cnn/resnet.py diff --git a/clinicadl/network/cnn/resnet3D.py b/clinicadl/networks/old_network/cnn/resnet3D.py similarity index 100% rename from clinicadl/network/cnn/resnet3D.py rename to clinicadl/networks/old_network/cnn/resnet3D.py diff --git a/clinicadl/network/config.py b/clinicadl/networks/old_network/config.py similarity index 100% rename from clinicadl/network/config.py rename to clinicadl/networks/old_network/config.py diff --git a/clinicadl/network/network.py b/clinicadl/networks/old_network/network.py similarity index 100% rename from clinicadl/network/network.py rename to clinicadl/networks/old_network/network.py diff --git a/clinicadl/network/network_utils.py b/clinicadl/networks/old_network/network_utils.py similarity index 100% rename from clinicadl/network/network_utils.py rename to clinicadl/networks/old_network/network_utils.py diff --git a/clinicadl/prepare_data/__init__.py b/clinicadl/networks/old_network/nn/__init__.py similarity index 100% rename from clinicadl/prepare_data/__init__.py rename to clinicadl/networks/old_network/nn/__init__.py diff --git 
a/clinicadl/nn/blocks/__init__.py b/clinicadl/networks/old_network/nn/blocks/__init__.py similarity index 100% rename from clinicadl/nn/blocks/__init__.py rename to clinicadl/networks/old_network/nn/blocks/__init__.py diff --git a/clinicadl/nn/blocks/decoder.py b/clinicadl/networks/old_network/nn/blocks/decoder.py similarity index 98% rename from clinicadl/nn/blocks/decoder.py rename to clinicadl/networks/old_network/nn/blocks/decoder.py index 27938c8d7..06db04937 100644 --- a/clinicadl/nn/blocks/decoder.py +++ b/clinicadl/networks/old_network/nn/blocks/decoder.py @@ -1,7 +1,7 @@ import torch.nn as nn import torch.nn.functional as F -from clinicadl.nn.layers import Unflatten2D, get_norm_layer +from clinicadl.networks.old_network.nn.layers import Unflatten2D, get_norm_layer __all__ = [ "Decoder2D", diff --git a/clinicadl/nn/blocks/encoder.py b/clinicadl/networks/old_network/nn/blocks/encoder.py similarity index 98% rename from clinicadl/nn/blocks/encoder.py rename to clinicadl/networks/old_network/nn/blocks/encoder.py index fde13b956..290855dae 100644 --- a/clinicadl/nn/blocks/encoder.py +++ b/clinicadl/networks/old_network/nn/blocks/encoder.py @@ -1,7 +1,7 @@ import torch.nn as nn import torch.nn.functional as F -from clinicadl.nn.layers import get_norm_layer +from clinicadl.networks.old_network.nn.layers import get_norm_layer __all__ = [ "Encoder2D", diff --git a/clinicadl/nn/blocks/residual.py b/clinicadl/networks/old_network/nn/blocks/residual.py similarity index 100% rename from clinicadl/nn/blocks/residual.py rename to clinicadl/networks/old_network/nn/blocks/residual.py diff --git a/clinicadl/nn/blocks/se.py b/clinicadl/networks/old_network/nn/blocks/se.py similarity index 100% rename from clinicadl/nn/blocks/se.py rename to clinicadl/networks/old_network/nn/blocks/se.py diff --git a/clinicadl/nn/blocks/unet.py b/clinicadl/networks/old_network/nn/blocks/unet.py similarity index 100% rename from clinicadl/nn/blocks/unet.py rename to clinicadl/networks/old_network/nn/blocks/unet.py diff --git a/clinicadl/nn/layers/__init__.py b/clinicadl/networks/old_network/nn/layers/__init__.py similarity index 100% rename from clinicadl/nn/layers/__init__.py rename to clinicadl/networks/old_network/nn/layers/__init__.py diff --git a/clinicadl/nn/layers/factory/__init__.py b/clinicadl/networks/old_network/nn/layers/factory/__init__.py similarity index 100% rename from clinicadl/nn/layers/factory/__init__.py rename to clinicadl/networks/old_network/nn/layers/factory/__init__.py diff --git a/clinicadl/nn/layers/factory/conv.py b/clinicadl/networks/old_network/nn/layers/factory/conv.py similarity index 100% rename from clinicadl/nn/layers/factory/conv.py rename to clinicadl/networks/old_network/nn/layers/factory/conv.py diff --git a/clinicadl/nn/layers/factory/norm.py b/clinicadl/networks/old_network/nn/layers/factory/norm.py similarity index 100% rename from clinicadl/nn/layers/factory/norm.py rename to clinicadl/networks/old_network/nn/layers/factory/norm.py diff --git a/clinicadl/nn/layers/factory/pool.py b/clinicadl/networks/old_network/nn/layers/factory/pool.py similarity index 100% rename from clinicadl/nn/layers/factory/pool.py rename to clinicadl/networks/old_network/nn/layers/factory/pool.py diff --git a/clinicadl/nn/layers/pool.py b/clinicadl/networks/old_network/nn/layers/pool.py similarity index 100% rename from clinicadl/nn/layers/pool.py rename to clinicadl/networks/old_network/nn/layers/pool.py diff --git a/clinicadl/nn/layers/reverse.py 
b/clinicadl/networks/old_network/nn/layers/reverse.py similarity index 100% rename from clinicadl/nn/layers/reverse.py rename to clinicadl/networks/old_network/nn/layers/reverse.py diff --git a/clinicadl/nn/layers/unflatten.py b/clinicadl/networks/old_network/nn/layers/unflatten.py similarity index 100% rename from clinicadl/nn/layers/unflatten.py rename to clinicadl/networks/old_network/nn/layers/unflatten.py diff --git a/clinicadl/nn/layers/unpool.py b/clinicadl/networks/old_network/nn/layers/unpool.py similarity index 100% rename from clinicadl/nn/layers/unpool.py rename to clinicadl/networks/old_network/nn/layers/unpool.py diff --git a/clinicadl/nn/networks/__init__.py b/clinicadl/networks/old_network/nn/networks/__init__.py similarity index 100% rename from clinicadl/nn/networks/__init__.py rename to clinicadl/networks/old_network/nn/networks/__init__.py diff --git a/clinicadl/nn/networks/ae.py b/clinicadl/networks/old_network/nn/networks/ae.py similarity index 92% rename from clinicadl/nn/networks/ae.py rename to clinicadl/networks/old_network/nn/networks/ae.py index 1a8ed283f..aabe9b15a 100644 --- a/clinicadl/nn/networks/ae.py +++ b/clinicadl/networks/old_network/nn/networks/ae.py @@ -1,17 +1,17 @@ import numpy as np from torch import nn -from clinicadl.nn.blocks import Decoder3D, Encoder3D -from clinicadl.nn.layers import ( +from clinicadl.networks.old_network.nn.blocks import Decoder3D, Encoder3D +from clinicadl.networks.old_network.nn.layers import ( CropMaxUnpool2d, CropMaxUnpool3d, PadMaxPool2d, PadMaxPool3d, Unflatten3D, ) -from clinicadl.nn.networks.cnn import Conv4_FC3, Conv5_FC3 -from clinicadl.nn.networks.factory import autoencoder_from_cnn -from clinicadl.nn.utils import compute_output_size +from clinicadl.networks.old_network.nn.networks.cnn import Conv4_FC3, Conv5_FC3 +from clinicadl.networks.old_network.nn.networks.factory import autoencoder_from_cnn +from clinicadl.networks.old_network.nn.utils import compute_output_size from clinicadl.utils.enum import BaseEnum diff --git a/clinicadl/nn/networks/cnn.py b/clinicadl/networks/old_network/nn/networks/cnn.py similarity index 99% rename from clinicadl/nn/networks/cnn.py rename to clinicadl/networks/old_network/nn/networks/cnn.py index 5fe596bcb..cfdf610d7 100644 --- a/clinicadl/nn/networks/cnn.py +++ b/clinicadl/networks/old_network/nn/networks/cnn.py @@ -4,7 +4,7 @@ from torch import nn from torchvision.models.resnet import BasicBlock -from clinicadl.nn.layers.factory import ( +from clinicadl.networks.old_network.nn.layers.factory import ( get_conv_layer, get_norm_layer, get_pool_layer, diff --git a/clinicadl/nn/networks/factory/__init__.py b/clinicadl/networks/old_network/nn/networks/factory/__init__.py similarity index 100% rename from clinicadl/nn/networks/factory/__init__.py rename to clinicadl/networks/old_network/nn/networks/factory/__init__.py diff --git a/clinicadl/nn/networks/factory/ae.py b/clinicadl/networks/old_network/nn/networks/factory/ae.py similarity index 97% rename from clinicadl/nn/networks/factory/ae.py rename to clinicadl/networks/old_network/nn/networks/factory/ae.py index fccb14484..99dcd162e 100644 --- a/clinicadl/nn/networks/factory/ae.py +++ b/clinicadl/networks/old_network/nn/networks/factory/ae.py @@ -5,7 +5,7 @@ from torch import nn -from clinicadl.nn.layers import ( +from clinicadl.networks.old_network.nn.layers import ( CropMaxUnpool2d, CropMaxUnpool3d, PadMaxPool2d, @@ -13,7 +13,7 @@ ) if TYPE_CHECKING: - from clinicadl.nn.networks.cnn import CNN + from 
clinicadl.networks.old_network.nn.networks.cnn import CNN def autoencoder_from_cnn(model: CNN) -> Tuple[nn.Module, nn.Module]: diff --git a/clinicadl/nn/networks/factory/resnet.py b/clinicadl/networks/old_network/nn/networks/factory/resnet.py similarity index 98% rename from clinicadl/nn/networks/factory/resnet.py rename to clinicadl/networks/old_network/nn/networks/factory/resnet.py index 251199c92..0500c9ece 100644 --- a/clinicadl/nn/networks/factory/resnet.py +++ b/clinicadl/networks/old_network/nn/networks/factory/resnet.py @@ -3,7 +3,7 @@ import torch from torch import nn -from clinicadl.nn.blocks import ResBlock +from clinicadl.networks.old_network.nn.blocks import ResBlock model_urls = {"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth"} diff --git a/clinicadl/nn/networks/factory/secnn.py b/clinicadl/networks/old_network/nn/networks/factory/secnn.py similarity index 96% rename from clinicadl/nn/networks/factory/secnn.py rename to clinicadl/networks/old_network/nn/networks/factory/secnn.py index 270f0a357..f12e6de1a 100644 --- a/clinicadl/nn/networks/factory/secnn.py +++ b/clinicadl/networks/old_network/nn/networks/factory/secnn.py @@ -1,7 +1,7 @@ import torch import torch.nn as nn -from clinicadl.nn.blocks import ResBlock_SE +from clinicadl.networks.old_network.nn.blocks import ResBlock_SE class SECNNDesigner3D(nn.Module): diff --git a/clinicadl/nn/networks/random.py b/clinicadl/networks/old_network/nn/networks/random.py similarity index 98% rename from clinicadl/nn/networks/random.py rename to clinicadl/networks/old_network/nn/networks/random.py index 50b18dd60..4122cdf0b 100644 --- a/clinicadl/nn/networks/random.py +++ b/clinicadl/networks/old_network/nn/networks/random.py @@ -1,8 +1,8 @@ import numpy as np import torch.nn as nn -from clinicadl.nn.layers import PadMaxPool2d, PadMaxPool3d -from clinicadl.nn.networks.cnn import CNN +from clinicadl.networks.old_network.nn.layers import PadMaxPool2d, PadMaxPool3d +from clinicadl.networks.old_network.nn.networks.cnn import CNN from clinicadl.utils.exceptions import ClinicaDLNetworksError diff --git a/clinicadl/nn/networks/ssda.py b/clinicadl/networks/old_network/nn/networks/ssda.py similarity index 98% rename from clinicadl/nn/networks/ssda.py rename to clinicadl/networks/old_network/nn/networks/ssda.py index a87cb33b5..a774f5c86 100644 --- a/clinicadl/nn/networks/ssda.py +++ b/clinicadl/networks/old_network/nn/networks/ssda.py @@ -2,7 +2,7 @@ import torch import torch.nn as nn -from clinicadl.nn.layers import ( +from clinicadl.networks.old_network.nn.layers import ( GradientReversal, get_conv_layer, get_norm_layer, diff --git a/clinicadl/nn/networks/unet.py b/clinicadl/networks/old_network/nn/networks/unet.py similarity index 90% rename from clinicadl/nn/networks/unet.py rename to clinicadl/networks/old_network/nn/networks/unet.py index 45850de29..36450b01f 100644 --- a/clinicadl/nn/networks/unet.py +++ b/clinicadl/networks/old_network/nn/networks/unet.py @@ -1,6 +1,6 @@ from torch import nn -from clinicadl.nn.blocks import UNetDown, UNetFinalLayer, UNetUp +from clinicadl.networks.old_network.nn.blocks import UNetDown, UNetFinalLayer, UNetUp class UNet(nn.Module): diff --git a/clinicadl/nn/networks/vae.py b/clinicadl/networks/old_network/nn/networks/vae.py similarity index 99% rename from clinicadl/nn/networks/vae.py rename to clinicadl/networks/old_network/nn/networks/vae.py index 9e9b3e72f..fe7564ef9 100644 --- a/clinicadl/nn/networks/vae.py +++ b/clinicadl/networks/old_network/nn/networks/vae.py @@ -1,14 +1,14 
@@ import torch import torch.nn as nn -from clinicadl.nn.blocks import ( +from clinicadl.networks.old_network.nn.blocks import ( Decoder3D, Encoder3D, VAE_Decoder2D, VAE_Encoder2D, ) -from clinicadl.nn.layers import Unflatten3D -from clinicadl.nn.utils import multiply_list +from clinicadl.networks.old_network.nn.layers import Unflatten3D +from clinicadl.networks.old_network.nn.utils import multiply_list from clinicadl.utils.enum import BaseEnum diff --git a/clinicadl/nn/utils.py b/clinicadl/networks/old_network/nn/utils.py similarity index 100% rename from clinicadl/nn/utils.py rename to clinicadl/networks/old_network/nn/utils.py diff --git a/clinicadl/network/sub_network.py b/clinicadl/networks/old_network/sub_network.py similarity index 98% rename from clinicadl/network/sub_network.py rename to clinicadl/networks/old_network/sub_network.py index 9d17e8600..e3feb1347 100644 --- a/clinicadl/network/sub_network.py +++ b/clinicadl/networks/old_network/sub_network.py @@ -4,8 +4,8 @@ import torch from torch import nn -from clinicadl.network.network import Network -from clinicadl.network.network_utils import ( +from clinicadl.networks.old_network.network import Network +from clinicadl.networks.old_network.network_utils import ( CropMaxUnpool2d, CropMaxUnpool3d, PadMaxPool2d, diff --git a/clinicadl/networks/old_network/unet/__init__.py b/clinicadl/networks/old_network/unet/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/network/unet/unet.py b/clinicadl/networks/old_network/unet/unet.py similarity index 98% rename from clinicadl/network/unet/unet.py rename to clinicadl/networks/old_network/unet/unet.py index 3743f13d8..f23729def 100644 --- a/clinicadl/network/unet/unet.py +++ b/clinicadl/networks/old_network/unet/unet.py @@ -1,7 +1,7 @@ import torch from torch import nn -from clinicadl.network.network import Network +from clinicadl.networks.old_network.network import Network class UNetDown(nn.Module): diff --git a/clinicadl/networks/old_network/vae/__init__.py b/clinicadl/networks/old_network/vae/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/network/vae/advanced_CVAE.py b/clinicadl/networks/old_network/vae/advanced_CVAE.py similarity index 97% rename from clinicadl/network/vae/advanced_CVAE.py rename to clinicadl/networks/old_network/vae/advanced_CVAE.py index 2da43916e..d174df157 100644 --- a/clinicadl/network/vae/advanced_CVAE.py +++ b/clinicadl/networks/old_network/vae/advanced_CVAE.py @@ -2,8 +2,8 @@ import torch.nn as nn import torch.nn.functional as F -from clinicadl.network.network import Network -from clinicadl.network.vae.vae_utils import multiply_list +from clinicadl.networks.old_network.network import Network +from clinicadl.networks.old_network.vae.vae_utils import multiply_list class CVAE_3D_final_conv(Network): diff --git a/clinicadl/network/vae/base_vae.py b/clinicadl/networks/old_network/vae/base_vae.py similarity index 97% rename from clinicadl/network/vae/base_vae.py rename to clinicadl/networks/old_network/vae/base_vae.py index b9ccb4808..c8d2cbef2 100644 --- a/clinicadl/network/vae/base_vae.py +++ b/clinicadl/networks/old_network/vae/base_vae.py @@ -1,6 +1,6 @@ import torch -from clinicadl.network.network import Network +from clinicadl.networks.old_network.network import Network class BaseVAE(Network): diff --git a/clinicadl/network/vae/convolutional_VAE.py b/clinicadl/networks/old_network/vae/convolutional_VAE.py similarity index 98% rename from clinicadl/network/vae/convolutional_VAE.py rename to 
clinicadl/networks/old_network/vae/convolutional_VAE.py index ab29c842e..5021b826d 100644 --- a/clinicadl/network/vae/convolutional_VAE.py +++ b/clinicadl/networks/old_network/vae/convolutional_VAE.py @@ -4,8 +4,8 @@ import torch.nn as nn import torch.nn.functional as F -from clinicadl.network.network import Network -from clinicadl.network.vae.vae_utils import multiply_list +from clinicadl.networks.old_network.network import Network +from clinicadl.networks.old_network.vae.vae_utils import multiply_list class CVAE_3D(Network): diff --git a/clinicadl/network/vae/vae_layers.py b/clinicadl/networks/old_network/vae/vae_layers.py similarity index 99% rename from clinicadl/network/vae/vae_layers.py rename to clinicadl/networks/old_network/vae/vae_layers.py index dfa9f0e15..a84067b99 100644 --- a/clinicadl/network/vae/vae_layers.py +++ b/clinicadl/networks/old_network/vae/vae_layers.py @@ -1,7 +1,7 @@ import torch.nn.functional as F from torch import nn -from clinicadl.network.vae.vae_utils import get_norm2d, get_norm3d +from clinicadl.networks.old_network.vae.vae_utils import get_norm2d, get_norm3d class EncoderLayer2D(nn.Module): diff --git a/clinicadl/network/vae/vae_utils.py b/clinicadl/networks/old_network/vae/vae_utils.py similarity index 100% rename from clinicadl/network/vae/vae_utils.py rename to clinicadl/networks/old_network/vae/vae_utils.py diff --git a/clinicadl/network/vae/vanilla_vae.py b/clinicadl/networks/old_network/vae/vanilla_vae.py similarity index 99% rename from clinicadl/network/vae/vanilla_vae.py rename to clinicadl/networks/old_network/vae/vanilla_vae.py index 200db6cc1..a7494f385 100644 --- a/clinicadl/network/vae/vanilla_vae.py +++ b/clinicadl/networks/old_network/vae/vanilla_vae.py @@ -1,8 +1,8 @@ import torch from torch import nn -from clinicadl.network.vae.base_vae import BaseVAE -from clinicadl.network.vae.vae_layers import ( +from clinicadl.networks.old_network.vae.base_vae import BaseVAE +from clinicadl.networks.old_network.vae.vae_layers import ( DecoderLayer3D, EncoderLayer3D, Flatten, diff --git a/clinicadl/optim/__init__.py b/clinicadl/optimization/__init__.py similarity index 100% rename from clinicadl/optim/__init__.py rename to clinicadl/optimization/__init__.py diff --git a/clinicadl/optim/config.py b/clinicadl/optimization/config.py similarity index 100% rename from clinicadl/optim/config.py rename to clinicadl/optimization/config.py diff --git a/clinicadl/optim/early_stopping/__init__.py b/clinicadl/optimization/early_stopping/__init__.py similarity index 100% rename from clinicadl/optim/early_stopping/__init__.py rename to clinicadl/optimization/early_stopping/__init__.py diff --git a/clinicadl/optim/early_stopping/config.py b/clinicadl/optimization/early_stopping/config.py similarity index 100% rename from clinicadl/optim/early_stopping/config.py rename to clinicadl/optimization/early_stopping/config.py diff --git a/clinicadl/optim/early_stopping/early_stopper.py b/clinicadl/optimization/early_stopping/early_stopper.py similarity index 100% rename from clinicadl/optim/early_stopping/early_stopper.py rename to clinicadl/optimization/early_stopping/early_stopper.py diff --git a/clinicadl/optim/lr_scheduler/__init__.py b/clinicadl/optimization/lr_scheduler/__init__.py similarity index 100% rename from clinicadl/optim/lr_scheduler/__init__.py rename to clinicadl/optimization/lr_scheduler/__init__.py diff --git a/clinicadl/optim/lr_scheduler/config.py b/clinicadl/optimization/lr_scheduler/config.py similarity index 100% rename from 
clinicadl/optim/lr_scheduler/config.py rename to clinicadl/optimization/lr_scheduler/config.py diff --git a/clinicadl/optim/lr_scheduler/enum.py b/clinicadl/optimization/lr_scheduler/enum.py similarity index 100% rename from clinicadl/optim/lr_scheduler/enum.py rename to clinicadl/optimization/lr_scheduler/enum.py diff --git a/clinicadl/optim/lr_scheduler/factory.py b/clinicadl/optimization/lr_scheduler/factory.py similarity index 100% rename from clinicadl/optim/lr_scheduler/factory.py rename to clinicadl/optimization/lr_scheduler/factory.py diff --git a/clinicadl/optim/optimizer/__init__.py b/clinicadl/optimization/optimizer/__init__.py similarity index 100% rename from clinicadl/optim/optimizer/__init__.py rename to clinicadl/optimization/optimizer/__init__.py diff --git a/clinicadl/optim/optimizer/config.py b/clinicadl/optimization/optimizer/config.py similarity index 100% rename from clinicadl/optim/optimizer/config.py rename to clinicadl/optimization/optimizer/config.py diff --git a/clinicadl/optim/optimizer/enum.py b/clinicadl/optimization/optimizer/enum.py similarity index 100% rename from clinicadl/optim/optimizer/enum.py rename to clinicadl/optimization/optimizer/enum.py diff --git a/clinicadl/optim/optimizer/factory.py b/clinicadl/optimization/optimizer/factory.py similarity index 100% rename from clinicadl/optim/optimizer/factory.py rename to clinicadl/optimization/optimizer/factory.py diff --git a/clinicadl/optim/optimizer/utils.py b/clinicadl/optimization/optimizer/utils.py similarity index 100% rename from clinicadl/optim/optimizer/utils.py rename to clinicadl/optimization/optimizer/utils.py diff --git a/clinicadl/optimizer/optimization.py b/clinicadl/optimizer/optimization.py deleted file mode 100644 index eba352f2e..000000000 --- a/clinicadl/optimizer/optimization.py +++ /dev/null @@ -1,16 +0,0 @@ -from logging import getLogger - -from pydantic import BaseModel, ConfigDict -from pydantic.types import PositiveInt - -logger = getLogger("clinicadl.optimization_config") - - -class OptimizationConfig(BaseModel): - """Config class to configure the optimization process.""" - - accumulation_steps: PositiveInt = 1 - epochs: PositiveInt = 20 - profiler: bool = False - # pydantic config - model_config = ConfigDict(validate_assignment=True) diff --git a/clinicadl/optimizer/optimizer.py b/clinicadl/optimizer/optimizer.py deleted file mode 100644 index 2beb9b913..000000000 --- a/clinicadl/optimizer/optimizer.py +++ /dev/null @@ -1,18 +0,0 @@ -from logging import getLogger - -from pydantic import BaseModel, ConfigDict -from pydantic.types import NonNegativeFloat, PositiveFloat - -from clinicadl.utils.enum import Optimizer - -logger = getLogger("clinicadl.optimizer_config") - - -class OptimizerConfig(BaseModel): - """Config class to configure the optimizer.""" - - learning_rate: PositiveFloat = 1e-4 - optimizer: Optimizer = Optimizer.ADAM - weight_decay: NonNegativeFloat = 1e-4 - # pydantic config - model_config = ConfigDict(validate_assignment=True) diff --git a/clinicadl/predictor/config.py b/clinicadl/predictor/config.py index ead42d1c6..6075890aa 100644 --- a/clinicadl/predictor/config.py +++ b/clinicadl/predictor/config.py @@ -3,12 +3,12 @@ from pydantic import BaseModel, ConfigDict, computed_field -from clinicadl.caps_dataset.data_config import DataConfig as DataBaseConfig -from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig -from clinicadl.maps_manager.config import ( +from clinicadl.dataset.data_config import DataConfig as DataBaseConfig +from 
clinicadl.dataset.dataloader_config import DataLoaderConfig +from clinicadl.experiment_manager.config import ( MapsManagerConfig as MapsManagerBaseConfig, ) -from clinicadl.maps_manager.maps_manager import MapsManager +from clinicadl.experiment_manager.maps_manager import MapsManager from clinicadl.predictor.validation import ValidationConfig from clinicadl.splitter.config import SplitConfig from clinicadl.transforms.config import TransformsConfig diff --git a/clinicadl/predictor/old_predictor.py b/clinicadl/predictor/old_predictor.py new file mode 100644 index 000000000..8314ce9d9 --- /dev/null +++ b/clinicadl/predictor/old_predictor.py @@ -0,0 +1,1153 @@ +import json +import shutil +from logging import getLogger +from pathlib import Path +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import pandas as pd +import torch +import torch.distributed as dist +from torch.amp import autocast +from torch.nn.modules.loss import _Loss +from torch.utils.data import DataLoader +from torch.utils.data.distributed import DistributedSampler + +from clinicadl.dataset.caps_dataset import ( + return_dataset, +) +from clinicadl.experiment_manager.maps_manager import MapsManager +from clinicadl.interpret.config import InterpretConfig +from clinicadl.metrics.old_metrics.metric_module import MetricModule +from clinicadl.metrics.old_metrics.utils import ( + check_selection_metric, + find_selection_metrics, +) +from clinicadl.networks.old_network.network import Network +from clinicadl.predictor.config import PredictConfig +from clinicadl.trainer.tasks_utils import ( + columns, + compute_metrics, + generate_label_code, + generate_test_row, + get_criterion, +) +from clinicadl.transforms.config import TransformsConfig +from clinicadl.utils.computational.ddp import DDP, cluster +from clinicadl.utils.enum import Task +from clinicadl.utils.exceptions import ( + ClinicaDLArgumentError, + ClinicaDLDataLeakageError, + MAPSError, +) + +logger = getLogger("clinicadl.predict_manager") +level_list: List[str] = ["warning", "info", "debug"] + + +class Predictor: + def __init__(self, _config: Union[PredictConfig, InterpretConfig]) -> None: + self._config = _config + + from clinicadl.splitter.config import SplitterConfig + from clinicadl.splitter.old_splitter import Splitter + + self.maps_manager = MapsManager(_config.maps_manager.maps_dir) + self._config.adapt_with_maps_manager_info(self.maps_manager) + tmp = self._config.data.model_dump( + exclude=set(["preprocessing_dict", "mode", "caps_dict"]) + ) + tmp.update(self._config.split.model_dump()) + tmp.update(self._config.validation.model_dump()) + self.splitter = Splitter(SplitterConfig(**tmp)) + + def predict( + self, + label_code: Union[str, dict[str, int]] = "default", + ): + """Performs the prediction task on a subset of caps_directory defined in a TSV file.""" + + group_df = self._config.data.create_groupe_df() + self._check_data_group(group_df) + criterion = get_criterion( + self.maps_manager.network_task, self.maps_manager.loss + ) + + for split in self.splitter.split_iterator(): + logger.info(f"Prediction of split {split}") + group_df, group_parameters = self.get_group_info( + self._config.maps_manager.data_group, split + ) + # Find label code if not given + if self._config.data.is_given_label_code( + self.maps_manager.label, label_code + ): + generate_label_code( + self.maps_manager.network_task, group_df, self._config.data.label + ) + # Erase previous TSV files on master process + if not self._config.validation.selection_metrics: + 
split_selection_metrics = find_selection_metrics( + self.maps_manager.maps_path, + split, + ) + else: + split_selection_metrics = self._config.validation.selection_metrics + for selection in split_selection_metrics: + tsv_dir = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection}" + / self._config.maps_manager.data_group + ) + tsv_pattern = f"{self._config.maps_manager.data_group}*.tsv" + for tsv_file in tsv_dir.glob(tsv_pattern): + tsv_file.unlink() + + self._config.data.check_label(self.maps_manager.label) + if self.maps_manager.multi_network: + for network in range(self.maps_manager.num_networks): + self._predict_single( + group_parameters, + group_df, + self._config.transforms, + label_code, + criterion, + split, + split_selection_metrics, + network, + ) + else: + self._predict_single( + group_parameters, + group_df, + self._config.transforms, + label_code, + criterion, + split, + split_selection_metrics, + ) + if cluster.master: + self._ensemble_prediction( + self.maps_manager, + self._config.maps_manager.data_group, + split, + self._config.validation.selection_metrics, + self._config.data.use_labels, + self._config.validation.skip_leak_check, + ) + + def _predict_single( + self, + group_parameters, + group_df, + transforms, + label_code, + criterion, + split, + split_selection_metrics, + network: Optional[int] = None, + ): + """Runs prediction with a single network on one split and saves the requested outputs in the MAPS.""" + + assert isinstance(self._config, PredictConfig) + # assert self._config.data.label + + data_test = return_dataset( + group_parameters["caps_directory"], + group_df, + self.maps_manager.preprocessing_dict, + transforms_config=self._config.transforms, + multi_cohort=group_parameters["multi_cohort"], + label_presence=self._config.data.use_labels, + label=self._config.data.label, + label_code=( + self.maps_manager.label_code if label_code == "default" else label_code + ), + cnn_index=network, + ) + test_loader = DataLoader( + data_test, + batch_size=( + self._config.dataloader.batch_size + if self._config.dataloader.batch_size is not None + else self.maps_manager.batch_size + ), + shuffle=False, + sampler=DistributedSampler( + data_test, + num_replicas=cluster.world_size, + rank=cluster.rank, + shuffle=False, + ), + num_workers=self._config.dataloader.n_proc + if self._config.dataloader.n_proc is not None + else self.maps_manager.n_proc, + ) + self._test_loader( + maps_manager=self.maps_manager, + dataloader=test_loader, + criterion=criterion, + data_group=self._config.maps_manager.data_group, + split=split, + selection_metrics=split_selection_metrics, + use_labels=self._config.data.use_labels, + gpu=self._config.computational.gpu, + amp=self._config.computational.amp, + network=network, + ) + if self._config.maps_manager.save_tensor: + logger.debug("Saving tensors") + self._compute_output_tensors( + maps_manager=self.maps_manager, + dataset=data_test, + data_group=self._config.maps_manager.data_group, + split=split, + selection_metrics=self._config.validation.selection_metrics, + gpu=self._config.computational.gpu, + network=network, + ) + if self._config.maps_manager.save_nifti: + self._compute_output_nifti( + dataset=data_test, + split=split, + network=network, + ) + if self._config.maps_manager.save_latent_tensor: + self._compute_latent_tensors( + dataset=data_test, + split=split, + network=network, + ) + + def _compute_latent_tensors( + self, + dataset, + split: int, + nb_images: Optional[int] = None, + network: Optional[int] = None, + ): + """ + Computes the latent tensors and saves them in the MAPS.
+ Parameters + ---------- + dataset : _type_ + wrapper of the data set. + split : int + split number. + nb_images : int (optional, default=None) + number of full images to write. Default computes the outputs of the whole data set. + network : int (optional, default=None) + Index of the network tested (only used in multi-network setting). + """ + for selection_metric in self._config.validation.selection_metrics: + # load the best trained model during the training + model, _ = self.maps_manager._init_model( + transfer_path=self.maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=self._config.computational.gpu, + network=network, + nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, + ) + model = DDP( + model, + fsdp=self.maps_manager.fully_sharded_data_parallel, + amp=self.maps_manager.amp, + ) + model.eval() + tensor_path = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / self._config.maps_manager.data_group + / "latent_tensors" + ) + if cluster.master: + tensor_path.mkdir(parents=True, exist_ok=True) + dist.barrier() + if nb_images is None: # Compute outputs for the whole data set + nb_modes = len(dataset) + else: + nb_modes = nb_images * dataset.elem_per_image + for i in [ + *range(cluster.rank, nb_modes, cluster.world_size), + *range(int(nb_modes % cluster.world_size <= cluster.rank)), + ]: + data = dataset[i] + image = data["image"] + logger.debug(f"Image for latent representation {image}") + with autocast("cuda", enabled=self.maps_manager.std_amp): + _, latent, _ = model.module._forward( + image.unsqueeze(0).to(model.device) + ) + latent = latent.squeeze(0).cpu().float() + participant_id = data["participant_id"] + session_id = data["session_id"] + mode_id = data[f"{self.maps_manager.mode}_id"] + output_filename = f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_latent.pt" + torch.save(latent, tensor_path / output_filename) + + @torch.no_grad() + def _compute_output_nifti( + self, + dataset, + split: int, + network: Optional[int] = None, + ): + """Computes the output nifti images and saves them in the MAPS. + Parameters + ---------- + dataset : _type_ + wrapper of the data set. + split : int + split number. + network : int (optional, default=None) + Index of the network tested (only used in multi-network setting).
+    @torch.no_grad()
+    def _compute_output_nifti(
+        self,
+        dataset,
+        split: int,
+        network: Optional[int] = None,
+    ):
+        """Computes the output NIfTI images and saves them in the MAPS.
+
+        Parameters
+        ----------
+        dataset : CapsDataset
+            wrapper of the data set.
+        split : int
+            split number.
+        network : int (optional, default=None)
+            Index of the network tested (only used in multi-network setting).
+
+        Raises
+        ------
+        ClinicaDLException
+            if the data is not a full image.
+        """
+        import nibabel as nib
+        from numpy import eye
+
+        for selection_metric in self._config.validation.selection_metrics:
+            # load the best trained model during the training
+            model, _ = self.maps_manager._init_model(
+                transfer_path=self.maps_manager.maps_path,
+                split=split,
+                transfer_selection=selection_metric,
+                gpu=self._config.computational.gpu,
+                network=network,
+                nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer,
+            )
+            model = DDP(
+                model,
+                fsdp=self.maps_manager.fully_sharded_data_parallel,
+                amp=self.maps_manager.amp,
+            )
+            model.eval()
+            nifti_path = (
+                self.maps_manager.maps_path
+                / f"split-{split}"
+                / f"best-{selection_metric}"
+                / self._config.maps_manager.data_group
+                / "nifti_images"
+            )
+            if cluster.master:
+                nifti_path.mkdir(parents=True, exist_ok=True)
+            dist.barrier()
+            nb_imgs = len(dataset)
+            for i in [
+                *range(cluster.rank, nb_imgs, cluster.world_size),
+                *range(int(nb_imgs % cluster.world_size <= cluster.rank)),
+            ]:
+                data = dataset[i]
+                image = data["image"]
+                x = image.unsqueeze(0).to(model.device)
+                with autocast("cuda", enabled=self.maps_manager.std_amp):
+                    output = model(x)
+                output = output.squeeze(0).detach().cpu().float()
+                # Convert tensor to nifti image with appropriate affine
+                input_nii = nib.nifti1.Nifti1Image(
+                    image[0].detach().cpu().numpy(), eye(4)
+                )
+                output_nii = nib.nifti1.Nifti1Image(output[0].numpy(), eye(4))
+                # Create file name according to participant and session id
+                participant_id = data["participant_id"]
+                session_id = data["session_id"]
+                input_filename = f"{participant_id}_{session_id}_image_input.nii.gz"
+                output_filename = f"{participant_id}_{session_id}_image_output.nii.gz"
+                nib.loadsave.save(input_nii, nifti_path / input_filename)
+                nib.loadsave.save(output_nii, nifti_path / output_filename)
+
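+    # NOTE: the NIfTI reconstructions above are written with an identity
+    # affine (`eye(4)`), i.e. in voxel space rather than scanner space. A
+    # hypothetical variant that reuses the affine of the source image, if its
+    # path `source_path` were known (illustrative only):
+    #
+    #     import nibabel as nib
+    #     affine = nib.load(source_path).affine
+    #     nib.save(nib.Nifti1Image(output[0].numpy(), affine), out_path)
+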
+    def interpret(self):
+        """Performs the interpretation task on a subset of caps_directory defined in a TSV file.
+        The mean interpretation is always saved; to save the individual interpretations, set save_individual to True.
+        """
+        assert isinstance(self._config, InterpretConfig)
+
+        self._config.adapt_with_maps_manager_info(self.maps_manager)
+
+        if self.maps_manager.multi_network:
+            raise NotImplementedError(
+                "The interpretation of multi-network framework is not implemented."
+            )
+        transforms = TransformsConfig(
+            normalize=self.maps_manager.normalize,
+            data_augmentation=self.maps_manager.data_augmentation,
+            size_reduction=self.maps_manager.size_reduction,
+            size_reduction_factor=self.maps_manager.size_reduction_factor,
+        )
+        group_df = self._config.data.create_groupe_df()
+        self._check_data_group(group_df)
+
+        for split in self.splitter.split_iterator():
+            logger.info(f"Interpretation of split {split}")
+            df_group, parameters_group = self.get_group_info(
+                self._config.maps_manager.data_group, split
+            )
+            data_test = return_dataset(
+                parameters_group["caps_directory"],
+                df_group,
+                self.maps_manager.preprocessing_dict,
+                transforms_config=transforms,
+                multi_cohort=parameters_group["multi_cohort"],
+                label_presence=False,
+                label_code=self.maps_manager.label_code,
+                label=self.maps_manager.label,
+            )
+            test_loader = DataLoader(
+                data_test,
+                batch_size=self._config.dataloader.batch_size,
+                shuffle=False,
+                num_workers=self._config.dataloader.n_proc,
+            )
+            if not self._config.validation.selection_metrics:
+                self._config.validation.selection_metrics = find_selection_metrics(
+                    self.maps_manager.maps_path,
+                    split,
+                )
+            for selection_metric in self._config.validation.selection_metrics:
+                logger.info(f"Interpretation of metric {selection_metric}")
+                results_path = (
+                    self.maps_manager.maps_path
+                    / f"split-{split}"
+                    / f"best-{selection_metric}"
+                    / self._config.maps_manager.data_group
+                    / f"interpret-{self._config.interpret.name}"
+                )
+                if results_path.is_dir():
+                    if self._config.interpret.overwrite_name:
+                        shutil.rmtree(results_path)
+                    else:
+                        raise MAPSError(
+                            f"Interpretation name {self._config.interpret.name} is already written. "
+                            f"Please choose another name or set overwrite_name to True."
+                        )
+                results_path.mkdir(parents=True)
+                model, _ = self.maps_manager._init_model(
+                    transfer_path=self.maps_manager.maps_path,
+                    split=split,
+                    transfer_selection=selection_metric,
+                    gpu=self._config.computational.gpu,
+                )
+                interpreter = self._config.interpret.get_method()(model)
+                cum_maps = [0] * data_test.elem_per_image
+                for data in test_loader:
+                    images = data["image"].to(model.device)
+                    map_pt = interpreter.generate_gradients(
+                        images,
+                        self._config.interpret.target_node,
+                        level=self._config.interpret.level,
+                        amp=self._config.computational.amp,
+                    )
+                    for i in range(len(data["participant_id"])):
+                        mode_id = data[f"{self.maps_manager.mode}_id"][i]
+                        cum_maps[mode_id] += map_pt[i]
+                        if self._config.interpret.save_individual:
+                            single_path = (
+                                results_path
+                                / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.pt"
+                            )
+                            torch.save(map_pt[i], single_path)
+                            if self._config.maps_manager.save_nifti:
+                                import nibabel as nib
+                                from numpy import eye
+
+                                single_nifti_path = (
+                                    results_path
+                                    / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.nii.gz"
+                                )
+                                output_nii = nib.nifti1.Nifti1Image(
+                                    map_pt[i].numpy(), eye(4)
+                                )
+                                nib.loadsave.save(output_nii, single_nifti_path)
+                for i, mode_map in enumerate(cum_maps):
+                    mode_map /= len(data_test)
+                    torch.save(
+                        mode_map,
+                        results_path / f"mean_{self.maps_manager.mode}-{i}_map.pt",
+                    )
+                    if self._config.maps_manager.save_nifti:
+                        import nibabel as nib
+                        from numpy import eye
+
+                        output_nii = nib.nifti1.Nifti1Image(mode_map.numpy(), eye(4))
+                        nib.loadsave.save(
+                            output_nii,
+                            results_path
+                            / f"mean_{self.maps_manager.mode}-{i}_map.nii.gz",
+                        )
+
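+    # NOTE: illustrative call sequence (hypothetical values), assuming an
+    # InterpretConfig `config` naming the data group and the saliency method:
+    #
+    #     predictor = Predictor(config)
+    #     predictor.interpret()  # writes mean_{mode}-{i}_map.pt per split
+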
+    def _check_data_group(
+        self,
+        df: Optional[pd.DataFrame] = None,
+    ):
+        """Checks if a data group is already available when no other argument is given.
+        Otherwise, creates a new data group.
+
+        Parameters
+        ----------
+        df : pd.DataFrame (optional, default=None)
+            DataFrame containing the participant_id and session_id of the data group.
+
+        Raises
+        ------
+        MAPSError
+            when trying to overwrite train or validation data groups
+        ClinicaDLArgumentError
+            when caps_directory or df are given but data group already exists
+        ClinicaDLArgumentError
+            when caps_directory or df are not given and data group does not exist
+
+        """
+        group_dir = (
+            self.maps_manager.maps_path
+            / "groups"
+            / self._config.maps_manager.data_group
+        )
+        logger.debug(f"Group path {group_dir}")
+        if group_dir.is_dir():  # Data group already exists
+            if self._config.maps_manager.overwrite:
+                if self._config.maps_manager.data_group in ["train", "validation"]:
+                    raise MAPSError("Cannot overwrite train or validation data group.")
+                else:
+                    if not self._config.split.split:
+                        self._config.split.split = self.maps_manager.find_splits()
+                    assert self._config.split
+                    for split in self._config.split.split:
+                        selection_metrics = find_selection_metrics(
+                            self.maps_manager.maps_path,
+                            split,
+                        )
+                        for selection in selection_metrics:
+                            results_path = (
+                                self.maps_manager.maps_path
+                                / f"split-{split}"
+                                / f"best-{selection}"
+                                / self._config.maps_manager.data_group
+                            )
+                            if results_path.is_dir():
+                                shutil.rmtree(results_path)
+            elif df is not None or (
+                self._config.data.caps_directory is not None
+                and self._config.data.caps_directory != Path("")
+            ):
+                raise ClinicaDLArgumentError(
+                    f"Data group {self._config.maps_manager.data_group} is already defined. "
+                    f"Please do not give any caps_directory, tsv_path or multi_cohort to use it. "
+                    f"To erase {self._config.maps_manager.data_group} please set overwrite to True."
+                )
+
+        elif not group_dir.is_dir() and (
+            self._config.data.caps_directory is None or df is None
+        ):  # Data group does not exist yet / was overwritten + missing data
+            raise ClinicaDLArgumentError(
+                f"The data group {self._config.maps_manager.data_group} does not exist yet. "
+                f"Please specify a caps_directory and a tsv_path to create this data group."
+            )
+        elif (
+            not group_dir.is_dir()
+        ):  # Data group does not exist yet / was overwritten + all data is provided
+            if self._config.validation.skip_leak_check:
+                logger.info("Skipping data leakage check")
+            else:
+                self._check_leakage(self._config.maps_manager.data_group, df)
+            self._write_data_group(
+                self._config.maps_manager.data_group,
+                df,
+                self._config.data.caps_directory,
+                self._config.data.multi_cohort,
+                label=self._config.data.label,
+            )
+
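+    # NOTE: summary of the branches checked above:
+    #   group exists,  overwrite=True           -> previous results erased
+    #                                              (train/validation excepted)
+    #   group exists,  caps_directory/df given  -> ClinicaDLArgumentError
+    #   group missing, caps_directory/df absent -> ClinicaDLArgumentError
+    #   group missing, all data provided        -> leakage check (unless
+    #                                              skipped), then written
+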
+    def get_group_info(
+        self, data_group: str, split: Optional[int] = None
+    ) -> Tuple[pd.DataFrame, Dict[str, Any]]:
+        """Gets information from the corresponding data group
+        (list of participant_id / session_id + configuration parameters).
+        split is only needed if data_group is train or validation.
+
+        Parameters
+        ----------
+        data_group : str
+            name of the data group.
+        split : int (optional, default=None)
+            split number, only needed if data_group is train or validation.
+
+        Returns
+        -------
+        Tuple[pd.DataFrame, Dict[str, Any]]
+            the DataFrame of participant_id / session_id and the configuration parameters of the data group.
+
+        Raises
+        ------
+        MAPSError
+            if the data group is not defined,
+            if data_group is train or validation and no split number is given,
+            or if the split is not available for the data group.
+        """
+        group_path = self.maps_manager.maps_path / "groups" / data_group
+        if not group_path.is_dir():
+            raise MAPSError(
+                f"Data group {data_group} is not defined. "
+                f"Please run a prediction to create this data group."
+            )
+        if data_group in ["train", "validation"]:
+            if split is None:
+                raise MAPSError(
+                    "Information on train or validation data can only be "
+                    "loaded if a split number is given"
+                )
+            elif not (group_path / f"split-{split}").is_dir():
+                raise MAPSError(
+                    f"Split {split} is not available for data group {data_group}."
+                )
+            else:
+                group_path = group_path / f"split-{split}"
+
+        df = pd.read_csv(group_path / "data.tsv", sep="\t")
+        json_path = group_path / "maps.json"
+        from clinicadl.utils.iotools.utils import path_decoder
+
+        with json_path.open(mode="r") as f:
+            parameters = json.load(f, object_hook=path_decoder)
+        return df, parameters
+
+    def _check_leakage(self, data_group: str, test_df: pd.DataFrame):
+        """Checks that no intersection exists between the participants used for training and those used for testing.
+
+        Parameters
+        ----------
+        data_group : str
+            name of the data group
+        test_df : pd.DataFrame
+            Table of participant_id / session_id of the data group
+
+        Raises
+        ------
+        ClinicaDLDataLeakageError
+            if data_group not in ["train", "validation"] and there is an intersection
+            between the participant IDs in test_df and the ones used for training.
+        """
+        if data_group not in ["train", "validation"]:
+            train_path = self.maps_manager.maps_path / "groups" / "train+validation.tsv"
+            train_df = pd.read_csv(train_path, sep="\t")
+            participants_train = set(train_df.participant_id.values)
+            participants_test = set(test_df.participant_id.values)
+            intersection = participants_test & participants_train
+
+            if len(intersection) > 0:
+                raise ClinicaDLDataLeakageError(
+                    "Your evaluation set contains participants who were already seen during "
+                    "the training step. The list of common participants is the following: "
+                    f"{intersection}."
+                )
+
+    def _write_data_group(
+        self,
+        data_group,
+        df,
+        caps_directory: Path = None,
+        multi_cohort: bool = None,
+        label=None,
+    ):
+        """Checks that a data_group is not already written and writes the characteristics of the data group
+        (TSV file with a list of participant / session + JSON file containing the CAPS and the preprocessing).
+
+        Parameters
+        ----------
+        data_group : str
+            name whose presence is checked.
+        df : pd.DataFrame
+            DataFrame containing the participant_id and session_id (and label if use_labels is True).
+        caps_directory : Path (optional, default=None)
+            caps_directory if different from the training caps_directory.
+        multi_cohort : bool (optional, default=None)
+            multi_cohort used if different from the training multi_cohort.
+        label : str (optional, default=None)
+            name of the label column, if different from the training label.
+        """
+        group_path = self.maps_manager.maps_path / "groups" / data_group
+        group_path.mkdir(parents=True)
+
+        columns = ["participant_id", "session_id", "cohort"]
+        if self._config.data.label in df.columns.values:
+            columns += [self._config.data.label]
+        if label is not None and label in df.columns.values:
+            columns += [label]
+
+        df.to_csv(group_path / "data.tsv", sep="\t", columns=columns, index=False)
+        self.maps_manager.write_parameters(
+            group_path,
+            {
+                "caps_directory": (
+                    caps_directory
+                    if caps_directory is not None
+                    else self._config.caps_directory
+                ),
+                "multi_cohort": (
+                    multi_cohort
+                    if multi_cohort is not None
+                    else self._config.multi_cohort
+                ),
+            },
+        )
+
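+    # NOTE: the leakage check above reduces to a set intersection on
+    # participant IDs; a minimal self-contained equivalent:
+    #
+    #     train = {"sub-01", "sub-02"}
+    #     test = {"sub-02", "sub-03"}
+    #     assert not (train & test), f"leakage: {train & test}"
+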
+    # TODO: this function appears to be unused
+    def get_interpretation(
+        self,
+        data_group: str,
+        name: str,
+        split: int = 0,
+        selection_metric: Optional[str] = None,
+        verbose: bool = True,
+        participant_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        mode_id: int = 0,
+    ) -> torch.Tensor:
+        """
+        Gets the individual interpretation maps for one session if participant_id and session_id are filled.
+        Else loads the mean interpretation map.
+
+        Args:
+            data_group (str): Name of the data group used for the interpretation task.
+            name (str): name of the interpretation task.
+            split (int): Index of the split used for training.
+            selection_metric (str): Metric used for best weights selection.
+            verbose (bool): if True, will print the associated prediction.log.
+            participant_id (str): ID of the participant (if not given, load the mean map).
+            session_id (str): ID of the session (if not given, load the mean map).
+            mode_id (int): Index of the mode used.
+        Returns:
+            (torch.Tensor): Tensor of the interpretability map.
+        """
+
+        selection_metric = check_selection_metric(
+            self.maps_manager.maps_path,
+            split,
+            selection_metric,
+        )
+        if verbose:
+            self.maps_manager._print_description_log(
+                data_group, split, selection_metric
+            )
+        map_dir = (
+            self.maps_manager.maps_path
+            / f"split-{split}"
+            / f"best-{selection_metric}"
+            / data_group
+            / f"interpret-{name}"
+        )
+        if not map_dir.is_dir():
+            raise MAPSError(
+                f"No prediction corresponding to data group {data_group} and "
+                f"interpretation {name} was found."
+            )
+        if participant_id is None and session_id is None:
+            map_pt = torch.load(
+                map_dir / f"mean_{self.maps_manager.mode}-{mode_id}_map.pt",
+                weights_only=True,
+            )
+        elif participant_id is None or session_id is None:
+            raise ValueError(
+                "To load the mean interpretation map, "
+                "please do not give any participant_id or session_id.\n "
+                "Else specify both parameters"
+            )
+        else:
+            map_pt = torch.load(
+                map_dir
+                / f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_map.pt",
+                weights_only=True,
+            )
+        return map_pt
+
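+    # NOTE: illustrative call (hypothetical values): load the mean saliency
+    # map of mode 0 for the model selected on loss in split 0:
+    #
+    #     map_pt = predictor.get_interpretation(
+    #         data_group="test", name="gradients", split=0,
+    #         selection_metric="loss",
+    #     )
+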
+ """ + model.eval() + dataloader.dataset.eval() + + results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) + total_loss = {} + with torch.no_grad(): + for i, data in enumerate(dataloader): + # initialize the loss list to save the loss components + with autocast("cuda", enabled=amp): + outputs, loss_dict = model(data, criterion, use_labels=use_labels) + + if i == 0: + for loss_component in loss_dict.keys(): + total_loss[loss_component] = 0 + for loss_component in total_loss.keys(): + total_loss[loss_component] += loss_dict[loss_component].float() + + # Generate detailed DataFrame + for idx in range(len(data["participant_id"])): + row = generate_test_row( + network_task, + mode, + metrics_module, + n_classes, + idx, + data, + outputs.float(), + ) + row_df = pd.DataFrame( + row, columns=columns(network_task, mode, n_classes) + ) + results_df = pd.concat([results_df, row_df]) + + del outputs, loss_dict + dataframes = [None] * dist.get_world_size() + dist.gather_object( + results_df, dataframes if dist.get_rank() == 0 else None, dst=0 + ) + if dist.get_rank() == 0: + results_df = pd.concat(dataframes) + del dataframes + results_df.reset_index(inplace=True, drop=True) + + if not use_labels: + metrics_dict = None + else: + metrics_dict = compute_metrics( + network_task, results_df, metrics_module, report_ci=report_ci + ) + for loss_component in total_loss.keys(): + dist.reduce(total_loss[loss_component], dst=0) + loss_value = total_loss[loss_component].item() / cluster.world_size + + if report_ci: + metrics_dict["Metric_names"].append(loss_component) + metrics_dict["Metric_values"].append(loss_value) + metrics_dict["Lower_CI"].append("N/A") + metrics_dict["Upper_CI"].append("N/A") + metrics_dict["SE"].append("N/A") + + else: + metrics_dict[loss_component] = loss_value + + torch.cuda.empty_cache() + + return results_df, metrics_dict + + def test_da( + self, + mode: str, + metrics_module: MetricModule, + n_classes: int, + network_task: Union[str, Task], + model: Network, + dataloader: DataLoader, + criterion: _Loss, + alpha: float = 0, + use_labels: bool = True, + target: bool = True, + report_ci=False, + ) -> Tuple[pd.DataFrame, Dict[str, float]]: + """ + Computes the predictions and evaluation metrics. + + Args: + model: the model trained. + dataloader: wrapper of a CapsDataset. + criterion: function to calculate the loss. + use_labels: If True the true_label will be written in output DataFrame + and metrics dict will be created. + Returns: + the results and metrics on the image level. 
+ """ + model.eval() + dataloader.dataset.eval() + results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) + total_loss = 0 + with torch.no_grad(): + for i, data in enumerate(dataloader): + outputs, loss_dict = model.compute_outputs_and_loss_test( + data, criterion, alpha, target + ) + total_loss += loss_dict["loss"].item() + + # Generate detailed DataFrame + for idx in range(len(data["participant_id"])): + row = generate_test_row( + network_task, + mode, + metrics_module, + n_classes, + idx, + data, + outputs, + ) + row_df = pd.DataFrame( + row, columns=columns(network_task, mode, n_classes) + ) + results_df = pd.concat([results_df, row_df]) + + del outputs, loss_dict + results_df.reset_index(inplace=True, drop=True) + + if not use_labels: + metrics_dict = None + else: + metrics_dict = compute_metrics( + network_task, results_df, metrics_module, report_ci=report_ci + ) + if report_ci: + metrics_dict["Metric_names"].append("loss") + metrics_dict["Metric_values"].append(total_loss) + metrics_dict["Lower_CI"].append("N/A") + metrics_dict["Upper_CI"].append("N/A") + metrics_dict["SE"].append("N/A") + + else: + metrics_dict["loss"] = total_loss + + torch.cuda.empty_cache() + + return results_df, metrics_dict + + def _test_loader( + self, + maps_manager: MapsManager, + dataloader, + criterion, + data_group: str, + split: int, + selection_metrics, + use_labels=True, + gpu=None, + amp=False, + network=None, + report_ci=True, + ): + """ + Launches the testing task on a dataset wrapped by a DataLoader and writes prediction TSV files. + + Args: + dataloader (torch.utils.data.DataLoader): DataLoader wrapping the test CapsDataset. + criterion (torch.nn.modules.loss._Loss): optimization criterion used during training. + data_group (str): name of the data group used for the testing task. + split (int): Index of the split used to train the model tested. + selection_metrics (list[str]): List of metrics used to select the best models which are tested. + use_labels (bool): If True, the labels must exist in test meta-data and metrics are computed. + gpu (bool): If given, a new value for the device of the model will be computed. + amp (bool): If enabled, uses Automatic Mixed Precision (requires GPU usage). + network (int): Index of the network tested (only used in multi-network setting). 
+ """ + for selection_metric in selection_metrics: + if cluster.master: + log_dir = ( + maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / data_group + ) + maps_manager.write_description_log( + log_dir, + data_group, + dataloader.dataset.config.data.caps_dict, + dataloader.dataset.config.data.data_df, + ) + + # load the best trained model during the training + model, _ = maps_manager._init_model( + transfer_path=maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=gpu, + network=network, + ) + model = DDP( + model, + fsdp=maps_manager.fully_sharded_data_parallel, + amp=maps_manager.amp, + ) + + prediction_df, metrics = self.test( + mode=maps_manager.mode, + metrics_module=maps_manager.metrics_module, + n_classes=maps_manager.n_classes, + network_task=maps_manager.network_task, + model=model, + dataloader=dataloader, + criterion=criterion, + use_labels=use_labels, + amp=amp, + report_ci=report_ci, + ) + if use_labels: + if network is not None: + metrics[f"{maps_manager.mode}_id"] = network + + loss_to_log = ( + metrics["Metric_values"][-1] if report_ci else metrics["loss"] + ) + + logger.info( + f"{maps_manager.mode} level {data_group} loss is {loss_to_log} for model selected on {selection_metric}" + ) + + if cluster.master: + # Replace here + maps_manager._mode_level_to_tsv( + prediction_df, + metrics, + split, + selection_metric, + data_group=data_group, + ) + + @torch.no_grad() + def _compute_output_tensors( + self, + maps_manager: MapsManager, + dataset, + data_group, + split, + selection_metrics, + nb_images=None, + gpu=None, + network=None, + ): + """ + Compute the output tensors and saves them in the MAPS. + + Args: + dataset (clinicadl.dataset.caps_dataset.CapsDataset): wrapper of the data set. + data_group (str): name of the data group used for the task. + split (int): split number. + selection_metrics (list[str]): metrics used for model selection. + nb_images (int): number of full images to write. Default computes the outputs of the whole data set. + gpu (bool): If given, a new value for the device of the model will be computed. + network (int): Index of the network tested (only used in multi-network setting). 
+ """ + for selection_metric in selection_metrics: + # load the best trained model during the training + model, _ = maps_manager._init_model( + transfer_path=maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=gpu, + network=network, + nb_unfrozen_layer=maps_manager.nb_unfrozen_layer, + ) + model = DDP( + model, + fsdp=maps_manager.fully_sharded_data_parallel, + amp=maps_manager.amp, + ) + model.eval() + + tensor_path = ( + maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / data_group + / "tensors" + ) + if cluster.master: + tensor_path.mkdir(parents=True, exist_ok=True) + dist.barrier() + + if nb_images is None: # Compute outputs for the whole data set + nb_modes = len(dataset) + else: + nb_modes = nb_images * dataset.elem_per_image + + for i in [ + *range(cluster.rank, nb_modes, cluster.world_size), + *range(int(nb_modes % cluster.world_size <= cluster.rank)), + ]: + data = dataset[i] + image = data["image"] + x = image.unsqueeze(0).to(model.device) + with autocast("cuda", enabled=maps_manager.std_amp): + output = model(x) + output = output.squeeze(0).cpu().float() + participant_id = data["participant_id"] + session_id = data["session_id"] + mode_id = data[f"{maps_manager.mode}_id"] + input_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_input.pt" + output_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_output.pt" + torch.save(image, tensor_path / input_filename) + torch.save(output, tensor_path / output_filename) + logger.debug(f"File saved at {[input_filename, output_filename]}") + + def _ensemble_prediction( + self, + maps_manager: MapsManager, + data_group, + split, + selection_metrics, + use_labels=True, + skip_leak_check=False, + ): + """Computes the results on the image-level.""" + + if not selection_metrics: + selection_metrics = find_selection_metrics(maps_manager.maps_path, split) + + for selection_metric in selection_metrics: + ##################### + # Soft voting + if maps_manager.num_networks > 1 and not skip_leak_check: + maps_manager._ensemble_to_tsv( + split, + selection=selection_metric, + data_group=data_group, + use_labels=use_labels, + ) + elif maps_manager.mode != "image" and not skip_leak_check: + maps_manager._mode_to_image_tsv( + split, + selection=selection_metric, + data_group=data_group, + use_labels=use_labels, + ) diff --git a/clinicadl/predictor/predictor.py b/clinicadl/predictor/predictor.py index 30fbbe5b8..f173e3dde 100644 --- a/clinicadl/predictor/predictor.py +++ b/clinicadl/predictor/predictor.py @@ -1,1153 +1,12 @@ -import json -import shutil -from logging import getLogger -from pathlib import Path -from typing import Any, Dict, List, Optional, Set, Tuple, Union - -import pandas as pd -import torch -import torch.distributed as dist -from torch.amp import autocast -from torch.nn.modules.loss import _Loss -from torch.utils.data import DataLoader -from torch.utils.data.distributed import DistributedSampler - -from clinicadl.caps_dataset.data import ( - return_dataset, -) -from clinicadl.interpret.config import InterpretConfig -from clinicadl.maps_manager.maps_manager import MapsManager -from clinicadl.metrics.metric_module import MetricModule -from clinicadl.metrics.utils import ( - check_selection_metric, - find_selection_metrics, -) -from clinicadl.network.network import Network -from clinicadl.predictor.config import PredictConfig -from clinicadl.trainer.tasks_utils import ( - columns, - compute_metrics, - generate_label_code, - 
generate_test_row, - get_criterion, -) -from clinicadl.transforms.config import TransformsConfig -from clinicadl.utils.computational.ddp import DDP, cluster -from clinicadl.utils.enum import Task -from clinicadl.utils.exceptions import ( - ClinicaDLArgumentError, - ClinicaDLDataLeakageError, - MAPSError, -) - -logger = getLogger("clinicadl.predict_manager") -level_list: List[str] = ["warning", "info", "debug"] +from clinicadl.dataset.caps_dataset import CapsDataset +from clinicadl.experiment_manager.experiment_manager import ExperimentManager class Predictor: - def __init__(self, _config: Union[PredictConfig, InterpretConfig]) -> None: - self._config = _config - - from clinicadl.splitter.config import SplitterConfig - from clinicadl.splitter.splitter import Splitter - - self.maps_manager = MapsManager(_config.maps_manager.maps_dir) - self._config.adapt_with_maps_manager_info(self.maps_manager) - tmp = self._config.data.model_dump( - exclude=set(["preprocessing_dict", "mode", "caps_dict"]) - ) - tmp.update(self._config.split.model_dump()) - tmp.update(self._config.validation.model_dump()) - self.splitter = Splitter(SplitterConfig(**tmp)) - - def predict( - self, - label_code: Union[str, dict[str, int]] = "default", - ): - """Performs the prediction task on a subset of caps_directory defined in a TSV file.""" - - group_df = self._config.data.create_groupe_df() - self._check_data_group(group_df) - criterion = get_criterion( - self.maps_manager.network_task, self.maps_manager.loss - ) - - for split in self.splitter.split_iterator(): - logger.info(f"Prediction of split {split}") - group_df, group_parameters = self.get_group_info( - self._config.maps_manager.data_group, split - ) - # Find label code if not given - if self._config.data.is_given_label_code( - self.maps_manager.label, label_code - ): - generate_label_code( - self.maps_manager.network_task, group_df, self._config.data.label - ) - # Erase previous TSV files on master process - if not self._config.validation.selection_metrics: - split_selection_metrics = find_selection_metrics( - self.maps_manager.maps_path, - split, - ) - else: - split_selection_metrics = self._config.validation.selection_metrics - for selection in split_selection_metrics: - tsv_dir = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection}" - / self._config.maps_manager.data_group - ) - tsv_pattern = f"{self._config.maps_manager.data_group}*.tsv" - for tsv_file in tsv_dir.glob(tsv_pattern): - tsv_file.unlink() - - self._config.data.check_label(self.maps_manager.label) - if self.maps_manager.multi_network: - for network in range(self.maps_manager.num_networks): - self._predict_single( - group_parameters, - group_df, - self._config.transforms, - label_code, - criterion, - split, - split_selection_metrics, - network, - ) - else: - self._predict_single( - group_parameters, - group_df, - self._config.transforms, - label_code, - criterion, - split, - split_selection_metrics, - ) - if cluster.master: - self._ensemble_prediction( - self.maps_manager, - self._config.maps_manager.data_group, - split, - self._config.validation.selection_metrics, - self._config.data.use_labels, - self._config.validation.skip_leak_check, - ) - - def _predict_single( - self, - group_parameters, - group_df, - transforms, - label_code, - criterion, - split, - split_selection_metrics, - network: Optional[int] = None, - ): - """_summary_""" - - assert isinstance(self._config, PredictConfig) - # assert self._config.data.label - - data_test = return_dataset( - 
group_parameters["caps_directory"], - group_df, - self.maps_manager.preprocessing_dict, - transforms_config=self._config.transforms, - multi_cohort=group_parameters["multi_cohort"], - label_presence=self._config.data.use_labels, - label=self._config.data.label, - label_code=( - self.maps_manager.label_code if label_code == "default" else label_code - ), - cnn_index=network, - ) - test_loader = DataLoader( - data_test, - batch_size=( - self._config.dataloader.batch_size - if self._config.dataloader.batch_size is not None - else self.maps_manager.batch_size - ), - shuffle=False, - sampler=DistributedSampler( - data_test, - num_replicas=cluster.world_size, - rank=cluster.rank, - shuffle=False, - ), - num_workers=self._config.dataloader.n_proc - if self._config.dataloader.n_proc is not None - else self.maps_manager.n_proc, - ) - self._test_loader( - maps_manager=self.maps_manager, - dataloader=test_loader, - criterion=criterion, - data_group=self._config.maps_manager.data_group, - split=split, - selection_metrics=split_selection_metrics, - use_labels=self._config.data.use_labels, - gpu=self._config.computational.gpu, - amp=self._config.computational.amp, - network=network, - ) - if self._config.maps_manager.save_tensor: - logger.debug("Saving tensors") - self._compute_output_tensors( - maps_manager=self.maps_manager, - dataset=data_test, - data_group=self._config.maps_manager.data_group, - split=split, - selection_metrics=self._config.validation.selection_metrics, - gpu=self._config.computational.gpu, - network=network, - ) - if self._config.maps_manager.save_nifti: - self._compute_output_nifti( - dataset=data_test, - split=split, - network=network, - ) - if self._config.maps_manager.save_latent_tensor: - self._compute_latent_tensors( - dataset=data_test, - split=split, - network=network, - ) - - def _compute_latent_tensors( - self, - dataset, - split: int, - nb_images: Optional[int] = None, - network: Optional[int] = None, - ): - """ - Compute the output tensors and saves them in the MAPS. - Parameters - ---------- - dataset : _type_ - wrapper of the data set. - data_group : _type_ - name of the data group used for the task. - split : _type_ - split number. - selection_metrics : _type_ - metrics used for model selection. - nb_images : _type_ (optional, default=None) - number of full images to write. Default computes the outputs of the whole data set. - gpu : _type_ (optional, default=None) - If given, a new value for the device of the model will be computed. - network : _type_ (optional, default=None) - Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in self._config.validation.selection_metrics: - # load the best trained model during the training - model, _ = self.maps_manager._init_model( - transfer_path=self.maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=self._config.computational.gpu, - network=network, - nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=self.maps_manager.fully_sharded_data_parallel, - amp=self.maps_manager.amp, - ) - model.eval() - tensor_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / self._config.maps_manager.data_group - / "latent_tensors" - ) - if cluster.master: - tensor_path.mkdir(parents=True, exist_ok=True) - dist.barrier() - if nb_images is None: # Compute outputs for the whole data set - nb_modes = len(dataset) - else: - nb_modes = nb_images * dataset.elem_per_image - for i in [ - *range(cluster.rank, nb_modes, cluster.world_size), - *range(int(nb_modes % cluster.world_size <= cluster.rank)), - ]: - data = dataset[i] - image = data["image"] - logger.debug(f"Image for latent representation {image}") - with autocast("cuda", enabled=self.maps_manager.std_amp): - _, latent, _ = model.module._forward( - image.unsqueeze(0).to(model.device) - ) - latent = latent.squeeze(0).cpu().float() - participant_id = data["participant_id"] - session_id = data["session_id"] - mode_id = data[f"{self.maps_manager.mode}_id"] - output_filename = f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_latent.pt" - torch.save(latent, tensor_path / output_filename) - - @torch.no_grad() - def _compute_output_nifti( - self, - dataset, - split: int, - network: Optional[int] = None, - ): - """Computes the output nifti images and saves them in the MAPS. - Parameters - ---------- - dataset : _type_ - _description_ - data_group : str - name of the data group used for the task. - split : int - split number. - selection_metrics : list[str] - metrics used for model selection. - gpu : bool (optional, default=None) - If given, a new value for the device of the model will be computed. - network : int (optional, default=None) - Index of the network tested (only used in multi-network setting). 
- Raises - -------- - ClinicaDLException if not an image - """ - import nibabel as nib - from numpy import eye - - for selection_metric in self._config.validation.selection_metrics: - # load the best trained model during the training - model, _ = self.maps_manager._init_model( - transfer_path=self.maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=self._config.computational.gpu, - network=network, - nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=self.maps_manager.fully_sharded_data_parallel, - amp=self.maps_manager.amp, - ) - model.eval() - nifti_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / self._config.maps_manager.data_group - / "nifti_images" - ) - if cluster.master: - nifti_path.mkdir(parents=True, exist_ok=True) - dist.barrier() - nb_imgs = len(dataset) - for i in [ - *range(cluster.rank, nb_imgs, cluster.world_size), - *range(int(nb_imgs % cluster.world_size <= cluster.rank)), - ]: - data = dataset[i] - image = data["image"] - x = image.unsqueeze(0).to(model.device) - with autocast("cuda", enabled=self.maps_manager.std_amp): - output = model(x) - output = output.squeeze(0).detach().cpu().float() - # Convert tensor to nifti image with appropriate affine - input_nii = nib.nifti1.Nifti1Image( - image[0].detach().cpu().numpy(), eye(4) - ) - output_nii = nib.nifti1.Nifti1Image(output[0].numpy(), eye(4)) - # Create file name according to participant and session id - participant_id = data["participant_id"] - session_id = data["session_id"] - input_filename = f"{participant_id}_{session_id}_image_input.nii.gz" - output_filename = f"{participant_id}_{session_id}_image_output.nii.gz" - nib.loadsave.save(input_nii, nifti_path / input_filename) - nib.loadsave.save(output_nii, nifti_path / output_filename) - - def interpret(self): - """Performs the interpretation task on a subset of caps_directory defined in a TSV file. - The mean interpretation is always saved, to save the individual interpretations set save_individual to True. - """ - assert isinstance(self._config, InterpretConfig) - - self._config.adapt_with_maps_manager_info(self.maps_manager) - - if self.maps_manager.multi_network: - raise NotImplementedError( - "The interpretation of multi-network framework is not implemented." 
- ) - transforms = TransformsConfig( - normalize=self.maps_manager.normalize, - data_augmentation=self.maps_manager.data_augmentation, - size_reduction=self.maps_manager.size_reduction, - size_reduction_factor=self.maps_manager.size_reduction_factor, - ) - group_df = self._config.data.create_groupe_df() - self._check_data_group(group_df) - - for split in self.splitter.split_iterator(): - logger.info(f"Interpretation of split {split}") - df_group, parameters_group = self.get_group_info( - self._config.maps_manager.data_group, split - ) - data_test = return_dataset( - parameters_group["caps_directory"], - df_group, - self.maps_manager.preprocessing_dict, - transforms_config=transforms, - multi_cohort=parameters_group["multi_cohort"], - label_presence=False, - label_code=self.maps_manager.label_code, - label=self.maps_manager.label, - ) - test_loader = DataLoader( - data_test, - batch_size=self._config.dataloader.batch_size, - shuffle=False, - num_workers=self._config.dataloader.n_proc, - ) - if not self._config.validation.selection_metrics: - self._config.validation.selection_metrics = find_selection_metrics( - self.maps_manager.maps_path, - split, - ) - for selection_metric in self._config.validation.selection_metrics: - logger.info(f"Interpretation of metric {selection_metric}") - results_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / self._config.maps_manager.data_group - / f"interpret-{self._config.interpret.name}" - ) - if (results_path).is_dir(): - if self._config.interpret.overwrite_name: - shutil.rmtree(results_path) - else: - raise MAPSError( - f"Interpretation name {self._config.interpret.name} is already written. " - f"Please choose another name or set overwrite_name to True." - ) - results_path.mkdir(parents=True) - model, _ = self.maps_manager._init_model( - transfer_path=self.maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=self._config.computational.gpu, - ) - interpreter = self._config.interpret.get_method()(model) - cum_maps = [0] * data_test.elem_per_image - for data in test_loader: - images = data["image"].to(model.device) - map_pt = interpreter.generate_gradients( - images, - self._config.interpret.target_node, - level=self._config.interpret.level, - amp=self._config.computational.amp, - ) - for i in range(len(data["participant_id"])): - mode_id = data[f"{self.maps_manager.mode}_id"][i] - cum_maps[mode_id] += map_pt[i] - if self._config.interpret.save_individual: - single_path = ( - results_path - / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.pt" - ) - torch.save(map_pt[i], single_path) - if self._config.maps_manager.save_nifti: - import nibabel as nib - from numpy import eye - - single_nifti_path = ( - results_path - / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.nii.gz" - ) - output_nii = nib.nifti1.Nifti1Image( - map_pt[i].numpy(), eye(4) - ) - nib.loadsave.save(output_nii, single_nifti_path) - for i, mode_map in enumerate(cum_maps): - mode_map /= len(data_test) - torch.save( - mode_map, - results_path / f"mean_{self.maps_manager.mode}-{i}_map.pt", - ) - if self._config.maps_manager.save_nifti: - import nibabel as nib - from numpy import eye - - output_nii = nib.nifti1.Nifti1Image(mode_map.numpy(), eye(4)) - nib.loadsave.save( - output_nii, - results_path - / f"mean_{self.maps_manager.mode}-{i}_map.nii.gz", - ) - - def 
_check_data_group( - self, - df: Optional[pd.DataFrame] = None, - ): - """Check if a data group is already available if other arguments are None. - Else creates a new data_group. - - Parameters - ---------- - - Raises - ------ - MAPSError - when trying to overwrite train or validation data groups - ClinicaDLArgumentError - when caps_directory or df are given but data group already exists - ClinicaDLArgumentError - when caps_directory or df are not given and data group does not exist - - """ - group_dir = ( - self.maps_manager.maps_path - / "groups" - / self._config.maps_manager.data_group - ) - logger.debug(f"Group path {group_dir}") - if group_dir.is_dir(): # Data group already exists - if self._config.maps_manager.overwrite: - if self._config.maps_manager.data_group in ["train", "validation"]: - raise MAPSError("Cannot overwrite train or validation data group.") - else: - if not self._config.split.split: - self._config.split.split = self.maps_manager.find_splits() - assert self._config.split - for split in self._config.split.split: - selection_metrics = find_selection_metrics( - self.maps_manager.maps_path, - split, - ) - for selection in selection_metrics: - results_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection}" - / self._config.maps_manager.data_group - ) - if results_path.is_dir(): - shutil.rmtree(results_path) - elif df is not None or ( - self._config.data.caps_directory is not None - and self._config.data.caps_directory != Path("") - ): - raise ClinicaDLArgumentError( - f"Data group {self._config.maps_manager.data_group} is already defined. " - f"Please do not give any caps_directory, tsv_path or multi_cohort to use it. " - f"To erase {self._config.maps_manager.data_group} please set overwrite to True." - ) - - elif not group_dir.is_dir() and ( - self._config.data.caps_directory is None or df is None - ): # Data group does not exist yet / was overwritten + missing data - raise ClinicaDLArgumentError( - f"The data group {self._config.maps_manager.data_group} does not already exist. " - f"Please specify a caps_directory and a tsv_path to create this data group." - ) - elif ( - not group_dir.is_dir() - ): # Data group does not exist yet / was overwritten + all data is provided - if self._config.validation.skip_leak_check: - logger.info("Skipping data leakage check") - else: - self._check_leakage(self._config.maps_manager.data_group, df) - self._write_data_group( - self._config.maps_manager.data_group, - df, - self._config.data.caps_directory, - self._config.data.multi_cohort, - label=self._config.data.label, - ) - - def get_group_info( - self, data_group: str, split: int = None - ) -> Tuple[pd.DataFrame, Dict[str, Any]]: - """Gets information from corresponding data group - (list of participant_id / session_id + configuration parameters). - split is only needed if data_group is train or validation. - - Parameters - ---------- - data_group : str - _description_ - split : int (optional, default=None) - _description_ - - Returns - ------- - Tuple[pd.DataFrame, Dict[str, Any]] - _description_ - - Raises - ------ - MAPSError - _description_ - MAPSError - _description_ - MAPSError - _description_ - """ - group_path = self.maps_manager.maps_path / "groups" / data_group - if not group_path.is_dir(): - raise MAPSError( - f"Data group {data_group} is not defined. " - f"Please run a prediction to create this data group." 
- ) - if data_group in ["train", "validation"]: - if split is None: - raise MAPSError( - "Information on train or validation data can only be " - "loaded if a split number is given" - ) - elif not (group_path / f"split-{split}").is_dir(): - raise MAPSError( - f"Split {split} is not available for data group {data_group}." - ) - else: - group_path = group_path / f"split-{split}" - - df = pd.read_csv(group_path / "data.tsv", sep="\t") - json_path = group_path / "maps.json" - from clinicadl.utils.iotools.utils import path_decoder - - with json_path.open(mode="r") as f: - parameters = json.load(f, object_hook=path_decoder) - return df, parameters - - def _check_leakage(self, data_group: str, test_df: pd.DataFrame): - """Checks that no intersection exist between the participants used for training and those used for testing. - - Parameters - ---------- - data_group : str - name of the data group - test_df : pd.DataFrame - Table of participant_id / session_id of the data group - - Raises - ------ - ClinicaDLDataLeakageError - if data_group not in ["train", "validation"] and there is an intersection - between the participant IDs in test_df and the ones used for training. - """ - if data_group not in ["train", "validation"]: - train_path = self.maps_manager.maps_path / "groups" / "train+validation.tsv" - train_df = pd.read_csv(train_path, sep="\t") - participants_train = set(train_df.participant_id.values) - participants_test = set(test_df.participant_id.values) - intersection = participants_test & participants_train - - if len(intersection) > 0: - raise ClinicaDLDataLeakageError( - "Your evaluation set contains participants who were already seen during " - "the training step. The list of common participants is the following: " - f"{intersection}." - ) - - def _write_data_group( - self, - data_group, - df, - caps_directory: Path = None, - multi_cohort: bool = None, - label=None, - ): - """Check that a data_group is not already written and writes the characteristics of the data group - (TSV file with a list of participant / session + JSON file containing the CAPS and the preprocessing). - - Parameters - ---------- - data_group : _type_ - name whose presence is checked. - df : _type_ - DataFrame containing the participant_id and session_id (and label if use_labels is True) - caps_directory : Path (optional, default=None) - caps_directory if different from the training caps_directory, - multi_cohort : bool (optional, default=None) - multi_cohort used if different from the training multi_cohort. - label : _type_ (optional, default=None) - _description_ - """ - group_path = self.maps_manager.maps_path / "groups" / data_group - group_path.mkdir(parents=True) - - columns = ["participant_id", "session_id", "cohort"] - if self._config.data.label in df.columns.values: - columns += [self._config.data.label] - if label is not None and label in df.columns.values: - columns += [label] - - df.to_csv(group_path / "data.tsv", sep="\t", columns=columns, index=False) - self.maps_manager.write_parameters( - group_path, - { - "caps_directory": ( - caps_directory - if caps_directory is not None - else self._config.caps_directory - ), - "multi_cohort": ( - multi_cohort - if multi_cohort is not None - else self._config.multi_cohort - ), - }, - ) - - # this function is never used ??? 
- - def get_interpretation( - self, - data_group: str, - name: str, - split: int = 0, - selection_metric: Optional[str] = None, - verbose: bool = True, - participant_id: Optional[str] = None, - session_id: Optional[str] = None, - mode_id: int = 0, - ) -> torch.Tensor: - """ - Get the individual interpretation maps for one session if participant_id and session_id are filled. - Else load the mean interpretation map. - - Args: - data_group (str): Name of the data group used for the interpretation task. - name (str): name of the interpretation task. - split (int): Index of the split used for training. - selection_metric (str): Metric used for best weights selection. - verbose (bool): if True will print associated prediction.log. - participant_id (str): ID of the participant (if not given load mean map). - session_id (str): ID of the session (if not give load the mean map). - mode_id (int): Index of the mode used. - Returns: - (torch.Tensor): Tensor of the interpretability map. - """ - - selection_metric = check_selection_metric( - self.maps_manager.maps_path, - split, - selection_metric, - ) - if verbose: - self.maps_manager._print_description_log( - data_group, split, selection_metric - ) - map_dir = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - / f"interpret-{name}" - ) - if not map_dir.is_dir(): - raise MAPSError( - f"No prediction corresponding to data group {data_group} and " - f"interpretation {name} was found." - ) - if participant_id is None and session_id is None: - map_pt = torch.load( - map_dir / f"mean_{self.maps_manager.mode}-{mode_id}_map.pt", - weights_only=True, - ) - elif participant_id is None or session_id is None: - raise ValueError( - "To load the mean interpretation map, " - "please do not give any participant_id or session_id.\n " - "Else specify both parameters" - ) - else: - map_pt = torch.load( - map_dir - / f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_map.pt", - weights_only=True, - ) - return map_pt - - def test( - self, - mode: str, - metrics_module: MetricModule, - n_classes: int, - network_task, - model: Network, - dataloader: DataLoader, - criterion: _Loss, - use_labels: bool = True, - amp: bool = False, - report_ci=False, - ) -> Tuple[pd.DataFrame, Dict[str, float]]: - """ - Computes the predictions and evaluation metrics. - - Parameters - ---------- - model: Network - The model trained. - dataloader: DataLoader - Wrapper of a CapsDataset. - criterion: _Loss - Function to calculate the loss. - use_labels: bool - If True the true_label will be written in output DataFrame - and metrics dict will be created. - amp: bool - If True, enables Pytorch's automatic mixed precision. - - Returns - ------- - the results and metrics on the image level. 
- """ - model.eval() - dataloader.dataset.eval() - - results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) - total_loss = {} - with torch.no_grad(): - for i, data in enumerate(dataloader): - # initialize the loss list to save the loss components - with autocast("cuda", enabled=amp): - outputs, loss_dict = model(data, criterion, use_labels=use_labels) - - if i == 0: - for loss_component in loss_dict.keys(): - total_loss[loss_component] = 0 - for loss_component in total_loss.keys(): - total_loss[loss_component] += loss_dict[loss_component].float() - - # Generate detailed DataFrame - for idx in range(len(data["participant_id"])): - row = generate_test_row( - network_task, - mode, - metrics_module, - n_classes, - idx, - data, - outputs.float(), - ) - row_df = pd.DataFrame( - row, columns=columns(network_task, mode, n_classes) - ) - results_df = pd.concat([results_df, row_df]) - - del outputs, loss_dict - dataframes = [None] * dist.get_world_size() - dist.gather_object( - results_df, dataframes if dist.get_rank() == 0 else None, dst=0 - ) - if dist.get_rank() == 0: - results_df = pd.concat(dataframes) - del dataframes - results_df.reset_index(inplace=True, drop=True) - - if not use_labels: - metrics_dict = None - else: - metrics_dict = compute_metrics( - network_task, results_df, metrics_module, report_ci=report_ci - ) - for loss_component in total_loss.keys(): - dist.reduce(total_loss[loss_component], dst=0) - loss_value = total_loss[loss_component].item() / cluster.world_size - - if report_ci: - metrics_dict["Metric_names"].append(loss_component) - metrics_dict["Metric_values"].append(loss_value) - metrics_dict["Lower_CI"].append("N/A") - metrics_dict["Upper_CI"].append("N/A") - metrics_dict["SE"].append("N/A") - - else: - metrics_dict[loss_component] = loss_value - - torch.cuda.empty_cache() - - return results_df, metrics_dict - - def test_da( - self, - mode: str, - metrics_module: MetricModule, - n_classes: int, - network_task: Union[str, Task], - model: Network, - dataloader: DataLoader, - criterion: _Loss, - alpha: float = 0, - use_labels: bool = True, - target: bool = True, - report_ci=False, - ) -> Tuple[pd.DataFrame, Dict[str, float]]: - """ - Computes the predictions and evaluation metrics. - - Args: - model: the model trained. - dataloader: wrapper of a CapsDataset. - criterion: function to calculate the loss. - use_labels: If True the true_label will be written in output DataFrame - and metrics dict will be created. - Returns: - the results and metrics on the image level. 
- """ - model.eval() - dataloader.dataset.eval() - results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) - total_loss = 0 - with torch.no_grad(): - for i, data in enumerate(dataloader): - outputs, loss_dict = model.compute_outputs_and_loss_test( - data, criterion, alpha, target - ) - total_loss += loss_dict["loss"].item() - - # Generate detailed DataFrame - for idx in range(len(data["participant_id"])): - row = generate_test_row( - network_task, - mode, - metrics_module, - n_classes, - idx, - data, - outputs, - ) - row_df = pd.DataFrame( - row, columns=columns(network_task, mode, n_classes) - ) - results_df = pd.concat([results_df, row_df]) - - del outputs, loss_dict - results_df.reset_index(inplace=True, drop=True) - - if not use_labels: - metrics_dict = None - else: - metrics_dict = compute_metrics( - network_task, results_df, metrics_module, report_ci=report_ci - ) - if report_ci: - metrics_dict["Metric_names"].append("loss") - metrics_dict["Metric_values"].append(total_loss) - metrics_dict["Lower_CI"].append("N/A") - metrics_dict["Upper_CI"].append("N/A") - metrics_dict["SE"].append("N/A") - - else: - metrics_dict["loss"] = total_loss - - torch.cuda.empty_cache() - - return results_df, metrics_dict - - def _test_loader( - self, - maps_manager: MapsManager, - dataloader, - criterion, - data_group: str, - split: int, - selection_metrics, - use_labels=True, - gpu=None, - amp=False, - network=None, - report_ci=True, - ): - """ - Launches the testing task on a dataset wrapped by a DataLoader and writes prediction TSV files. - - Args: - dataloader (torch.utils.data.DataLoader): DataLoader wrapping the test CapsDataset. - criterion (torch.nn.modules.loss._Loss): optimization criterion used during training. - data_group (str): name of the data group used for the testing task. - split (int): Index of the split used to train the model tested. - selection_metrics (list[str]): List of metrics used to select the best models which are tested. - use_labels (bool): If True, the labels must exist in test meta-data and metrics are computed. - gpu (bool): If given, a new value for the device of the model will be computed. - amp (bool): If enabled, uses Automatic Mixed Precision (requires GPU usage). - network (int): Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in selection_metrics: - if cluster.master: - log_dir = ( - maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - ) - maps_manager.write_description_log( - log_dir, - data_group, - dataloader.dataset.config.data.caps_dict, - dataloader.dataset.config.data.data_df, - ) - - # load the best trained model during the training - model, _ = maps_manager._init_model( - transfer_path=maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=gpu, - network=network, - ) - model = DDP( - model, - fsdp=maps_manager.fully_sharded_data_parallel, - amp=maps_manager.amp, - ) - - prediction_df, metrics = self.test( - mode=maps_manager.mode, - metrics_module=maps_manager.metrics_module, - n_classes=maps_manager.n_classes, - network_task=maps_manager.network_task, - model=model, - dataloader=dataloader, - criterion=criterion, - use_labels=use_labels, - amp=amp, - report_ci=report_ci, - ) - if use_labels: - if network is not None: - metrics[f"{maps_manager.mode}_id"] = network - - loss_to_log = ( - metrics["Metric_values"][-1] if report_ci else metrics["loss"] - ) - - logger.info( - f"{maps_manager.mode} level {data_group} loss is {loss_to_log} for model selected on {selection_metric}" - ) - - if cluster.master: - # Replace here - maps_manager._mode_level_to_tsv( - prediction_df, - metrics, - split, - selection_metric, - data_group=data_group, - ) - - @torch.no_grad() - def _compute_output_tensors( - self, - maps_manager: MapsManager, - dataset, - data_group, - split, - selection_metrics, - nb_images=None, - gpu=None, - network=None, - ): - """ - Compute the output tensors and saves them in the MAPS. - - Args: - dataset (clinicadl.caps_dataset.data.CapsDataset): wrapper of the data set. - data_group (str): name of the data group used for the task. - split (int): split number. - selection_metrics (list[str]): metrics used for model selection. - nb_images (int): number of full images to write. Default computes the outputs of the whole data set. - gpu (bool): If given, a new value for the device of the model will be computed. - network (int): Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in selection_metrics: - # load the best trained model during the training - model, _ = maps_manager._init_model( - transfer_path=maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=gpu, - network=network, - nb_unfrozen_layer=maps_manager.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=maps_manager.fully_sharded_data_parallel, - amp=maps_manager.amp, - ) - model.eval() - - tensor_path = ( - maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - / "tensors" - ) - if cluster.master: - tensor_path.mkdir(parents=True, exist_ok=True) - dist.barrier() - - if nb_images is None: # Compute outputs for the whole data set - nb_modes = len(dataset) - else: - nb_modes = nb_images * dataset.elem_per_image - - for i in [ - *range(cluster.rank, nb_modes, cluster.world_size), - *range(int(nb_modes % cluster.world_size <= cluster.rank)), - ]: - data = dataset[i] - image = data["image"] - x = image.unsqueeze(0).to(model.device) - with autocast("cuda", enabled=maps_manager.std_amp): - output = model(x) - output = output.squeeze(0).cpu().float() - participant_id = data["participant_id"] - session_id = data["session_id"] - mode_id = data[f"{maps_manager.mode}_id"] - input_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_input.pt" - output_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_output.pt" - torch.save(image, tensor_path / input_filename) - torch.save(output, tensor_path / output_filename) - logger.debug(f"File saved at {[input_filename, output_filename]}") - - def _ensemble_prediction( - self, - maps_manager: MapsManager, - data_group, - split, - selection_metrics, - use_labels=True, - skip_leak_check=False, - ): - """Computes the results on the image-level.""" - - if not selection_metrics: - selection_metrics = find_selection_metrics(maps_manager.maps_path, split) + def __init__(self, manager: ExperimentManager): + """TO COMPLETE""" + pass - for selection_metric in selection_metrics: - ##################### - # Soft voting - if maps_manager.num_networks > 1 and not skip_leak_check: - maps_manager._ensemble_to_tsv( - split, - selection=selection_metric, - data_group=data_group, - use_labels=use_labels, - ) - elif maps_manager.mode != "image" and not skip_leak_check: - maps_manager._mode_to_image_tsv( - split, - selection=selection_metric, - data_group=data_group, - use_labels=use_labels, - ) + def predict(self, dataset_test: CapsDataset, split: int): + """TO COMPLETE""" + pass diff --git a/clinicadl/predictor/utils.py b/clinicadl/predictor/utils.py index c66372764..6aea27e65 100644 --- a/clinicadl/predictor/utils.py +++ b/clinicadl/predictor/utils.py @@ -3,7 +3,7 @@ import pandas as pd -from clinicadl.metrics.utils import check_selection_metric +from clinicadl.metrics.old_metrics.utils import check_selection_metric from clinicadl.splitter.split_utils import print_description_log from clinicadl.utils.exceptions import MAPSError diff --git a/clinicadl/quality_check/pet_linear/quality_check.py b/clinicadl/quality_check/pet_linear/quality_check.py index 7c355b09c..d54eabac2 100644 --- a/clinicadl/quality_check/pet_linear/quality_check.py +++ b/clinicadl/quality_check/pet_linear/quality_check.py @@ -12,8 +12,8 @@ import pandas as pd from joblib import Parallel, delayed -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.preprocessing.utils import pet_linear_nii +from clinicadl.dataset.caps_dataset_config 
import CapsDatasetConfig +from clinicadl.dataset.utils import pet_linear_nii from clinicadl.utils.iotools.clinica_utils import ( RemoteFileStructure, clinicadl_file_reader, diff --git a/clinicadl/quality_check/pet_linear/utils.py b/clinicadl/quality_check/pet_linear/utils.py index 1edba9e15..e27c60d2b 100644 --- a/clinicadl/quality_check/pet_linear/utils.py +++ b/clinicadl/quality_check/pet_linear/utils.py @@ -6,7 +6,7 @@ import numpy as np -from clinicadl.transforms.transforms import MinMaxNormalization +from clinicadl.transforms.factory import MinMaxNormalization def get_metric(contour_np, image_np, inside): diff --git a/clinicadl/quality_check/t1_linear/quality_check.py b/clinicadl/quality_check/t1_linear/quality_check.py index 7063c0c68..373f5228c 100755 --- a/clinicadl/quality_check/t1_linear/quality_check.py +++ b/clinicadl/quality_check/t1_linear/quality_check.py @@ -11,7 +11,7 @@ from torch.amp import autocast from torch.utils.data import DataLoader -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig from clinicadl.generate.generate_utils import load_and_check_tsv from clinicadl.utils.computational.computational import ComputationalConfig from clinicadl.utils.exceptions import ClinicaDLArgumentError diff --git a/clinicadl/quality_check/t1_linear/utils.py b/clinicadl/quality_check/t1_linear/utils.py index 20d4d5462..0ac67736c 100755 --- a/clinicadl/quality_check/t1_linear/utils.py +++ b/clinicadl/quality_check/t1_linear/utils.py @@ -8,9 +8,9 @@ import torch from torch.utils.data import Dataset -from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig -from clinicadl.caps_dataset.caps_dataset_utils import compute_folder_and_file_type -from clinicadl.caps_dataset.preprocessing.utils import linear_nii +from clinicadl.dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.dataset.caps_dataset_utils import compute_folder_and_file_type +from clinicadl.dataset.utils import linear_nii from clinicadl.utils.enum import Preprocessing from clinicadl.utils.exceptions import ClinicaDLException from clinicadl.utils.iotools.clinica_utils import clinicadl_file_reader @@ -30,7 +30,7 @@ def __init__( data_df (DataFrame): Subject and session list. 
""" - from clinicadl.transforms.transforms import MinMaxNormalization + from clinicadl.transforms.factory import MinMaxNormalization self.img_dir = config.data.caps_directory self.df = config.data.data_df diff --git a/clinicadl/random_search/random_search.py b/clinicadl/random_search/random_search.py index 7929e9382..f38f248d2 100755 --- a/clinicadl/random_search/random_search.py +++ b/clinicadl/random_search/random_search.py @@ -4,7 +4,7 @@ from pathlib import Path -from clinicadl.trainer.trainer import Trainer +from clinicadl.trainer.old_trainer import Trainer from .random_search_config import RandomSearchConfig, create_training_config from .random_search_utils import get_space_dict, random_sampling diff --git a/clinicadl/random_search/random_search_config.py b/clinicadl/random_search/random_search_config.py index 2e1d728a9..3d6a65d2d 100644 --- a/clinicadl/random_search/random_search_config.py +++ b/clinicadl/random_search/random_search_config.py @@ -15,7 +15,7 @@ from clinicadl.utils.enum import Normalization, Pooling, Task if TYPE_CHECKING: - from clinicadl.trainer.trainer import TrainConfig + from clinicadl.trainer.old_trainer import TrainConfig class RandomSearchConfig( diff --git a/clinicadl/splitter/config.py b/clinicadl/splitter/config.py index 59fdbaad8..da4e32707 100644 --- a/clinicadl/splitter/config.py +++ b/clinicadl/splitter/config.py @@ -6,7 +6,7 @@ from pydantic import BaseModel, ConfigDict, field_validator from pydantic.types import NonNegativeInt -from clinicadl.caps_dataset.data_config import DataConfig +from clinicadl.dataset.data_config import DataConfig from clinicadl.predictor.validation import ValidationConfig from clinicadl.splitter.split_utils import find_splits diff --git a/clinicadl/splitter/kfold.py b/clinicadl/splitter/kfold.py new file mode 100644 index 000000000..37805dfba --- /dev/null +++ b/clinicadl/splitter/kfold.py @@ -0,0 +1,24 @@ +from typing import Optional + +from clinicadl.dataset.caps_dataset import CapsDataset +from clinicadl.experiment_manager.experiment_manager import ExperimentManager + + +class Split: + def __init__( + self, + ): + """TO COMPLETE""" + pass + + +class KFolder: + def __init__( + self, n_splits: int, caps_dataset: CapsDataset, manager: ExperimentManager + ) -> None: + """TO COMPLETE""" + + def split_iterator(self, split_list: Optional[list] = None) -> list[Split]: + """TO COMPLETE""" + + return list[Split()] diff --git a/clinicadl/splitter/splitter.py b/clinicadl/splitter/old_splitter.py similarity index 100% rename from clinicadl/splitter/splitter.py rename to clinicadl/splitter/old_splitter.py diff --git a/clinicadl/splitter/split.py b/clinicadl/splitter/split.py new file mode 100644 index 000000000..72c4f9d82 --- /dev/null +++ b/clinicadl/splitter/split.py @@ -0,0 +1,18 @@ +from pathlib import Path + +from clinicadl.dataset.caps_dataset import CapsDataset +from clinicadl.experiment_manager.experiment_manager import ExperimentManager +from clinicadl.splitter.kfold import Split + + +def split_tsv(sub_ses_tsv: Path) -> Path: + """TO COMPLETE""" + + split_dir = Path("") + return split_dir + + +def get_single_split( + n_subject_validation: int, caps_dataset: CapsDataset, manager: ExperimentManager +) -> Split: + pass diff --git a/clinicadl/tmp_config.py b/clinicadl/tmp_config.py index 620db133e..54a791b1e 100644 --- a/clinicadl/tmp_config.py +++ b/clinicadl/tmp_config.py @@ -19,8 +19,8 @@ ) from typing_extensions import Self -from clinicadl.caps_dataset.data import return_dataset -from clinicadl.metrics.metric_module import 
MetricModule +from clinicadl.dataset.caps_dataset import return_dataset +from clinicadl.metrics.old_metrics.metric_module import MetricModule from clinicadl.splitter.split_utils import find_splits from clinicadl.trainer.tasks_utils import ( evaluation_metrics, @@ -28,7 +28,7 @@ get_default_network, output_size, ) -from clinicadl.transforms import transforms +from clinicadl.transforms import factory from clinicadl.transforms.config import TransformsConfig from clinicadl.utils.enum import ( Compensation, @@ -380,7 +380,7 @@ def check_preprocessing_dict(self) -> Self: ValueError In case of multi-cohort dataset, if no preprocessing file is found in any CAPS. """ - from clinicadl.caps_dataset.data import CapsDataset + from clinicadl.dataset.caps_dataset import CapsDataset if self.preprocessing_dict is None: if self.preprocessing_json is not None: @@ -484,16 +484,16 @@ def get_transforms( transforms to apply in train and evaluation mode / transforms to apply in evaluation mode only. """ augmentation_dict = { - "Noise": transforms.RandomNoising(sigma=0.1), + "Noise": factory.RandomNoising(sigma=0.1), "Erasing": torch_transforms.RandomErasing(), - "CropPad": transforms.RandomCropPad(10), - "Smoothing": transforms.RandomSmoothing(), - "Motion": transforms.RandomMotion((2, 4), (2, 4), 2), - "Ghosting": transforms.RandomGhosting((4, 10)), - "Spike": transforms.RandomSpike(1, (1, 3)), - "BiasField": transforms.RandomBiasField(0.5), - "RandomBlur": transforms.RandomBlur((0, 2)), - "RandomSwap": transforms.RandomSwap(15, 100), + "CropPad": factory.RandomCropPad(10), + "Smoothing": factory.RandomSmoothing(), + "Motion": factory.RandomMotion((2, 4), (2, 4), 2), + "Ghosting": factory.RandomGhosting((4, 10)), + "Spike": factory.RandomSpike(1, (1, 3)), + "BiasField": factory.RandomBiasField(0.5), + "RandomBlur": factory.RandomBlur((0, 2)), + "RandomSwap": factory.RandomSwap(15, 100), "None": None, } @@ -508,12 +508,12 @@ def get_transforms( ] ) - transformations_list.append(transforms.NanRemoval()) + transformations_list.append(factory.NanRemoval()) if self.normalize: - transformations_list.append(transforms.MinMaxNormalization()) + transformations_list.append(factory.MinMaxNormalization()) if self.size_reduction: transformations_list.append( - transforms.SizeReduction(self.size_reduction_factor) + factory.SizeReduction(self.size_reduction_factor) ) all_transformations = torch_transforms.Compose(transformations_list) diff --git a/clinicadl/trainer/config/classification.py b/clinicadl/trainer/config/classification.py index f09021559..25a8d7f6b 100644 --- a/clinicadl/trainer/config/classification.py +++ b/clinicadl/trainer/config/classification.py @@ -3,8 +3,8 @@ from pydantic import computed_field, field_validator -from clinicadl.caps_dataset.data_config import DataConfig as BaseDataConfig -from clinicadl.network.config import NetworkConfig as BaseNetworkConfig +from clinicadl.dataset.data_config import DataConfig as BaseDataConfig +from clinicadl.networks.old_network.config import NetworkConfig as BaseNetworkConfig from clinicadl.predictor.validation import ValidationConfig as BaseValidationConfig from clinicadl.trainer.config.train import TrainConfig from clinicadl.utils.enum import ClassificationLoss, ClassificationMetric, Task diff --git a/clinicadl/trainer/config/reconstruction.py b/clinicadl/trainer/config/reconstruction.py index d4b90ee2d..8a1dd825e 100644 --- a/clinicadl/trainer/config/reconstruction.py +++ b/clinicadl/trainer/config/reconstruction.py @@ -3,7 +3,7 @@ from pydantic import 
PositiveFloat, PositiveInt, computed_field, field_validator -from clinicadl.network.config import NetworkConfig as BaseNetworkConfig +from clinicadl.networks.old_network.config import NetworkConfig as BaseNetworkConfig from clinicadl.predictor.validation import ValidationConfig as BaseValidationConfig from clinicadl.trainer.config.train import TrainConfig from clinicadl.utils.enum import ( diff --git a/clinicadl/trainer/config/regression.py b/clinicadl/trainer/config/regression.py index f094d5552..7504138d8 100644 --- a/clinicadl/trainer/config/regression.py +++ b/clinicadl/trainer/config/regression.py @@ -3,8 +3,8 @@ from pydantic import computed_field, field_validator -from clinicadl.caps_dataset.data_config import DataConfig as BaseDataConfig -from clinicadl.network.config import NetworkConfig as BaseNetworkConfig +from clinicadl.dataset.data_config import DataConfig as BaseDataConfig +from clinicadl.networks.old_network.config import NetworkConfig as BaseNetworkConfig from clinicadl.predictor.validation import ValidationConfig as BaseValidationConfig from clinicadl.trainer.config.train import TrainConfig from clinicadl.utils.enum import RegressionLoss, RegressionMetric, Task diff --git a/clinicadl/trainer/config/train.py b/clinicadl/trainer/config/train.py index 30a92c92a..96e1b2081 100644 --- a/clinicadl/trainer/config/train.py +++ b/clinicadl/trainer/config/train.py @@ -10,14 +10,14 @@ ) from clinicadl.callbacks.config import CallbacksConfig -from clinicadl.caps_dataset.data_config import DataConfig -from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig from clinicadl.config.config.lr_scheduler import LRschedulerConfig from clinicadl.config.config.reproducibility import ReproducibilityConfig -from clinicadl.maps_manager.config import MapsManagerConfig -from clinicadl.network.config import NetworkConfig -from clinicadl.optimizer.optimization import OptimizationConfig -from clinicadl.optimizer.optimizer import OptimizerConfig +from clinicadl.dataset.data_config import DataConfig +from clinicadl.dataset.dataloader_config import DataLoaderConfig +from clinicadl.experiment_manager.config import MapsManagerConfig +from clinicadl.networks.old_network.config import NetworkConfig +from clinicadl.optimization.config import OptimizationConfig +from clinicadl.optimization.optimizer.config import OptimizerConfig from clinicadl.predictor.validation import ValidationConfig from clinicadl.splitter.config import SplitConfig from clinicadl.trainer.transfer_learning import TransferLearningConfig @@ -46,7 +46,6 @@ class TrainConfig(BaseModel, ABC): maps_manager: MapsManagerConfig model: NetworkConfig optimization: OptimizationConfig - optimizer: OptimizerConfig reproducibility: ReproducibilityConfig split: SplitConfig transfer_learning: TransferLearningConfig @@ -72,7 +71,6 @@ def __init__(self, **kwargs): maps_manager=kwargs, model=kwargs, optimization=kwargs, - optimizer=kwargs, reproducibility=kwargs, split=kwargs, transfer_learning=kwargs, @@ -91,7 +89,6 @@ def _update(self, config_dict: Dict[str, Any]) -> None: self.maps_manager.__dict__.update(config_dict) self.model.__dict__.update(config_dict) self.optimization.__dict__.update(config_dict) - self.optimizer.__dict__.update(config_dict) self.reproducibility.__dict__.update(config_dict) self.split.__dict__.update(config_dict) self.transfer_learning.__dict__.update(config_dict) diff --git a/clinicadl/trainer/old_trainer.py b/clinicadl/trainer/old_trainer.py new file mode 100644 index 000000000..fb798d788 --- /dev/null +++ 
b/clinicadl/trainer/old_trainer.py
@@ -0,0 +1,901 @@
+from __future__ import annotations  # noqa: I001
+
+
+from contextlib import nullcontext
+from datetime import datetime
+from logging import getLogger
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, Callable
+
+import pandas as pd
+import torch
+import torch.distributed as dist
+from torch.amp.grad_scaler import GradScaler
+from torch.amp.autocast_mode import autocast
+from torch.utils.data import DataLoader
+from torch.utils.data.distributed import DistributedSampler
+
+from clinicadl.splitter.split_utils import find_finished_splits, find_stopped_splits
+from clinicadl.dataset.caps_dataset import return_dataset
+from clinicadl.utils.early_stopping.early_stopping import EarlyStopping
+from clinicadl.utils.exceptions import MAPSError
+from clinicadl.utils.computational.ddp import DDP
+from clinicadl.utils import cluster
+from clinicadl.utils.logwriter import LogWriter
+from clinicadl.dataset.caps_dataset_utils import read_json
+from clinicadl.metrics.old_metrics.metric_module import RetainBest
+from clinicadl.utils.seed import pl_worker_init_function, seed_everything
+from clinicadl.experiment_manager.maps_manager import MapsManager
+from clinicadl.utils.seed import get_seed
+from clinicadl.utils.enum import Task
+from clinicadl.utils.iotools.trainer_utils import (
+    create_parameters_dict,
+    patch_to_read_json,
+)
+from clinicadl.trainer.tasks_utils import create_training_config
+from clinicadl.predictor.old_predictor import Predictor
+from clinicadl.predictor.config import PredictConfig
+from clinicadl.splitter.old_splitter import Splitter
+from clinicadl.splitter.config import SplitterConfig
+from clinicadl.transforms.config import TransformsConfig
+
+if TYPE_CHECKING:
+    from clinicadl.callbacks.callbacks import Callback
+    from clinicadl.trainer.config.train import TrainConfig
+
+from clinicadl.trainer.tasks_utils import (
+    evaluation_metrics,
+    generate_sampler,
+    get_criterion,
+    save_outputs,
+)
+
+logger = getLogger("clinicadl.trainer")
+
+
+class Trainer:
+    """Temporary Trainer extracted from the MAPSManager."""
+
+    def __init__(
+        self,
+        config: TrainConfig,
+    ) -> None:
+        """
+        Parameters
+        ----------
+        config : TrainConfig
+        """
+        self.config = config
+
+        self.maps_manager = self._init_maps_manager(config)
+        predict_config = PredictConfig(**config.get_dict())
+        self.validator = Predictor(predict_config)
+
+        # test
+        splitter_config = SplitterConfig(**self.config.get_dict())
+        self.splitter = Splitter(splitter_config)
+        self._check_args()
+
+    def _init_maps_manager(self, config) -> MapsManager:
+        # temporary: to match CLI data. TODO : change CLI data
+
+        parameters, maps_path = create_parameters_dict(config)
+
+        if maps_path.is_dir():
+            return MapsManager(
+                maps_path, verbose=None
+            )  # TODO : precise which parameters in config are useful
+        else:
+            # parameters["maps_path"] = maps_path
+            return MapsManager(
+                maps_path, parameters, verbose=None
+            )  # TODO : precise which parameters in config are useful
+
+    @classmethod
+    def from_json(
+        cls,
+        config_file: str | Path,
+        maps_path: str | Path,
+        split: Optional[list[int]] = None,
+    ) -> Trainer:
+        """
+        Creates a Trainer from a json configuration file.
+
+        Parameters
+        ----------
+        config_file : str | Path
+            The parameters, stored in a json file.
+        maps_path : str | Path
+            The folder where the results of a future training will be stored.
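+        split : Optional[list[int]] (optional, default=None)
+            Indices of the splits to train. If None, all splits of the
+            cross-validation are trained.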
+
+        Returns
+        -------
+        Trainer
+            The Trainer object, instantiated with parameters found in config_file.
+
+        Raises
+        ------
+        FileNotFoundError
+            If config_file doesn't exist.
+        """
+        config_file = Path(config_file)
+
+        if not (config_file).is_file():
+            raise FileNotFoundError(f"No file found at {str(config_file)}.")
+        config_dict = patch_to_read_json(read_json(config_file))  # TODO : remove patch
+        config_dict["maps_dir"] = maps_path
+        config_dict["split"] = split if split else ()
+        config_object = create_training_config(config_dict["network_task"])(
+            **config_dict
+        )
+        return cls(config_object)
+
+    @classmethod
+    def from_maps(cls, maps_path: str | Path) -> Trainer:
+        """
+        Creates a Trainer from a MAPS folder.
+
+        Parameters
+        ----------
+        maps_path : str | Path
+            The path of the MAPS folder.
+
+        Returns
+        -------
+        Trainer
+            The Trainer object, instantiated with parameters found in maps_path.
+
+        Raises
+        ------
+        MAPSError
+            If maps_path folder doesn't exist or there is no maps.json file in it.
+        """
+        maps_path = Path(maps_path)
+
+        if not (maps_path / "maps.json").is_file():
+            raise MAPSError(
+                f"MAPS was not found at {str(maps_path)}. "
+                f"To initiate a new MAPS, please give a train_dict."
+            )
+        return cls.from_json(maps_path / "maps.json", maps_path)
+
+    def resume(self) -> None:
+        """
+        Resumes a prematurely stopped training.
+
+        The splits to resume are inferred from the state of the MAPS folder:
+        stopped splits are resumed, and absent splits are trained from scratch.
+        """
+        stopped_splits = set(find_stopped_splits(self.config.maps_manager.maps_dir))
+        finished_splits = set(find_finished_splits(self.config.maps_manager.maps_dir))
+        # TODO : check these two lines. Why do we need a self.splitter?
+
+        splitter_config = SplitterConfig(**self.config.get_dict())
+        self.splitter = Splitter(splitter_config)
+
+        split_iterator = self.splitter.split_iterator()
+        ###
+        absent_splits = set(split_iterator) - stopped_splits - finished_splits
+
+        logger.info(
+            f"Finished splits {finished_splits}\n"
+            f"Stopped splits {stopped_splits}\n"
+            f"Absent splits {absent_splits}"
+        )
+
+        if len(stopped_splits) == 0 and len(absent_splits) == 0:
+            raise ValueError(
+                "Training has been completed on all the splits you passed."
+            )
+        if len(stopped_splits) > 0:
+            self._resume(list(stopped_splits))
+        if len(absent_splits) > 0:
+            self.train(list(absent_splits), overwrite=True)
+
+    def _check_args(self):
+        self.config.reproducibility.seed = get_seed(self.config.reproducibility.seed)
+        # if len(self.config.data.label_code) == 0:
+        #     self.config.data.label_code = self.maps_manager.label_code
+        # TODO: deal with label_code and replace self.maps_manager.label_code
+        from clinicadl.trainer.tasks_utils import generate_label_code
+
+        if (
+            "label_code" not in self.config.data.model_dump()
+            or len(self.config.data.label_code) == 0
+            or self.config.data.label_code is None
+        ):  # Allows to set custom label code in TOML
+            train_df = self.splitter[0]["train"]
+            self.config.data.label_code = generate_label_code(
+                self.config.network_task, train_df, self.config.data.label
+            )
+
+    def train(
+        self,
+        split_list: Optional[List[int]] = None,
+        overwrite: bool = False,
+    ) -> None:
+        """
+        Performs the training task for a defined list of splits.
+
+        Parameters
+        ----------
+        split_list : Optional[List[int]] (optional, default=None)
+            List of splits on which the training task is performed.
+            Default trains all splits of the cross-validation.
+        overwrite : bool (optional, default=False)
+            If True, previously trained splits that are going to be trained
+            are erased.
+
+        Raises
+        ------
+        MAPSError
+            If splits specified in input already exist and overwrite is False.
+        """
+
+        # splitter_config = SplitterConfig(**self.config.get_dict())
+        # self.splitter = Splitter(splitter_config)
+        # self.splitter.check_split_list(self.config.maps_manager.maps_dir, self.config.maps_manager.overwrite)
+        self.splitter.check_split_list(
+            self.config.maps_manager.maps_dir,
+            overwrite,  # careful: this is the `overwrite` argument of train(), not the MapsManager overwrite parameter
+        )
+        for split in self.splitter.split_iterator():
+            logger.info(f"Training split {split}")
+            seed_everything(
+                self.config.reproducibility.seed,
+                self.config.reproducibility.deterministic,
+                self.config.reproducibility.compensation,
+            )
+
+            split_df_dict = self.splitter[split]
+
+            if self.config.model.multi_network:
+                resume, first_network = self.init_first_network(False, split)
+                for network in range(first_network, self.maps_manager.num_networks):
+                    self._train_single(
+                        split, split_df_dict, network=network, resume=resume
+                    )
+            else:
+                self._train_single(split, split_df_dict, resume=False)
+
+    # def check_split_list(self, split_list, overwrite):
+    #     existing_splits = []
+    #     splitter_config = SplitterConfig(**self.config.get_dict())
+    #     self.splitter = Splitter(splitter_config)
+    #     for split in self.splitter.split_iterator():
+    #         split_path = self.maps_manager.maps_path / f"split-{split}"
+    #         if split_path.is_dir():
+    #             if overwrite:
+    #                 if cluster.master:
+    #                     shutil.rmtree(split_path)
+    #             else:
+    #                 existing_splits.append(split)
+
+    #     if len(existing_splits) > 0:
+    #         raise MAPSError(
+    #             f"Splits {existing_splits} already exist. Please "
+    #             f"specify a list of splits not intersecting the previous list, "
+    #             f"or use overwrite to erase previously trained splits."
+    #         )
+
+    def _resume(
+        self,
+        split_list: Optional[List[int]] = None,
+    ) -> None:
+        """
+        Resumes the training task for a defined list of splits.
+
+        Parameters
+        ----------
+        split_list : Optional[List[int]] (optional, default=None)
+            List of splits on which the training task is performed.
+            If None, the training task is performed on all splits.
+
+        Raises
+        ------
+        MAPSError
+            If splits specified in input do not exist.
+        """
+        missing_splits = []
+        splitter_config = SplitterConfig(**self.config.get_dict())
+        self.splitter = Splitter(splitter_config)
+        for split in self.splitter.split_iterator():
+            if not (self.maps_manager.maps_path / f"split-{split}" / "tmp").is_dir():
+                missing_splits.append(split)
+
+        if len(missing_splits) > 0:
+            raise MAPSError(
+                f"Splits {missing_splits} were not initialized. "
+                f"Please run the train command on these splits and resume only the others."
+            )
+
+        for split in self.splitter.split_iterator():
+            logger.info(f"Training split {split}")
+            seed_everything(
+                self.config.reproducibility.seed,
+                self.config.reproducibility.deterministic,
+                self.config.reproducibility.compensation,
+            )
+
+            split_df_dict = self.splitter[split]
+            if self.config.model.multi_network:
+                resume, first_network = self.init_first_network(True, split)
+                for network in range(first_network, self.maps_manager.num_networks):
+                    self._train_single(
+                        split, split_df_dict, network=network, resume=resume
+                    )
+            else:
+                self._train_single(split, split_df_dict, resume=True)
+
+    def init_first_network(self, resume: bool, split: int):
+        first_network = 0
+        if resume:
+            training_logs = [
+                int(str(network_folder).split("-")[1])
+                for network_folder in list(
+                    (
+                        self.maps_manager.maps_path / f"split-{split}" / "training_logs"
+                    ).iterdir()
+                )
+            ]
+            first_network = max(training_logs)
+            if not (self.maps_manager.maps_path / "tmp").is_dir():
+                first_network += 1
+                resume = False
+        return resume, first_network
+
+    def get_dataloader(
+        self,
+        data_df: pd.DataFrame,
+        cnn_index: Optional[int] = None,
+        sampler_option: str = "random",
+        dp_degree: Optional[int] = None,
+        rank: Optional[int] = None,
+        worker_init_fn: Optional[Callable[[int], None]] = None,
+        shuffle: Optional[bool] = None,
+        num_replicas: Optional[int] = None,
+        homemade_sampler: bool = False,
+    ):
+        dataset = return_dataset(
+            input_dir=self.config.data.caps_directory,
+            data_df=data_df,
+            preprocessing_dict=self.config.data.preprocessing_dict,
+            transforms_config=self.config.transforms,
+            multi_cohort=self.config.data.multi_cohort,
+            label=self.config.data.label,
+            label_code=self.config.data.label_code,
+            cnn_index=cnn_index,
+        )
+        if homemade_sampler:
+            sampler = generate_sampler(
+                network_task=self.maps_manager.network_task,
+                dataset=dataset,
+                sampler_option=sampler_option,
+                label_code=self.config.data.label_code,
+                dp_degree=dp_degree,
+                rank=rank,
+            )
+        else:
+            sampler = DistributedSampler(
+                dataset,
+                num_replicas=num_replicas,
+                rank=rank,
+                shuffle=shuffle,
+            )
+
+        train_loader = DataLoader(
+            dataset=dataset,
+            batch_size=self.config.dataloader.batch_size,
+            sampler=sampler,
+            num_workers=self.config.dataloader.n_proc,
+            worker_init_fn=worker_init_fn,
+            shuffle=shuffle,
+        )
+        logger.debug(f"Train loader size is {len(train_loader)}")
+
+        return train_loader
+
+    def _train_single(
+        self,
+        split,
+        split_df_dict: Dict,
+        network: Optional[int] = None,
+        resume: bool = False,
+    ) -> None:
+        """
+        Trains a single CNN for all inputs.
+
+        Parameters
+        ----------
+        split : int
+            Index of the split on which the training task is performed.
+        split_df_dict : Dict
+            Dictionary with the train and validation DataFrames of the split.
+        resume : bool (optional, default=False)
+            If True, the job is resumed from checkpoint.
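+        network : int (optional, default=None)
+            Index of the network trained (used in multi-network setting only).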
+        """
+
+        logger.debug("Loading training data...")
+
+        train_loader = self.get_dataloader(
+            data_df=split_df_dict["train"],
+            cnn_index=network,
+            sampler_option=self.config.dataloader.sampler,
+            dp_degree=cluster.world_size,  # type: ignore
+            rank=cluster.rank,  # type: ignore
+            worker_init_fn=pl_worker_init_function,
+            homemade_sampler=True,
+        )
+
+        logger.debug(f"Train loader size is {len(train_loader)}")
+        logger.debug("Loading validation data...")
+
+        valid_loader = self.get_dataloader(
+            data_df=split_df_dict["validation"],
+            cnn_index=network,
+            num_replicas=cluster.world_size,  # type: ignore
+            rank=cluster.rank,  # type: ignore
+            shuffle=False,
+            homemade_sampler=False,
+        )
+
+        logger.debug(f"Validation loader size is {len(valid_loader)}")
+        from clinicadl.callbacks.callbacks import CodeCarbonTracker
+
+        self._train(
+            train_loader,
+            valid_loader,
+            split,
+            resume=resume,
+            callbacks=[CodeCarbonTracker],
+            network=network,
+        )
+
+        if network is not None:
+            resume = False
+
+        if cluster.master:
+            self.validator._ensemble_prediction(
+                self.maps_manager,
+                "train",
+                split,
+                self.config.validation.selection_metrics,
+            )
+            self.validator._ensemble_prediction(
+                self.maps_manager,
+                "validation",
+                split,
+                self.config.validation.selection_metrics,
+            )
+
+            self.maps_manager._erase_tmp(split)
+
+    def _train(
+        self,
+        train_loader: DataLoader,
+        valid_loader: DataLoader,
+        split: int,
+        network: Optional[int] = None,
+        resume: bool = False,
+        callbacks: list[Callback] = [],
+    ):
+        """
+        Core function shared by train and resume.
+
+        Parameters
+        ----------
+        train_loader : torch.utils.data.DataLoader
+            DataLoader wrapping the training set.
+        valid_loader : torch.utils.data.DataLoader
+            DataLoader wrapping the validation set.
+        split : int
+            Index of the split trained.
+        network : int (optional, default=None)
+            Index of the network trained (used in multi-network setting only).
+        resume : bool (optional, default=False)
+            If True, the job is resumed from the checkpoint.
+        callbacks : List[Callback] (optional, default=[])
+            List of callbacks to call during training.
+
+        Raises
+        ------
+        Exception
+            If the model was never updated during an epoch, which may happen
+            when the accumulation step is larger than the number of batches.
+        """
+        self._init_callbacks()
+        model, beginning_epoch = self.maps_manager._init_model(
+            split=split,
+            resume=resume,
+            transfer_path=self.config.transfer_learning.transfer_path,
+            transfer_selection=self.config.transfer_learning.transfer_selection_metric,
+            nb_unfrozen_layer=self.config.transfer_learning.nb_unfrozen_layer,
+        )
+        model = DDP(
+            model,
+            fsdp=self.config.computational.fully_sharded_data_parallel,
+            amp=self.config.computational.amp,
+        )
+        criterion = get_criterion(
+            self.maps_manager.network_task, self.config.model.loss
+        )
+
+        optimizer = self._init_optimizer(model, split=split, resume=resume)
+        self.callback_handler.on_train_begin(
+            self.maps_manager.parameters,
+            criterion=criterion,
+            optimizer=optimizer,
+            split=split,
+            maps_path=self.maps_manager.maps_path,
+        )
+
+        model.train()
+        train_loader.dataset.train()
+
+        early_stopping = EarlyStopping(
+            "min",
+            min_delta=self.config.early_stopping.tolerance,
+            patience=self.config.early_stopping.patience,
+        )
+        metrics_valid = {"loss": None}
+
+        if cluster.master:
+            log_writer = LogWriter(
+                self.maps_manager.maps_path,
+                evaluation_metrics(self.maps_manager.network_task) + ["loss"],
+                split,
+                resume=resume,
+                beginning_epoch=beginning_epoch,
+                network=network,
+            )
+            # retain_best = RetainBest(
+            #     selection_metrics=list(self.config.validation.selection_metrics)
+            # ) ???
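+        # RetainBest (instantiated below) keeps track of the best value reached
+        # by each selection metric across epochs; its output decides which
+        # "best-<metric>" checkpoints are overwritten when weights are saved.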
+
+        epoch = beginning_epoch
+
+        retain_best = RetainBest(
+            selection_metrics=list(self.config.validation.selection_metrics)
+        )
+
+        scaler = GradScaler("cuda", enabled=self.config.computational.amp)
+        profiler = self._init_profiler()
+
+        if self.config.callbacks.track_exp == "wandb":
+            from clinicadl.callbacks.tracking_exp import WandB_handler
+
+        if self.config.lr_scheduler.adaptive_learning_rate:
+            from torch.optim.lr_scheduler import ReduceLROnPlateau
+
+            # Initialize the ReduceLROnPlateau scheduler
+            scheduler = ReduceLROnPlateau(optimizer, mode="min", factor=0.1)
+
+        while epoch < self.config.optimization.epochs and not early_stopping.step(
+            metrics_valid["loss"]
+        ):
+            # self.callback_handler.on_epoch_begin(self.parameters, epoch = epoch)
+
+            if isinstance(train_loader.sampler, DistributedSampler):
+                # It should always be true for a random sampler. But just in case
+                # we get a WeightedRandomSampler or a forgotten RandomSampler,
+                # we do not want to execute this line.
+                train_loader.sampler.set_epoch(epoch)
+
+            model.zero_grad(set_to_none=True)
+            evaluation_flag, step_flag = True, True
+
+            with profiler:
+                for i, data in enumerate(train_loader):
+                    update: bool = (
+                        i + 1
+                    ) % self.config.optimization.accumulation_steps == 0
+                    sync = nullcontext() if update else model.no_sync()
+                    with sync:
+                        with autocast("cuda", enabled=self.maps_manager.std_amp):
+                            _, loss_dict = model(data, criterion)
+                        logger.debug(f"Train loss dictionary {loss_dict}")
+                        loss = loss_dict["loss"]
+                        scaler.scale(loss).backward()
+
+                        if update:
+                            step_flag = False
+                            scaler.step(optimizer)
+                            scaler.update()
+                            optimizer.zero_grad(set_to_none=True)
+
+                        del loss
+
+                        # Evaluate the model only when no gradients are accumulated
+                        if (
+                            self.config.validation.evaluation_steps != 0
+                            and (i + 1) % self.config.validation.evaluation_steps == 0
+                        ):
+                            evaluation_flag = False
+
+                            _, metrics_train = self.validator.test(
+                                mode=self.maps_manager.mode,
+                                metrics_module=self.maps_manager.metrics_module,
+                                n_classes=self.maps_manager.n_classes,
+                                network_task=self.maps_manager.network_task,
+                                model=model,
+                                dataloader=train_loader,
+                                criterion=criterion,
+                                amp=self.maps_manager.std_amp,
+                            )
+                            _, metrics_valid = self.validator.test(
+                                mode=self.maps_manager.mode,
+                                metrics_module=self.maps_manager.metrics_module,
+                                n_classes=self.maps_manager.n_classes,
+                                network_task=self.maps_manager.network_task,
+                                model=model,
+                                dataloader=valid_loader,
+                                criterion=criterion,
+                                amp=self.maps_manager.std_amp,
+                            )
+
+                            model.train()
+                            train_loader.dataset.train()
+
+                            if cluster.master:
+                                log_writer.step(
+                                    epoch,
+                                    i,
+                                    metrics_train,
+                                    metrics_valid,
+                                    len(train_loader),
+                                )
+                            logger.info(
+                                f"{self.config.data.mode} level training loss is {metrics_train['loss']} "
+                                f"at the end of iteration {i}"
+                            )
+                            logger.info(
+                                f"{self.config.data.mode} level validation loss is {metrics_valid['loss']} "
+                                f"at the end of iteration {i}"
+                            )
+
+                    profiler.step()
+
+                # If no step has been performed, raise Exception
+                if step_flag:
+                    raise Exception(
+                        "The model has not been updated once in the epoch. The accumulation step may be too large."
+                    )
+
+                # If no evaluation has been performed, warn the user
+                elif evaluation_flag and self.config.validation.evaluation_steps != 0:
+                    logger.warning(
+                        f"Your evaluation steps {self.config.validation.evaluation_steps} are too large "
+                        f"compared to the size of the dataset. "
+                        f"The model is evaluated only once, at the end of the epoch."
+ ) + + # Update weights one last time if gradients were computed without update + if (i + 1) % self.config.optimization.accumulation_steps != 0: + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad(set_to_none=True) + + # Always test the results and save them once at the end of the epoch + model.zero_grad(set_to_none=True) + logger.debug(f"Last checkpoint at the end of the epoch {epoch}") + + _, metrics_train = self.validator.test( + mode=self.maps_manager.mode, + metrics_module=self.maps_manager.metrics_module, + n_classes=self.maps_manager.n_classes, + network_task=self.maps_manager.network_task, + model=model, + dataloader=train_loader, + criterion=criterion, + amp=self.maps_manager.std_amp, + ) + _, metrics_valid = self.validator.test( + mode=self.maps_manager.mode, + metrics_module=self.maps_manager.metrics_module, + n_classes=self.maps_manager.n_classes, + network_task=self.maps_manager.network_task, + model=model, + dataloader=valid_loader, + criterion=criterion, + amp=self.maps_manager.std_amp, + ) + + model.train() + train_loader.dataset.train() + + self.callback_handler.on_epoch_end( + self.maps_manager.parameters, + metrics_train=metrics_train, + metrics_valid=metrics_valid, + mode=self.config.data.mode, + i=i, + ) + + model_weights = { + "model": model.state_dict(), + "epoch": epoch, + "name": self.config.model.architecture, + } + optimizer_weights = { + "optimizer": model.optim_state_dict(optimizer), + "epoch": epoch, + "name": self.config.model.architecture, + } + + if cluster.master: + # Save checkpoints and best models + best_dict = retain_best.step(metrics_valid) + self.maps_manager._write_weights( + model_weights, + best_dict, + split, + network=network, + save_all_models=self.config.reproducibility.save_all_models, + ) + self.maps_manager._write_weights( + optimizer_weights, + None, + split, + filename="optimizer.pth.tar", + save_all_models=self.config.reproducibility.save_all_models, + ) + dist.barrier() + + if self.config.lr_scheduler.adaptive_learning_rate: + scheduler.step( + metrics_valid["loss"] + ) # Update learning rate based on validation loss + + epoch += 1 + + del model + self.validator._test_loader( + self.maps_manager, + train_loader, + criterion, + "train", + split, + self.config.validation.selection_metrics, + amp=self.maps_manager.std_amp, + network=network, + ) + self.validator._test_loader( + self.maps_manager, + valid_loader, + criterion, + "validation", + split, + self.config.validation.selection_metrics, + amp=self.maps_manager.std_amp, + network=network, + ) + + if save_outputs(self.maps_manager.network_task): + self.validator._compute_output_tensors( + self.maps_manager, + train_loader.dataset, + "train", + split, + self.config.validation.selection_metrics, + nb_images=1, + network=network, + ) + self.validator._compute_output_tensors( + self.maps_manager, + valid_loader.dataset, + "validation", + split, + self.config.validation.selection_metrics, + nb_images=1, + network=network, + ) + + self.callback_handler.on_train_end(parameters=self.maps_manager.parameters) + + def _init_callbacks(self) -> None: + """ + Initializes training callbacks. 
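+
+        Depending on the configuration, adds a CodeCarbonTracker (emissions
+        calculator) and an experiment tracker, plus the default LoggerCallback.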
+ """ + from clinicadl.callbacks.callbacks import CallbacksHandler, LoggerCallback + + # if self.callbacks is None: + # self.callbacks = [Callback()] + + self.callback_handler = CallbacksHandler() # callbacks=self.callbacks) + + if self.config.callbacks.emissions_calculator: + from clinicadl.callbacks.callbacks import CodeCarbonTracker + + self.callback_handler.add_callback(CodeCarbonTracker()) + + if self.config.callbacks.track_exp: + from clinicadl.callbacks.callbacks import Tracker + + self.callback_handler.add_callback(Tracker) + + self.callback_handler.add_callback(LoggerCallback()) + # self.callback_handler.add_callback(MetricConsolePrinterCallback()) + + def _init_optimizer( + self, + model: DDP, + split: Optional[int] = None, + resume: bool = False, + ) -> torch.optim.Optimizer: + """ + Initializes the optimizer. + + Parameters + ---------- + model : clinicadl.utils.maps_manager.ddp.DDP + The parallelizer. + split : int (optional, default=None) + The split considered. Should not be None if resume is True, but is + useless when resume is False. + resume : bool (optional, default=False) + If True, uses checkpoint to recover optimizer's old state. + + Returns + ------- + torch.optim.Optimizer + The optimizer. + """ + + optimizer_cls = getattr(torch.optim, self.config.optimizer.optimizer) + parameters = filter(lambda x: x.requires_grad, model.parameters()) + optimizer_kwargs = dict( + lr=self.config.optimizer.learning_rate, + weight_decay=self.config.optimizer.weight_decay, + ) + + optimizer = optimizer_cls(parameters, **optimizer_kwargs) + + if resume: + checkpoint_path = ( + self.maps_manager.maps_path + / f"split-{split}" + / "tmp" + / "optimizer.pth.tar" + ) + checkpoint_state = torch.load( + checkpoint_path, map_location=model.device, weights_only=True + ) + model.load_optim_state_dict(optimizer, checkpoint_state["optimizer"]) + + return optimizer + + def _init_profiler(self) -> torch.profiler.profile: + """ + Initializes the profiler. + + Returns + ------- + torch.profiler.profile + Profiler context manager. + """ + if self.config.optimization.profiler: + # TODO: no more profiler ???? 
+ from clinicadl.utils.cluster.profiler import ( + ProfilerActivity, + profile, + schedule, + tensorboard_trace_handler, + ) + + time = datetime.now().strftime("%H:%M:%S") + filename = [self.maps_manager.maps_path / "profiler" / f"clinicadl_{time}"] + dist.broadcast_object_list(filename, src=0) + profiler = profile( + activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], + schedule=schedule(wait=2, warmup=2, active=30, repeat=1), + on_trace_ready=tensorboard_trace_handler(filename[0]), + profile_memory=True, + record_shapes=False, + with_stack=False, + with_flops=False, + ) + else: + profiler = nullcontext() + profiler.step = lambda *args, **kwargs: None + + return profiler diff --git a/clinicadl/trainer/tasks_utils.py b/clinicadl/trainer/tasks_utils.py index e3946790c..a14bfa4a9 100644 --- a/clinicadl/trainer/tasks_utils.py +++ b/clinicadl/trainer/tasks_utils.py @@ -18,9 +18,9 @@ from torch.utils.data import DataLoader, Sampler, sampler from torch.utils.data.distributed import DistributedSampler -from clinicadl.caps_dataset.data import CapsDataset -from clinicadl.metrics.metric_module import MetricModule -from clinicadl.network.network import Network +from clinicadl.dataset.caps_dataset import CapsDataset +from clinicadl.metrics.old_metrics.metric_module import MetricModule +from clinicadl.networks.old_network.network import Network from clinicadl.trainer.config.train import TrainConfig from clinicadl.utils import cluster from clinicadl.utils.enum import ( @@ -125,7 +125,7 @@ def validate_criterion(criterion_name: str, compatible_losses: List[str]): } if criterion in reconstruction_losses: - from clinicadl.network.vae.vae_utils import ( + from clinicadl.networks.old_network.vae.vae_utils import ( VAEBernoulliLoss, VAEContinuousBernoulliLoss, VAEGaussianLoss, diff --git a/clinicadl/trainer/trainer.py b/clinicadl/trainer/trainer.py index 775ecd2c6..386a7d148 100644 --- a/clinicadl/trainer/trainer.py +++ b/clinicadl/trainer/trainer.py @@ -1,901 +1,20 @@ -from __future__ import annotations # noqa: I001 - - -from contextlib import nullcontext -from datetime import datetime -from logging import getLogger from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, Callable - -import pandas as pd -import torch -import torch.distributed as dist -from torch.amp.grad_scaler import GradScaler -from torch.amp.autocast_mode import autocast -from torch.utils.data import DataLoader -from torch.utils.data.distributed import DistributedSampler - -from clinicadl.splitter.split_utils import find_finished_splits, find_stopped_splits -from clinicadl.caps_dataset.data import return_dataset -from clinicadl.utils.early_stopping.early_stopping import EarlyStopping -from clinicadl.utils.exceptions import MAPSError -from clinicadl.utils.computational.ddp import DDP -from clinicadl.utils import cluster -from clinicadl.utils.logwriter import LogWriter -from clinicadl.caps_dataset.caps_dataset_utils import read_json -from clinicadl.metrics.metric_module import RetainBest -from clinicadl.utils.seed import pl_worker_init_function, seed_everything -from clinicadl.maps_manager.maps_manager import MapsManager -from clinicadl.utils.seed import get_seed -from clinicadl.utils.enum import Task -from clinicadl.utils.iotools.trainer_utils import ( - create_parameters_dict, - patch_to_read_json, -) -from clinicadl.trainer.tasks_utils import create_training_config -from clinicadl.predictor.predictor import Predictor -from clinicadl.predictor.config import PredictConfig -from 
clinicadl.splitter.splitter import Splitter -from clinicadl.splitter.config import SplitterConfig -from clinicadl.transforms.config import TransformsConfig -if TYPE_CHECKING: - from clinicadl.callbacks.callbacks import Callback - from clinicadl.trainer.config.train import TrainConfig - -from clinicadl.trainer.tasks_utils import ( - evaluation_metrics, - generate_sampler, - get_criterion, - save_outputs, -) - -logger = getLogger("clinicadl.trainer") +from clinicadl.dataset.caps_dataset import CapsDataset +from clinicadl.experiment_manager.experiment_manager import ExperimentManager +from clinicadl.model.clinicadl_model import ClinicaDLModel +from clinicadl.splitter.kfold import Split class Trainer: - """Temporary Trainer extracted from the MAPSManager.""" - - def __init__( - self, - config: TrainConfig, - ) -> None: - """ - Parameters - ---------- - config : TrainConfig - """ - self.config = config - - self.maps_manager = self._init_maps_manager(config) - predict_config = PredictConfig(**config.get_dict()) - self.validator = Predictor(predict_config) - - # test - splitter_config = SplitterConfig(**self.config.get_dict()) - self.splitter = Splitter(splitter_config) - self._check_args() - - def _init_maps_manager(self, config) -> MapsManager: - # temporary: to match CLI data. TODO : change CLI data - - parameters, maps_path = create_parameters_dict(config) - - if maps_path.is_dir(): - return MapsManager( - maps_path, verbose=None - ) # TODO : precise which parameters in config are useful - else: - # parameters["maps_path"] = maps_path - return MapsManager( - maps_path, parameters, verbose=None - ) # TODO : precise which parameters in config are useful + def __init__(self) -> None: + """TO COMPLETE""" @classmethod - def from_json( - cls, - config_file: str | Path, - maps_path: str | Path, - split: Optional[list[int]] = None, - ) -> Trainer: - """ - Creates a Trainer from a json configuration file. - - Parameters - ---------- - config_file : str | Path - The parameters, stored in a json files. - maps_path : str | Path - The folder where the results of a futur training will be stored. - - Returns - ------- - Trainer - The Trainer object, instantiated with parameters found in config_file. - - Raises - ------ - FileNotFoundError - If config_file doesn't exist. - """ - config_file = Path(config_file) - - if not (config_file).is_file(): - raise FileNotFoundError(f"No file found at {str(config_file)}.") - config_dict = patch_to_read_json(read_json(config_file)) # TODO : remove patch - config_dict["maps_dir"] = maps_path - config_dict["split"] = split if split else () - config_object = create_training_config(config_dict["network_task"])( - **config_dict - ) - return cls(config_object) - - @classmethod - def from_maps(cls, maps_path: str | Path) -> Trainer: - """ - Creates a Trainer from a json configuration file. - - Parameters - ---------- - maps_path : str | Path - The path of the MAPS folder. - - Returns - ------- - Trainer - The Trainer object, instantiated with parameters found in maps_path. - - Raises - ------ - MAPSError - If maps_path folder doesn't exist or there is no maps.json file in it. - """ - maps_path = Path(maps_path) - - if not (maps_path / "maps.json").is_file(): - raise MAPSError( - f"MAPS was not found at {str(maps_path)}." - f"To initiate a new MAPS please give a train_dict." - ) - return cls.from_json(maps_path / "maps.json", maps_path) - - def resume(self) -> None: - """ - Resume a prematurely stopped training. 
- - Parameters - ---------- - splits : List[int] - The splits that must be resumed. - """ - stopped_splits = set(find_stopped_splits(self.config.maps_manager.maps_dir)) - finished_splits = set(find_finished_splits(self.config.maps_manager.maps_dir)) - # TODO : check these two lines. Why do we need a self.splitter? - - splitter_config = SplitterConfig(**self.config.get_dict()) - self.splitter = Splitter(splitter_config) - - split_iterator = self.splitter.split_iterator() - ### - absent_splits = set(split_iterator) - stopped_splits - finished_splits - - logger.info( - f"Finished splits {finished_splits}\n" - f"Stopped splits {stopped_splits}\n" - f"Absent splits {absent_splits}" - ) - - if len(stopped_splits) == 0 and len(absent_splits) == 0: - raise ValueError( - "Training has been completed on all the splits you passed." - ) - if len(stopped_splits) > 0: - self._resume(list(stopped_splits)) - if len(absent_splits) > 0: - self.train(list(absent_splits), overwrite=True) - - def _check_args(self): - self.config.reproducibility.seed = get_seed(self.config.reproducibility.seed) - # if len(self.config.data.label_code) == 0: - # self.config.data.label_code = self.maps_manager.label_code - # TODO: deal with label_code and replace self.maps_manager.label_code - from clinicadl.trainer.tasks_utils import generate_label_code - - if ( - "label_code" not in self.config.data.model_dump() - or len(self.config.data.label_code) == 0 - or self.config.data.label_code is None - ): # Allows to set custom label code in TOML - train_df = self.splitter[0]["train"] - self.config.data.label_code = generate_label_code( - self.config.network_task, train_df, self.config.data.label - ) - - def train( - self, - split_list: Optional[List[int]] = None, - overwrite: bool = False, - ) -> None: - """ - Performs the training task for a defined list of splits. - - Parameters - ---------- - split_list : Optional[List[int]] (optional, default=None) - List of splits on which the training task is performed. - Default trains all splits of the cross-validation. - overwrite : bool (optional, default=False) - If True, previously trained splits that are going to be trained - are erased. - - Raises - ------ - MAPSError - If splits specified in input already exist and overwrite is False. 
- """ - - # splitter_config = SplitterConfig(**self.config.get_dict()) - # self.splitter = Splitter(splitter_config) - # self.splitter.check_split_list(self.config.maps_manager.maps_dir, self.config.maps_manager.overwrite) - self.splitter.check_split_list( - self.config.maps_manager.maps_dir, - overwrite, # overwrite change so careful it is not the maps manager overwrite parameters here - ) - for split in self.splitter.split_iterator(): - logger.info(f"Training split {split}") - seed_everything( - self.config.reproducibility.seed, - self.config.reproducibility.deterministic, - self.config.reproducibility.compensation, - ) - - split_df_dict = self.splitter[split] - - if self.config.model.multi_network: - resume, first_network = self.init_first_network(False, split) - for network in range(first_network, self.maps_manager.num_networks): - self._train_single( - split, split_df_dict, network=network, resume=resume - ) - else: - self._train_single(split, split_df_dict, resume=False) - - # def check_split_list(self, split_list, overwrite): - # existing_splits = [] - # splitter_config = SplitterConfig(**self.config.get_dict()) - # self.splitter = Splitter(splitter_config) - # for split in self.splitter.split_iterator(): - # split_path = self.maps_manager.maps_path / f"split-{split}" - # if split_path.is_dir(): - # if overwrite: - # if cluster.master: - # shutil.rmtree(split_path) - # else: - # existing_splits.append(split) - - # if len(existing_splits) > 0: - # raise MAPSError( - # f"Splits {existing_splits} already exist. Please " - # f"specify a list of splits not intersecting the previous list, " - # f"or use overwrite to erase previously trained splits." - # ) - - def _resume( - self, - split_list: Optional[List[int]] = None, - ) -> None: - """ - Resumes the training task for a defined list of splits. - - Parameters - ---------- - split_list : Optional[List[int]] (optional, default=None) - List of splits on which the training task is performed. - If None, the training task is performed on all splits. - - Raises - ------ - MAPSError - If splits specified in input do not exist. - """ - missing_splits = [] - splitter_config = SplitterConfig(**self.config.get_dict()) - self.splitter = Splitter(splitter_config) - for split in self.splitter.split_iterator(): - if not (self.maps_manager.maps_path / f"split-{split}" / "tmp").is_dir(): - missing_splits.append(split) - - if len(missing_splits) > 0: - raise MAPSError( - f"Splits {missing_splits} were not initialized. " - f"Please try train command on these splits and resume only others." 
- ) - - for split in self.splitter.split_iterator(): - logger.info(f"Training split {split}") - seed_everything( - self.config.reproducibility.seed, - self.config.reproducibility.deterministic, - self.config.reproducibility.compensation, - ) - - split_df_dict = self.splitter[split] - if self.config.model.multi_network: - resume, first_network = self.init_first_network(True, split) - for network in range(first_network, self.maps_manager.num_networks): - self._train_single( - split, split_df_dict, network=network, resume=resume - ) - else: - self._train_single(split, split_df_dict, resume=True) - - def init_first_network(self, resume: bool, split: int): - first_network = 0 - if resume: - training_logs = [ - int(str(network_folder).split("-")[1]) - for network_folder in list( - ( - self.maps_manager.maps_path / f"split-{split}" / "training_logs" - ).iterdir() - ) - ] - first_network = max(training_logs) - if not (self.maps_manager.maps_path / "tmp").is_dir(): - first_network += 1 - resume = False - return resume, first_network - - def get_dataloader( - self, - data_df: pd.DataFrame, - cnn_index: Optional[int] = None, - sampler_option: str = "random", - dp_degree: Optional[int] = None, - rank: Optional[int] = None, - worker_init_fn: Optional[Callable[[int], None]] = None, - shuffle: Optional[bool] = None, - num_replicas: Optional[int] = None, - homemade_sampler: bool = False, - ): - dataset = return_dataset( - input_dir=self.config.data.caps_directory, - data_df=data_df, - preprocessing_dict=self.config.data.preprocessing_dict, - transforms_config=self.config.transforms, - multi_cohort=self.config.data.multi_cohort, - label=self.config.data.label, - label_code=self.config.data.label_code, - cnn_index=cnn_index, - ) - if homemade_sampler: - sampler = generate_sampler( - network_task=self.maps_manager.network_task, - dataset=dataset, - sampler_option=sampler_option, - label_code=self.config.data.label_code, - dp_degree=dp_degree, - rank=rank, - ) - else: - sampler = DistributedSampler( - dataset, - num_replicas=num_replicas, - rank=rank, - shuffle=shuffle, - ) - - train_loader = DataLoader( - dataset=dataset, - batch_size=self.config.dataloader.batch_size, - sampler=sampler, - num_workers=self.config.dataloader.n_proc, - worker_init_fn=worker_init_fn, - shuffle=shuffle, - ) - logger.debug(f"Train loader size is {len(train_loader)}") - - return train_loader - - def _train_single( - self, - split, - split_df_dict: Dict, - network: Optional[int] = None, - resume: bool = False, - ) -> None: - """ - Trains a single CNN for all inputs. - - Parameters - ---------- - split_list : Optional[List[int]] (optional, default=None) - List of splits on which the training task is performed. - If None, performs training on all splits of the cross-validation. - resume : bool (optional, default=False) - If True, the job is resumed from checkpoint. 
- """ - - logger.debug("Loading training data...") - - train_loader = self.get_dataloader( - data_df=split_df_dict["train"], - cnn_index=network, - sampler_option=self.config.dataloader.sampler, - dp_degree=cluster.world_size, # type: ignore - rank=cluster.rank, # type: ignore - worker_init_fn=pl_worker_init_function, - homemade_sampler=True, - ) - - logger.debug(f"Train loader size is {len(train_loader)}") - logger.debug("Loading validation data...") - - valid_loader = self.get_dataloader( - data_df=split_df_dict["validation"], - cnn_index=network, - num_replicas=cluster.world_size, # type: ignore - rank=cluster.rank, # type: ignore - shuffle=False, - homemade_sampler=False, - ) - - logger.debug(f"Validation loader size is {len(valid_loader)}") - from clinicadl.callbacks.callbacks import CodeCarbonTracker - - self._train( - train_loader, - valid_loader, - split, - resume=resume, - callbacks=[CodeCarbonTracker], - network=network, - ) - - if network is not None: - resume = False - - if cluster.master: - self.validator._ensemble_prediction( - self.maps_manager, - "train", - split, - self.config.validation.selection_metrics, - ) - self.validator._ensemble_prediction( - self.maps_manager, - "validation", - split, - self.config.validation.selection_metrics, - ) - - self.maps_manager._erase_tmp(split) - - def _train( - self, - train_loader: DataLoader, - valid_loader: DataLoader, - split: int, - network: Optional[int] = None, - resume: bool = False, - callbacks: list[Callback] = [], - ): - """ - Core function shared by train and resume. - - Parameters - ---------- - train_loader : torch.utils.data.DataLoader - DataLoader wrapping the training set. - valid_loader : torch.utils.data.DataLoader - DataLoader wrapping the validation set. - split : int - Index of the split trained. - network : int (optional, default=None) - Index of the network trained (used in multi-network setting only). - resume : bool (optional, default=False) - If True the job is resumed from the checkpoint. - callbacks : List[Callback] (optional, default=[]) - List of callbacks to call during training. - - Raises - ------ - Exception - _description_ - """ - self._init_callbacks() - model, beginning_epoch = self.maps_manager._init_model( - split=split, - resume=resume, - transfer_path=self.config.transfer_learning.transfer_path, - transfer_selection=self.config.transfer_learning.transfer_selection_metric, - nb_unfrozen_layer=self.config.transfer_learning.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=self.config.computational.fully_sharded_data_parallel, - amp=self.config.computational.amp, - ) - criterion = get_criterion( - self.maps_manager.network_task, self.config.model.loss - ) - - optimizer = self._init_optimizer(model, split=split, resume=resume) - self.callback_handler.on_train_begin( - self.maps_manager.parameters, - criterion=criterion, - optimizer=optimizer, - split=split, - maps_path=self.maps_manager.maps_path, - ) - - model.train() - train_loader.dataset.train() - - early_stopping = EarlyStopping( - "min", - min_delta=self.config.early_stopping.tolerance, - patience=self.config.early_stopping.patience, - ) - metrics_valid = {"loss": None} - - if cluster.master: - log_writer = LogWriter( - self.maps_manager.maps_path, - evaluation_metrics(self.maps_manager.network_task) + ["loss"], - split, - resume=resume, - beginning_epoch=beginning_epoch, - network=network, - ) - # retain_best = RetainBest( - # selection_metrics=list(self.config.validation.selection_metrics) - # ) ??? 
- - epoch = beginning_epoch - - retain_best = RetainBest( - selection_metrics=list(self.config.validation.selection_metrics) - ) - - scaler = GradScaler("cuda", enabled=self.config.computational.amp) - profiler = self._init_profiler() - - if self.config.callbacks.track_exp == "wandb": - from clinicadl.callbacks.tracking_exp import WandB_handler - - if self.config.lr_scheduler.adaptive_learning_rate: - from torch.optim.lr_scheduler import ReduceLROnPlateau - - # Initialize the ReduceLROnPlateau scheduler - scheduler = ReduceLROnPlateau(optimizer, mode="min", factor=0.1) - - while epoch < self.config.optimization.epochs and not early_stopping.step( - metrics_valid["loss"] - ): - # self.callback_handler.on_epoch_begin(self.parameters, epoch = epoch) - - if isinstance(train_loader.sampler, DistributedSampler): - # It should always be true for a random sampler. But just in case - # we get a WeightedRandomSampler or a forgotten RandomSampler, - # we do not want to execute this line. - train_loader.sampler.set_epoch(epoch) - - model.zero_grad(set_to_none=True) - evaluation_flag, step_flag = True, True - - with profiler: - for i, data in enumerate(train_loader): - update: bool = ( - i + 1 - ) % self.config.optimization.accumulation_steps == 0 - sync = nullcontext() if update else model.no_sync() - with sync: - with autocast("cuda", enabled=self.maps_manager.std_amp): - _, loss_dict = model(data, criterion) - logger.debug(f"Train loss dictionary {loss_dict}") - loss = loss_dict["loss"] - scaler.scale(loss).backward() - - if update: - step_flag = False - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad(set_to_none=True) - - del loss - - # Evaluate the model only when no gradients are accumulated - if ( - self.config.validation.evaluation_steps != 0 - and (i + 1) % self.config.validation.evaluation_steps == 0 - ): - evaluation_flag = False - - _, metrics_train = self.validator.test( - mode=self.maps_manager.mode, - metrics_module=self.maps_manager.metrics_module, - n_classes=self.maps_manager.n_classes, - network_task=self.maps_manager.network_task, - model=model, - dataloader=train_loader, - criterion=criterion, - amp=self.maps_manager.std_amp, - ) - _, metrics_valid = self.validator.test( - mode=self.maps_manager.mode, - metrics_module=self.maps_manager.metrics_module, - n_classes=self.maps_manager.n_classes, - network_task=self.maps_manager.network_task, - model=model, - dataloader=valid_loader, - criterion=criterion, - amp=self.maps_manager.std_amp, - ) - - model.train() - train_loader.dataset.train() - - if cluster.master: - log_writer.step( - epoch, - i, - metrics_train, - metrics_valid, - len(train_loader), - ) - logger.info( - f"{self.config.data.mode} level training loss is {metrics_train['loss']} " - f"at the end of iteration {i}" - ) - logger.info( - f"{self.config.data.mode} level validation loss is {metrics_valid['loss']} " - f"at the end of iteration {i}" - ) - - profiler.step() - - # If no step has been performed, raise Exception - if step_flag: - raise Exception( - "The model has not been updated once in the epoch. The accumulation step may be too large." - ) - - # If no evaluation has been performed, warn the user - elif evaluation_flag and self.config.validation.evaluation_steps != 0: - logger.warning( - f"Your evaluation steps {self.config.validation.evaluation_steps} are too big " - f"compared to the size of the dataset. " - f"The model is evaluated only once at the end epochs." 
- ) - - # Update weights one last time if gradients were computed without update - if (i + 1) % self.config.optimization.accumulation_steps != 0: - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad(set_to_none=True) - - # Always test the results and save them once at the end of the epoch - model.zero_grad(set_to_none=True) - logger.debug(f"Last checkpoint at the end of the epoch {epoch}") - - _, metrics_train = self.validator.test( - mode=self.maps_manager.mode, - metrics_module=self.maps_manager.metrics_module, - n_classes=self.maps_manager.n_classes, - network_task=self.maps_manager.network_task, - model=model, - dataloader=train_loader, - criterion=criterion, - amp=self.maps_manager.std_amp, - ) - _, metrics_valid = self.validator.test( - mode=self.maps_manager.mode, - metrics_module=self.maps_manager.metrics_module, - n_classes=self.maps_manager.n_classes, - network_task=self.maps_manager.network_task, - model=model, - dataloader=valid_loader, - criterion=criterion, - amp=self.maps_manager.std_amp, - ) - - model.train() - train_loader.dataset.train() - - self.callback_handler.on_epoch_end( - self.maps_manager.parameters, - metrics_train=metrics_train, - metrics_valid=metrics_valid, - mode=self.config.data.mode, - i=i, - ) - - model_weights = { - "model": model.state_dict(), - "epoch": epoch, - "name": self.config.model.architecture, - } - optimizer_weights = { - "optimizer": model.optim_state_dict(optimizer), - "epoch": epoch, - "name": self.config.model.architecture, - } - - if cluster.master: - # Save checkpoints and best models - best_dict = retain_best.step(metrics_valid) - self.maps_manager._write_weights( - model_weights, - best_dict, - split, - network=network, - save_all_models=self.config.reproducibility.save_all_models, - ) - self.maps_manager._write_weights( - optimizer_weights, - None, - split, - filename="optimizer.pth.tar", - save_all_models=self.config.reproducibility.save_all_models, - ) - dist.barrier() - - if self.config.lr_scheduler.adaptive_learning_rate: - scheduler.step( - metrics_valid["loss"] - ) # Update learning rate based on validation loss - - epoch += 1 - - del model - self.validator._test_loader( - self.maps_manager, - train_loader, - criterion, - "train", - split, - self.config.validation.selection_metrics, - amp=self.maps_manager.std_amp, - network=network, - ) - self.validator._test_loader( - self.maps_manager, - valid_loader, - criterion, - "validation", - split, - self.config.validation.selection_metrics, - amp=self.maps_manager.std_amp, - network=network, - ) - - if save_outputs(self.maps_manager.network_task): - self.validator._compute_output_tensors( - self.maps_manager, - train_loader.dataset, - "train", - split, - self.config.validation.selection_metrics, - nb_images=1, - network=network, - ) - self.validator._compute_output_tensors( - self.maps_manager, - valid_loader.dataset, - "validation", - split, - self.config.validation.selection_metrics, - nb_images=1, - network=network, - ) - - self.callback_handler.on_train_end(parameters=self.maps_manager.parameters) - - def _init_callbacks(self) -> None: - """ - Initializes training callbacks. 
- """ - from clinicadl.callbacks.callbacks import CallbacksHandler, LoggerCallback - - # if self.callbacks is None: - # self.callbacks = [Callback()] - - self.callback_handler = CallbacksHandler() # callbacks=self.callbacks) - - if self.config.callbacks.emissions_calculator: - from clinicadl.callbacks.callbacks import CodeCarbonTracker - - self.callback_handler.add_callback(CodeCarbonTracker()) - - if self.config.callbacks.track_exp: - from clinicadl.callbacks.callbacks import Tracker - - self.callback_handler.add_callback(Tracker) - - self.callback_handler.add_callback(LoggerCallback()) - # self.callback_handler.add_callback(MetricConsolePrinterCallback()) - - def _init_optimizer( - self, - model: DDP, - split: Optional[int] = None, - resume: bool = False, - ) -> torch.optim.Optimizer: - """ - Initializes the optimizer. - - Parameters - ---------- - model : clinicadl.utils.maps_manager.ddp.DDP - The parallelizer. - split : int (optional, default=None) - The split considered. Should not be None if resume is True, but is - useless when resume is False. - resume : bool (optional, default=False) - If True, uses checkpoint to recover optimizer's old state. - - Returns - ------- - torch.optim.Optimizer - The optimizer. - """ - - optimizer_cls = getattr(torch.optim, self.config.optimizer.optimizer) - parameters = filter(lambda x: x.requires_grad, model.parameters()) - optimizer_kwargs = dict( - lr=self.config.optimizer.learning_rate, - weight_decay=self.config.optimizer.weight_decay, - ) - - optimizer = optimizer_cls(parameters, **optimizer_kwargs) - - if resume: - checkpoint_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / "tmp" - / "optimizer.pth.tar" - ) - checkpoint_state = torch.load( - checkpoint_path, map_location=model.device, weights_only=True - ) - model.load_optim_state_dict(optimizer, checkpoint_state["optimizer"]) - - return optimizer - - def _init_profiler(self) -> torch.profiler.profile: - """ - Initializes the profiler. - - Returns - ------- - torch.profiler.profile - Profiler context manager. - """ - if self.config.optimization.profiler: - # TODO: no more profiler ???? 
-            from clinicadl.utils.cluster.profiler import (
-                ProfilerActivity,
-                profile,
-                schedule,
-                tensorboard_trace_handler,
-            )
-
-            time = datetime.now().strftime("%H:%M:%S")
-            filename = [self.maps_manager.maps_path / "profiler" / f"clinicadl_{time}"]
-            dist.broadcast_object_list(filename, src=0)
-            profiler = profile(
-                activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
-                schedule=schedule(wait=2, warmup=2, active=30, repeat=1),
-                on_trace_ready=tensorboard_trace_handler(filename[0]),
-                profile_memory=True,
-                record_shapes=False,
-                with_stack=False,
-                with_flops=False,
-            )
-        else:
-            profiler = nullcontext()
-            profiler.step = lambda *args, **kwargs: None
+    @classmethod
+    def from_json(cls, config_file: Path, manager: ExperimentManager) -> "Trainer":
+        """TO COMPLETE"""
+        return cls()
-        return profiler
+    def train(self, model: ClinicaDLModel, split: Split):
+        """TO COMPLETE"""
+        pass
diff --git a/clinicadl/transforms/config.py b/clinicadl/transforms/config.py
index 91c80c0de..79f4db7c7 100644
--- a/clinicadl/transforms/config.py
+++ b/clinicadl/transforms/config.py
@@ -4,7 +4,7 @@
 import torchvision.transforms as torch_transforms
 from pydantic import BaseModel, ConfigDict, field_validator
 
-from clinicadl.transforms import transforms
+from clinicadl.transforms import factory
 from clinicadl.utils.enum import (
     SizeReductionFactor,
     Transform,
@@ -47,16 +47,16 @@ def get_transforms(
             transforms to apply in train and evaluation mode / transforms to apply
             in evaluation mode only.
         """
         augmentation_dict = {
-            "Noise": transforms.RandomNoising(sigma=0.1),
+            "Noise": factory.RandomNoising(sigma=0.1),
             "Erasing": torch_transforms.RandomErasing(),
-            "CropPad": transforms.RandomCropPad(10),
-            "Smoothing": transforms.RandomSmoothing(),
-            "Motion": transforms.RandomMotion((2, 4), (2, 4), 2),
-            "Ghosting": transforms.RandomGhosting((4, 10)),
-            "Spike": transforms.RandomSpike(1, (1, 3)),
-            "BiasField": transforms.RandomBiasField(0.5),
-            "RandomBlur": transforms.RandomBlur((0, 2)),
-            "RandomSwap": transforms.RandomSwap(15, 100),
+            "CropPad": factory.RandomCropPad(10),
+            "Smoothing": factory.RandomSmoothing(),
+            "Motion": factory.RandomMotion((2, 4), (2, 4), 2),
+            "Ghosting": factory.RandomGhosting((4, 10)),
+            "Spike": factory.RandomSpike(1, (1, 3)),
+            "BiasField": factory.RandomBiasField(0.5),
+            "RandomBlur": factory.RandomBlur((0, 2)),
+            "RandomSwap": factory.RandomSwap(15, 100),
             "None": None,
         }
 
@@ -71,12 +71,12 @@
             ]
         )
 
-        transformations_list.append(transforms.NanRemoval())
+        transformations_list.append(factory.NanRemoval())
         if self.normalize:
-            transformations_list.append(transforms.MinMaxNormalization())
+            transformations_list.append(factory.MinMaxNormalization())
         if self.size_reduction:
             transformations_list.append(
-                transforms.SizeReduction(self.size_reduction_factor)
+                factory.SizeReduction(self.size_reduction_factor)
             )
 
         all_transformations = torch_transforms.Compose(transformations_list)
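A minimal usage sketch for the rename above (editorial illustration, not part of
the patch; it assumes only that `clinicadl.transforms.factory` exposes the
classes referenced in the hunk):

    import torchvision.transforms as torch_transforms
    from clinicadl.transforms import factory

    # Deterministic pipeline applied in both train and evaluation mode.
    base = torch_transforms.Compose(
        [factory.NanRemoval(), factory.MinMaxNormalization()]
    )
    # Train-time augmentation chains the base pipeline with a random transform;
    # torchvision's Compose accepts any callables, including another Compose.
    augmented = torch_transforms.Compose([base, factory.RandomNoising(sigma=0.1)])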
diff --git a/clinicadl/transforms/factory.py b/clinicadl/transforms/factory.py
new file mode 100644
index 000000000..7b6de09a7
--- /dev/null
+++ b/clinicadl/transforms/factory.py
@@ -0,0 +1,228 @@
+# coding: utf8
+
+from logging import getLogger
+
+import numpy as np
+import torch
+import torchio as tio
+
+from clinicadl.utils.exceptions import ClinicaDLConfigurationError
+
+logger = getLogger("clinicadl")
+
+##################################
+# Transformations
+##################################
+
+
+class RandomNoising(object):
+    """Adds random Gaussian noise to a tensor"""
+
+    def __init__(self, sigma=0.1):
+        self.sigma = sigma
+
+    def __call__(self, image):
+        import random
+
+        sigma = random.uniform(0, self.sigma)
+        dist = torch.distributions.normal.Normal(0, sigma)
+        return image + dist.sample(image.shape)
+
+
+class RandomSmoothing(object):
+    """Applies random Gaussian smoothing to a tensor"""
+
+    def __init__(self, sigma=1):
+        self.sigma = sigma
+
+    def __call__(self, image):
+        import random
+
+        from scipy.ndimage import gaussian_filter
+
+        sigma = random.uniform(0, self.sigma)
+        image = gaussian_filter(image, sigma)  # smoothing of data
+        image = torch.from_numpy(image).float()
+        return image
+
+
+class RandomCropPad(object):
+    """Randomly crops and zero-pads a 2D or 3D tensor along each spatial axis"""
+
+    def __init__(self, length):
+        self.length = length
+
+    def __call__(self, image):
+        dimensions = len(image.shape) - 1
+        crop = np.random.randint(-self.length, self.length, dimensions)
+        if dimensions == 2:
+            output = torch.nn.functional.pad(
+                image, (-crop[0], crop[0], -crop[1], crop[1])
+            )
+        elif dimensions == 3:
+            output = torch.nn.functional.pad(
+                image, (-crop[0], crop[0], -crop[1], crop[1], -crop[2], crop[2])
+            )
+        else:
+            raise ValueError(
+                f"RandomCropPad is only available for 2D or 3D data. Image is {dimensions}D"
+            )
+        return output
+
+
+class GaussianSmoothing(object):
+    """Applies Gaussian smoothing with a fixed sigma to the image of a sample dict"""
+
+    def __init__(self, sigma):
+        self.sigma = sigma
+
+    def __call__(self, sample):
+        from scipy.ndimage import gaussian_filter
+
+        image = sample["image"]
+        np.nan_to_num(image, copy=False)
+        smoothed_image = gaussian_filter(image, sigma=self.sigma)
+        sample["image"] = smoothed_image
+
+        return sample
+
+
+class RandomMotion(object):
+    """Applies a Random Motion"""
+
+    def __init__(self, translation, rotation, num_transforms):
+        self.rotation = rotation
+        self.translation = translation
+        self.num_transforms = num_transforms
+
+    def __call__(self, image):
+        motion = tio.RandomMotion(
+            degrees=self.rotation,
+            translation=self.translation,
+            num_transforms=self.num_transforms,
+        )
+        image = motion(image)
+
+        return image
+
+
+class RandomGhosting(object):
+    """Applies a Random Ghosting"""
+
+    def __init__(self, num_ghosts):
+        self.num_ghosts = num_ghosts
+
+    def __call__(self, image):
+        ghost = tio.RandomGhosting(num_ghosts=self.num_ghosts)
+        image = ghost(image)
+
+        return image
+
+
+class RandomSpike(object):
+    """Applies a Random Spike"""
+
+    def __init__(self, num_spikes, intensity):
+        self.num_spikes = num_spikes
+        self.intensity = intensity
+
+    def __call__(self, image):
+        spike = tio.RandomSpike(
+            num_spikes=self.num_spikes,
+            intensity=self.intensity,
+        )
+        image = spike(image)
+
+        return image
+
+
+class RandomBiasField(object):
+    """Applies a Random Bias Field"""
+
+    def __init__(self, coefficients):
+        self.coefficients = coefficients
+
+    def __call__(self, image):
+        bias_field = tio.RandomBiasField(coefficients=self.coefficients)
+        image = bias_field(image)
+
+        return image
+
+
+class RandomBlur(object):
+    """Applies a Random Blur"""
+
+    def __init__(self, std):
+        self.std = std
+
+    def __call__(self, image):
+        blur = tio.RandomBlur(std=self.std)
+        image = blur(image)
+
+        return image
+
+
+class RandomSwap(object):
+    """Applies a Random Swap"""
+
+    def __init__(self, patch_size, num_iterations):
+        self.patch_size = patch_size
+        self.num_iterations = num_iterations
+
+    def __call__(self, image):
+        swap = tio.RandomSwap(
+            patch_size=self.patch_size, num_iterations=self.num_iterations
+        )
+        image = swap(image)
+
+        return image
+
+
+class ToTensor(object):
+    """Converts a numpy array to a float tensor with an added channel dimension"""
+
+    def __call__(self, image):
+        np.nan_to_num(image,
copy=False) + image = image.astype(float) + + return torch.from_numpy(image[np.newaxis, :]).float() + + +class MinMaxNormalization(object): + """Normalizes a tensor between 0 and 1""" + + def __call__(self, image): + return (image - image.min()) / (image.max() - image.min()) + + +class NanRemoval(object): + def __init__(self): + self.nan_detected = False # Avoid warning each time new data is seen + + def __call__(self, image): + if torch.isnan(image).any().item(): + if not self.nan_detected: + logger.warning( + "NaN values were found in your images and will be removed." + ) + self.nan_detected = True + return torch.nan_to_num(image) + else: + return image + + +class SizeReduction(object): + """Reshape the input tensor to be of size [80, 96, 80]""" + + def __init__(self, size_reduction_factor=2) -> None: + self.size_reduction_factor = size_reduction_factor + + def __call__(self, image): + if self.size_reduction_factor == 2: + return image[:, 4:164:2, 8:200:2, 8:168:2] + elif self.size_reduction_factor == 3: + return image[:, 0:168:3, 8:200:3, 4:172:3] + elif self.size_reduction_factor == 4: + return image[:, 4:164:4, 8:200:4, 8:168:4] + elif self.size_reduction_factor == 5: + return image[:, 4:164:5, 0:200:5, 8:168:5] + else: + raise ClinicaDLConfigurationError( + "size_reduction_factor must be 2, 3, 4 or 5." + ) diff --git a/clinicadl/transforms/transforms.py b/clinicadl/transforms/transforms.py index 7b6de09a7..be18c382a 100644 --- a/clinicadl/transforms/transforms.py +++ b/clinicadl/transforms/transforms.py @@ -1,228 +1,14 @@ -# coding: utf8 +from typing import List -from logging import getLogger +import torchio -import numpy as np -import torch -import torchio as tio -from clinicadl.utils.exceptions import ClinicaDLConfigurationError - -logger = getLogger("clinicadl") - -################################## -# Transformations -################################## - - -class RandomNoising(object): - """Applies a random zoom to a tensor""" - - def __init__(self, sigma=0.1): - self.sigma = sigma - - def __call__(self, image): - import random - - sigma = random.uniform(0, self.sigma) - dist = torch.distributions.normal.Normal(0, sigma) - return image + dist.sample(image.shape) - - -class RandomSmoothing(object): - """Applies a random zoom to a tensor""" - - def __init__(self, sigma=1): - self.sigma = sigma - - def __call__(self, image): - import random - - from scipy.ndimage import gaussian_filter - - sigma = random.uniform(0, self.sigma) - image = gaussian_filter(image, sigma) # smoothing of data - image = torch.from_numpy(image).float() - return image - - -class RandomCropPad(object): - def __init__(self, length): - self.length = length - - def __call__(self, image): - dimensions = len(image.shape) - 1 - crop = np.random.randint(-self.length, self.length, dimensions) - if dimensions == 2: - output = torch.nn.functional.pad( - image, (-crop[0], crop[0], -crop[1], crop[1]) - ) - elif dimensions == 3: - output = torch.nn.functional.pad( - image, (-crop[0], crop[0], -crop[1], crop[1], -crop[2], crop[2]) - ) - else: - raise ValueError( - f"RandomCropPad is only available for 2D or 3D data. 
Image is {dimensions}D" - ) - return output - - -class GaussianSmoothing(object): - def __init__(self, sigma): - self.sigma = sigma - - def __call__(self, sample): - from scipy.ndimage.filters import gaussian_filter - - image = sample["image"] - np.nan_to_num(image, copy=False) - smoothed_image = gaussian_filter(image, sigma=self.sigma) - sample["image"] = smoothed_image - - return sample - - -class RandomMotion(object): - """Applies a Random Motion""" - - def __init__(self, translation, rotation, num_transforms): - self.rotation = rotation - self.translation = translation - self.num_transforms = num_transforms - - def __call__(self, image): - motion = tio.RandomMotion( - degrees=self.rotation, - translation=self.translation, - num_transforms=self.num_transforms, - ) - image = motion(image) - - return image - - -class RandomGhosting(object): - """Applies a Random Ghosting""" - - def __init__(self, num_ghosts): - self.num_ghosts = num_ghosts - - def __call__(self, image): - ghost = tio.RandomGhosting(num_ghosts=self.num_ghosts) - image = ghost(image) - - return image - - -class RandomSpike(object): - """Applies a Random Spike""" - - def __init__(self, num_spikes, intensity): - self.num_spikes = num_spikes - self.intensity = intensity - - def __call__(self, image): - spike = tio.RandomSpike( - num_spikes=self.num_spikes, - intensity=self.intensity, - ) - image = spike(image) - - return image - - -class RandomBiasField(object): - """Applies a Random Bias Field""" - - def __init__(self, coefficients): - self.coefficients = coefficients - - def __call__(self, image): - bias_field = tio.RandomBiasField(coefficients=self.coefficients) - image = bias_field(image) - - return image - - -class RandomBlur(object): - """Applies a Random Blur""" - - def __init__(self, std): - self.std = std - - def __call__(self, image): - blur = tio.RandomBlur(std=self.std) - image = blur(image) - - return image - - -class RandomSwap(object): - """Applies a Random Swap""" - - def __init__(self, patch_size, num_iterations): - self.patch_size = patch_size - self.num_iterations = num_iterations - - def __call__(self, image): - swap = tio.RandomSwap( - patch_size=self.patch_size, num_iterations=self.num_iterations - ) - image = swap(image) - - return image - - -class ToTensor(object): - """Convert image type to Tensor and diagnosis to diagnosis code""" - - def __call__(self, image): - np.nan_to_num(image, copy=False) - image = image.astype(float) - - return torch.from_numpy(image[np.newaxis, :]).float() - - -class MinMaxNormalization(object): - """Normalizes a tensor between 0 and 1""" - - def __call__(self, image): - return (image - image.min()) / (image.max() - image.min()) - - -class NanRemoval(object): - def __init__(self): - self.nan_detected = False # Avoid warning each time new data is seen - - def __call__(self, image): - if torch.isnan(image).any().item(): - if not self.nan_detected: - logger.warning( - "NaN values were found in your images and will be removed." 
-                )
-                self.nan_detected = True
-            return torch.nan_to_num(image)
-        else:
-            return image
-
-
-class SizeReduction(object):
-    """Reshape the input tensor to be of size [80, 96, 80]"""
-
-    def __init__(self, size_reduction_factor=2) -> None:
-        self.size_reduction_factor = size_reduction_factor
-
-    def __call__(self, image):
-        if self.size_reduction_factor == 2:
-            return image[:, 4:164:2, 8:200:2, 8:168:2]
-        elif self.size_reduction_factor == 3:
-            return image[:, 0:168:3, 8:200:3, 4:172:3]
-        elif self.size_reduction_factor == 4:
-            return image[:, 4:164:4, 8:200:4, 8:168:4]
-        elif self.size_reduction_factor == 5:
-            return image[:, 4:164:5, 0:200:5, 8:168:5]
-        else:
-            raise ClinicaDLConfigurationError(
-                "size_reduction_factor must be 2, 3, 4 or 5."
-            )
+class Transforms:
+    def __init__(
+        self,
+        data_augmentation: List[torchio.transforms.Transform] = [],
+        image_transforms: List[torchio.transforms.Transform] = [],
+        object_transforms: List[torchio.transforms.Transform] = [],
+    ) -> None:
+        """TO COMPLETE"""
+        # The lists are only read, never mutated, so shared defaults are safe.
+        self.data_augmentation = data_augmentation
+        self.image_transforms = image_transforms
+        self.object_transforms = object_transforms
diff --git a/clinicadl/utils/iotools/train_utils.py b/clinicadl/utils/iotools/train_utils.py
index 71595811d..21fb160d5 100644
--- a/clinicadl/utils/iotools/train_utils.py
+++ b/clinicadl/utils/iotools/train_utils.py
@@ -90,7 +90,7 @@ def get_model_list(architecture=None, input_size=None, model_layers=False):
     """
     from inspect import getmembers, isclass
 
-    import clinicadl.network as network_package
+    import clinicadl.networks.old_network as network_package
 
     if not architecture:
         print("The list of currently available models is:")
@@ -220,7 +220,7 @@ def merge_cli_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str, Any]
     Dict[str, Any]
         A dictionary with training options.
     """
-    from clinicadl.caps_dataset.caps_dataset_utils import read_json
+    from clinicadl.dataset.caps_dataset_utils import read_json
 
     options = read_json(maps_json)
     for arg in kwargs:
@@ -253,7 +253,7 @@ def merge_options_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str,
     Dict[str, Any]
         A dictionary with training options.
     """
-    from clinicadl.caps_dataset.caps_dataset_utils import read_json
+    from clinicadl.dataset.caps_dataset_utils import read_json
 
     options = read_json(maps_json)
     for arg in kwargs:
diff --git a/clinicadl/utils/meta_maps/getter.py b/clinicadl/utils/meta_maps/getter.py
index 1fa524950..4698c34ab 100644
--- a/clinicadl/utils/meta_maps/getter.py
+++ b/clinicadl/utils/meta_maps/getter.py
@@ -6,8 +6,8 @@
 
 import pandas as pd
 
-from clinicadl.maps_manager.maps_manager import MapsManager
-from clinicadl.metrics.utils import find_selection_metrics, get_metrics
+from clinicadl.experiment_manager.maps_manager import MapsManager
+from clinicadl.metrics.old_metrics.utils import find_selection_metrics, get_metrics
 from clinicadl.splitter.split_utils import find_splits
 from clinicadl.utils.exceptions import MAPSError
diff --git a/tests/conftest.py b/tests/conftest.py
deleted file mode 100644
index e5a4a7302..000000000
--- a/tests/conftest.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# coding: utf8
-
-"""
-This file contains a set of functional tests designed to check the correct execution of the pipeline and the
-different functions available in ClinicaDL
-"""
-
-import pytest
-
-
-def pytest_addoption(parser):
-    parser.addoption(
-        "--input_data_directory",
-        action="store",
-        help="Directory for (only-read) inputs for tests",
-    )
-    parser.addoption(
-        "--no-gpu",
-        action="store_true",
-        help="""To run tests on cpu. Default is False.
-        To use carefully, only to run tests locally. Should not be used in final CI tests.
- Concretely, the tests won't fail if gpu option is false in the output MAPS whereas - it is true in the reference MAPS.""", - ) - parser.addoption( - "--adapt-base-dir", - action="store_true", - help="""To virtually change the base directory in the paths stored in the MAPS of the CI data. - Default is False. - To use carefully, only to run tests locally. Should not be used in final CI tests. - Concretely, the tests won't fail if only the base directories differ in the paths stored - in the output and reference MAPS.""", - ) - - -@pytest.fixture -def cmdopt(request): - config_param = {} - config_param["input"] = request.config.getoption("--input_data_directory") - config_param["no-gpu"] = request.config.getoption("--no-gpu") - config_param["adapt-base-dir"] = request.config.getoption("--adapt-base-dir") - return config_param diff --git a/tests/test_cli.py b/tests/test_cli.py deleted file mode 100644 index 687592bec..000000000 --- a/tests/test_cli.py +++ /dev/null @@ -1,200 +0,0 @@ -# coding: utf8 - -import pytest -from click.testing import CliRunner - -from clinicadl.cmdline import cli -from clinicadl.utils.enum import SliceDirection - - -# Test to ensure that the help string, at the command line, is invoked without errors -# Test for the first level at the command line -@pytest.fixture( - params=[ - "prepare-data", - "generate", - "interpret", - "predict", - "quality-check", - "random-search", - "train", - "tsvtools", - ] -) -def cli_args_first_lv(request): - task = request.param - return task - - -def test_first_lv(cli_args_first_lv): - runner = CliRunner() - task = cli_args_first_lv - print(f"Testing input cli {task}") - result = runner.invoke(cli, f"{task} -h") - assert result.exit_code == 0 - - -# Test for prepare-data cli, second level -@pytest.fixture( - params=[ - "image", - "slice", - "patch", - "roi", - ] -) -def prepare_data_cli_arg1(request): - return request.param - - -@pytest.fixture( - params=[ - "t1-linear", - "pet-linear", - "custom", - ] -) -def prepare_data_cli_arg2(request): - return request.param - - -def test_second_lv_prepare_data(prepare_data_cli_arg1, prepare_data_cli_arg2): - runner = CliRunner() - arg1 = prepare_data_cli_arg1 - arg2 = prepare_data_cli_arg2 - print(f"Testing input prepare_data cli {arg1} {arg2}") - result = runner.invoke(cli, f"prepare-data {arg1} {arg2} -h") - assert result.exit_code == 0 - - -# Test for the generate cli, second level -@pytest.fixture( - params=[ - "shepplogan", - "random", - "trivial", - ] -) -def generate_cli_arg1(request): - return request.param - - -def test_second_lv_generate(generate_cli_arg1): - runner = CliRunner() - arg1 = generate_cli_arg1 - print(f"Testing input generate cli {arg1}") - result = runner.invoke(cli, f"generate {arg1} -h") - assert result.exit_code == 0 - - -# Test for the interpret cli, second level -@pytest.fixture( - params=[ - "", - ] -) -def interpret_cli_arg1(request): - return request.param - - -def test_second_lv_interpret(interpret_cli_arg1): - runner = CliRunner() - cli_input = interpret_cli_arg1 - print(f"Testing input generate cli {cli_input}") - result = runner.invoke(cli, f"interpret {cli_input} -h") - assert result.exit_code == 0 - - -# Test for the predict cli, second level -@pytest.fixture( - params=[ - "", - ] -) -def predict_cli_arg1(request): - return request.param - - -def test_second_lv_predict(predict_cli_arg1): - runner = CliRunner() - cli_input = predict_cli_arg1 - print(f"Testing input predict cli {cli_input}") - result = runner.invoke(cli, f"predict {cli_input} -h") - assert 
result.exit_code == 0 - - -# Test for the train cli, second level -@pytest.fixture( - params=[ - "classification", - "regression", - "reconstruction", - "from_json", - "resume", - "list_models", - ] -) -def train_cli_arg1(request): - return request.param - - -def test_second_lv_train(train_cli_arg1): - runner = CliRunner() - cli_input = train_cli_arg1 - print(f"Testing input train cli {cli_input}") - result = runner.invoke(cli, f"train {cli_input} -h") - assert result.exit_code == 0 - - -# Test for the random-search cli, second level -@pytest.fixture(params=["generate", "analysis"]) -def rs_cli_arg1(request): - task = request.param - return task - - -def test_second_lv_random_search(rs_cli_arg1): - runner = CliRunner() - arg1 = rs_cli_arg1 - print(f"Testing input random-search cli {arg1}") - result = runner.invoke(cli, f"random-search {arg1} -h") - assert result.exit_code == 0 - - -# Test for the quality-check cli, second level -@pytest.fixture(params=["t1-linear", "t1-volume"]) -def qc_cli_arg1(request): - task = request.param - return task - - -def test_second_lv_quality_check(qc_cli_arg1): - runner = CliRunner() - arg1 = qc_cli_arg1 - print(f"Testing input quality-check cli {arg1}") - result = runner.invoke(cli, f"quality-check {arg1} -h") - assert result.exit_code == 0 - - -# Test for the tsvtool cli, second level -@pytest.fixture( - params=[ - "analysis", - "get-labels", - "kfold", - "split", - "prepare-experiment", - "get-progression", - "get-metadata", - ] -) -def tsvtool_cli_arg1(request): - return request.param - - -def test_second_lv_tsvtool(tsvtool_cli_arg1): - runner = CliRunner() - arg1 = tsvtool_cli_arg1 - print(f"Testing input tsvtools cli {arg1}") - result = runner.invoke(cli, f"tsvtools {arg1} -h") - assert result.exit_code == 0 diff --git a/tests/test_generate.py b/tests/test_generate.py deleted file mode 100644 index 9fc03535b..000000000 --- a/tests/test_generate.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding: utf8 - -import os -from pathlib import Path - -import pytest - -from tests.testing_tools import clean_folder, compare_folders - - -@pytest.fixture( - params=[ - "random_example", - "trivial_example", - "shepplogan_example", - "hypometabolic_example", - "artifacts_example", - ] -) -def test_name(request): - return request.param - - -def test_generate(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "generate" / "in" - ref_dir = base_dir / "generate" / "ref" - tmp_out_dir = tmp_path / "generate" / "out" - tmp_out_dir.mkdir(parents=True) - - clean_folder(tmp_out_dir, recreate=True) - - data_caps_pet = str(input_dir / "caps_pet") - data_caps_folder = str(input_dir / "caps") - - if test_name == "trivial_example": - output_folder = tmp_out_dir / test_name - test_input = [ - "generate", - "trivial", - data_caps_folder, - str(output_folder), - "--n_subjects", - "4", - "--preprocessing", - "t1-linear", - ] - elif test_name == "hypometabolic_example": - output_folder = tmp_out_dir / test_name - test_input = [ - "generate", - "hypometabolic", - data_caps_pet, - str(output_folder), - "--n_subjects", - "2", - "--pathology", - "ad", - "--anomaly_degree", - "50", - "--sigma", - "5", - ] - - elif test_name == "random_example": - output_folder = tmp_out_dir / test_name - test_input = [ - "generate", - "random", - data_caps_folder, - str(output_folder), - "--n_subjects", - "4", - "--mean", - "4000", - "--sigma", - "1000", - "--preprocessing", - "t1-linear", - ] - - elif test_name == "shepplogan_example": - n_subjects = 10 - output_folder = 
tmp_out_dir / test_name - test_input = [ - "generate", - "shepplogan", - str(output_folder), - "--n_subjects", - f"{n_subjects}", - ] - elif test_name == "artifacts_example": - output_folder = tmp_out_dir / test_name - test_input = [ - "generate", - "artifacts", - data_caps_folder, - str(output_folder), - "--preprocessing", - "t1-linear", - "--noise", - "--motion", - "--contrast", - ] - - else: - raise NotImplementedError(f"Test {test_name} is not implemented.") - - flag_error = not os.system("clinicadl " + " ".join(test_input)) - - assert flag_error - - if test_name == "shepplogan_example": - file = list((output_folder / "tensor_extraction").iterdir()) - old_name = output_folder / "tensor_extraction" / file[0] - new_name = output_folder / "tensor_extraction" / "extract_test.json" - old_name.rename(new_name) - - assert compare_folders(output_folder, ref_dir / test_name, tmp_out_dir) diff --git a/tests/test_interpret.py b/tests/test_interpret.py deleted file mode 100644 index ef6f394f8..000000000 --- a/tests/test_interpret.py +++ /dev/null @@ -1,97 +0,0 @@ -# coding: utf8 - -import os -import shutil -from pathlib import Path - -import pytest - -from clinicadl.interpret.config import InterpretConfig -from clinicadl.predictor.predictor import Predictor - - -@pytest.fixture(params=["classification", "regression"]) -def test_name(request): - return request.param - - -def test_interpret(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "interpret" / "in" - ref_dir = base_dir / "interpret" / "ref" - tmp_out_dir = tmp_path / "interpret" / "out" - tmp_out_dir.mkdir(parents=True) - - labels_dir_str = str(input_dir / "labels_list" / "2_fold") - maps_tmp_out_dir = str(tmp_out_dir / "maps") - if test_name == "classification": - cnn_input = [ - "train", - "classification", - str(input_dir / "caps_image"), - "t1-linear_mode-image.json", - labels_dir_str, - maps_tmp_out_dir, - "--architecture Conv5_FC3", - "--epochs", - "1", - "--n_splits", - "2", - "--split", - "0", - ] - - elif test_name == "regression": - cnn_input = [ - "train", - "regression", - str(input_dir / "caps_patch"), - "t1-linear_mode-patch.json", - labels_dir_str, - maps_tmp_out_dir, - "--architecture Conv5_FC3", - "--epochs", - "1", - "--n_splits", - "2", - "--split", - "0", - ] - else: - raise NotImplementedError(f"Test {test_name} is not implemented.") - - if cmdopt["no-gpu"]: - cnn_input.append("--no-gpu") - - run_interpret(cnn_input, tmp_out_dir, ref_dir) - - -def run_interpret(cnn_input, tmp_out_dir, ref_dir): - from clinicadl.utils.enum import InterpretationMethod - - maps_path = tmp_out_dir / "maps" - if maps_path.is_dir(): - shutil.rmtree(maps_path) - - train_error = not os.system("clinicadl " + " ".join(cnn_input)) - assert train_error - - for method in list(InterpretationMethod): - from clinicadl.utils.iotools.train_utils import ( - merge_options_and_maps_json_options, - ) - - dict_ = { - "maps_dir": maps_path, - "data_group": "train", - "name": f"test-{method}", - "method_cls": method, - } - # options = merge_options_and_maps_json_options(maps_path / "maps.json", **dict_) - interpret_config = InterpretConfig(**dict_) - - interpret_manager = Predictor(interpret_config) - interpret_manager.interpret() - interpret_map = interpret_manager.get_interpretation( - "train", f"test-{interpret_config.interpret.method}" - ) diff --git a/tests/test_predict.py b/tests/test_predict.py deleted file mode 100644 index e515ef41c..000000000 --- a/tests/test_predict.py +++ /dev/null @@ -1,136 +0,0 @@ -# 
coding: utf8 -import json -import shutil -from os.path import exists -from pathlib import Path - -import pytest - -from clinicadl.metrics.utils import get_metrics -from clinicadl.predictor.predictor import Predictor -from clinicadl.predictor.utils import get_prediction - -from .testing_tools import compare_folders, modify_maps - - -@pytest.fixture( - params=[ - "predict_image_classification", - "predict_roi_regression", - "predict_slice_classification", - "predict_patch_regression", - "predict_patch_multi_classification", - "predict_roi_reconstruction", - ] -) -def test_name(request): - return request.param - - -def test_predict(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "predict" / "in" - ref_dir = base_dir / "predict" / "ref" - tmp_out_dir = tmp_path / "predict" / "out" - tmp_out_dir.mkdir(parents=True) - - if test_name == "predict_image_classification": - maps_name = "maps_image_cnn" - modes = ["image"] - use_labels = True - elif test_name == "predict_slice_classification": - maps_name = "maps_slice_cnn" - modes = ["image", "slice"] - use_labels = True - elif test_name == "predict_patch_regression": - maps_name = "maps_patch_cnn" - modes = ["image", "patch"] - use_labels = False - elif test_name == "predict_roi_regression": - maps_name = "maps_roi_cnn" - modes = ["image", "roi"] - use_labels = False - elif test_name == "predict_patch_multi_classification": - maps_name = "maps_patch_multi_cnn" - modes = ["image", "patch"] - use_labels = False - elif test_name == "predict_roi_reconstruction": - maps_name = "maps_roi_ae" - modes = ["roi"] - use_labels = False - else: - raise NotImplementedError(f"Test {test_name} is not implemented.") - - shutil.copytree(input_dir / maps_name, tmp_out_dir / maps_name) - model_folder = tmp_out_dir / maps_name - - if cmdopt["adapt-base-dir"]: - with open(model_folder / "maps.json", "r") as f: - config = json.load(f) - config = modify_maps( - maps=config, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) - with open(model_folder / "maps.json", "w") as f: - json.dump(config, f, skipkeys=True, indent=4) - - with open(model_folder / "groups/test-RANDOM/maps.json", "r") as f: - config = json.load(f) - config = modify_maps( - maps=config, - base_dir=base_dir, - no_gpu=False, - adapt_base_dir=cmdopt["adapt-base-dir"], - ) - with open(model_folder / "groups/test-RANDOM/maps.json", "w") as f: - json.dump(config, f, skipkeys=True, indent=4) - - tmp_out_subdir = str(model_folder / "split-0/best-loss/test-RANDOM") - if exists(tmp_out_subdir): - shutil.rmtree(tmp_out_subdir) - - # # Correction of JSON file for ROI - # if "roi" in modes: - # json_path = model_folder / "maps.json" - # with open(json_path, "r") as f: - # parameters = json.load(f) - # parameters["roi_list"] = ["leftHippocampusBox", "rightHippocampusBox"] - # json_data = json.dumps(parameters, skipkeys=True, indent=4) - # with open(json_path, "w") as f: - # f.write(json_data) - - from clinicadl.predictor.config import PredictConfig - - predict_config = PredictConfig( - maps_dir=model_folder, - data_group="test-RANDOM", - caps_directory=input_dir / "caps_random", - tsv_path=input_dir / "caps_random/data.tsv", - gpu=False, - use_labels=use_labels, - overwrite=True, - diagnoses=["CN"], - ) - predict_manager = Predictor(predict_config) - predict_manager.predict() - - for mode in modes: - get_prediction( - predict_manager.maps_manager.maps_path, - data_group="test-RANDOM", - mode=mode, - ) - if use_labels: - get_metrics( - 
predict_manager.maps_manager.maps_path, - data_group="test-RANDOM", - mode=mode, - ) - - assert compare_folders( - tmp_out_dir / maps_name, - input_dir / maps_name, - tmp_out_dir, - ) diff --git a/tests/test_prepare_data.py b/tests/test_prepare_data.py deleted file mode 100644 index b6dda43d1..000000000 --- a/tests/test_prepare_data.py +++ /dev/null @@ -1,209 +0,0 @@ -# coding: utf8 - -import os -import shutil -import warnings -from os import PathLike -from os.path import join -from pathlib import Path -from typing import Any, Dict, List - -import pytest - -from clinicadl.caps_dataset.caps_dataset_config import ( - CapsDatasetConfig, - get_preprocessing, -) -from clinicadl.caps_dataset.extraction.config import ExtractionROIConfig -from clinicadl.caps_dataset.preprocessing.config import ( - CustomPreprocessingConfig, - PETPreprocessingConfig, -) -from clinicadl.utils.enum import ( - ExtractionMethod, - Preprocessing, - SUVRReferenceRegions, - Tracer, -) -from tests.testing_tools import clean_folder, compare_folders - -warnings.filterwarnings("ignore") - - -@pytest.fixture( - params=[ - "slice", - "patch", - "image", - "roi", - ] -) -def test_name(request): - return request.param - - -def test_prepare_data(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "prepare_data" / "in" - ref_dir = base_dir / "prepare_data" / "ref" - tmp_out_dir = tmp_path / "prepare_data" / "out" - tmp_out_dir.mkdir(parents=True) - - clean_folder(tmp_out_dir, recreate=True) - - input_caps_directory = input_dir / "caps" - input_caps_flair_directory = input_dir / "caps_flair" - if test_name == "image": - if (tmp_out_dir / "caps_image").is_dir(): - shutil.rmtree(tmp_out_dir / "caps_image") - shutil.copytree(input_caps_directory, tmp_out_dir / "caps_image") - - if (tmp_out_dir / "caps_image_flair").is_dir(): - shutil.rmtree(tmp_out_dir / "caps_image_flair") - shutil.copytree(input_caps_flair_directory, tmp_out_dir / "caps_image_flair") - - config = CapsDatasetConfig.from_preprocessing_and_extraction_method( - extraction=ExtractionMethod.IMAGE, - preprocessing_type=Preprocessing.T1_LINEAR, - preprocessing=Preprocessing.T1_LINEAR, - caps_directory=tmp_out_dir / "caps_image", - ) - - elif test_name == "patch": - if (tmp_out_dir / "caps_patch").is_dir(): - shutil.rmtree(tmp_out_dir / "caps_patch") - shutil.copytree(input_caps_directory, tmp_out_dir / "caps_patch") - - if (tmp_out_dir / "caps_patch_flair").is_dir(): - shutil.rmtree(tmp_out_dir / "caps_patch_flair") - shutil.copytree(input_caps_flair_directory, tmp_out_dir / "caps_patch_flair") - - config = CapsDatasetConfig.from_preprocessing_and_extraction_method( - extraction=ExtractionMethod.PATCH, - preprocessing_type=Preprocessing.T1_LINEAR, - preprocessing=Preprocessing.T1_LINEAR, - caps_directory=tmp_out_dir / "caps_patch", - ) - - elif test_name == "slice": - if (tmp_out_dir / "caps_slice").is_dir(): - shutil.rmtree(tmp_out_dir / "caps_slice") - shutil.copytree(input_caps_directory, tmp_out_dir / "caps_slice") - - if (tmp_out_dir / "caps_slice_flair").is_dir(): - shutil.rmtree(tmp_out_dir / "caps_slice_flair") - shutil.copytree(input_caps_flair_directory, tmp_out_dir / "caps_slice_flair") - - config = CapsDatasetConfig.from_preprocessing_and_extraction_method( - extraction=ExtractionMethod.SLICE, - preprocessing_type=Preprocessing.T1_LINEAR, - preprocessing=Preprocessing.T1_LINEAR, - caps_directory=tmp_out_dir / "caps_slice", - ) - - elif test_name == "roi": - if (tmp_out_dir / "caps_roi").is_dir(): - shutil.rmtree(tmp_out_dir / 
"caps_roi") - shutil.copytree(input_caps_directory, tmp_out_dir / "caps_roi") - - if (tmp_out_dir / "caps_roi_flair").is_dir(): - shutil.rmtree(tmp_out_dir / "caps_roi_flair") - shutil.copytree(input_caps_flair_directory, tmp_out_dir / "caps_roi_flair") - - config = CapsDatasetConfig.from_preprocessing_and_extraction_method( - extraction=ExtractionMethod.ROI, - preprocessing_type=Preprocessing.T1_LINEAR, - preprocessing=Preprocessing.T1_LINEAR, - caps_directory=tmp_out_dir / "caps_image", - roi_list=["rightHippocampusBox", "leftHippocampusBox"], - ) - - else: - print(f"Test {test_name} not available.") - assert 0 - - run_test_prepare_data(input_dir, ref_dir, tmp_out_dir, test_name, config) - - -def run_test_prepare_data( - input_dir, ref_dir, out_dir, test_name: str, config: CapsDatasetConfig -): - modalities = ["t1-linear", "pet-linear", "flair-linear"] - uncropped_image = [True, False] - acquisition_label = ["18FAV45", "11CPIB"] - config.extraction.save_features = True - - for modality in modalities: - config.preprocessing.preprocessing = Preprocessing(modality) - config.preprocessing = get_preprocessing(Preprocessing(modality))() - if modality == "pet-linear": - for acq in acquisition_label: - assert isinstance(config.preprocessing, PETPreprocessingConfig) - config.preprocessing.tracer = Tracer(acq) - config.preprocessing.suvr_reference_region = SUVRReferenceRegions( - "pons2" - ) - config.preprocessing.use_uncropped_image = False - config.extraction.extract_json = ( - f"{modality}-{acq}_mode-{test_name}.json" - ) - tsv_file = join(input_dir, f"pet_{acq}.tsv") - mode = test_name - extract_generic(out_dir, mode, tsv_file, config) - - elif modality == "custom": - assert isinstance(config.preprocessing, CustomPreprocessingConfig) - config.preprocessing.use_uncropped_image = True - config.preprocessing.custom_suffix = ( - "graymatter_space-Ixi549Space_modulated-off_probability.nii.gz" - ) - if isinstance(config.extraction, ExtractionROIConfig): - config.extraction.roi_custom_template = "Ixi549Space" - config.extraction.extract_json = f"{modality}_mode-{test_name}.json" - tsv_file = input_dir / "subjects.tsv" - mode = test_name - extract_generic(out_dir, mode, tsv_file, config) - - elif modality == "t1-linear": - for flag in uncropped_image: - config.preprocessing.use_uncropped_image = flag - config.extraction.extract_json = ( - f"{modality}_crop-{not flag}_mode-{test_name}.json" - ) - mode = test_name - extract_generic(out_dir, mode, None, config) - - elif modality == "flair-linear": - config.data.caps_directory = Path( - str(config.data.caps_directory) + "_flair" - ) - config.extraction.save_features = False - for flag in uncropped_image: - config.preprocessing.use_uncropped_image = flag - config.extraction.extract_json = ( - f"{modality}_crop-{not flag}_mode-{test_name}.json" - ) - mode = f"{test_name}_flair" - extract_generic(out_dir, mode, None, config) - else: - raise NotImplementedError( - f"Test for modality {modality} was not implemented." 
- ) - - assert compare_folders( - out_dir / f"caps_{test_name}_flair", - ref_dir / f"caps_{test_name}_flair", - out_dir, - ) - assert compare_folders( - out_dir / f"caps_{test_name}", ref_dir / f"caps_{test_name}", out_dir - ) - - -def extract_generic(out_dir, mode, tsv_file, config: CapsDatasetConfig): - from clinicadl.prepare_data.prepare_data import DeepLearningPrepareData - - config.data.caps_directory = out_dir / f"caps_{mode}" - config.data.data_tsv = tsv_file - config.dataloader.n_proc = 1 - DeepLearningPrepareData(config) diff --git a/tests/test_qc.py b/tests/test_qc.py deleted file mode 100644 index 653986d9d..000000000 --- a/tests/test_qc.py +++ /dev/null @@ -1,94 +0,0 @@ -import shutil -from os import system -from os.path import join -from pathlib import Path - -import pandas as pd -import pytest - -from tests.testing_tools import compare_folders - - -@pytest.fixture(params=["t1-linear", "t1-volume", "pet-linear"]) -def test_name(request): - return request.param - - -def test_qc(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "qualityCheck" / "in" - ref_dir = base_dir / "qualityCheck" / "ref" - tmp_out_dir = tmp_path / "qualityCheck" / "out" - tmp_out_dir.mkdir(parents=True) - - if test_name == "t1-linear": - out_tsv = tmp_out_dir / "QC.tsv" - test_input = [ - "t1-linear", - str(input_dir / "caps"), - str(out_tsv), - "--no-gpu", - ] - - elif test_name == "t1-volume": - out_dir = tmp_out_dir / "QC_T1V" - test_input = [ - "t1-volume", - str(input_dir / "caps_T1V"), - str(out_dir), - "Ixi549Space", - ] - - elif test_name == "pet-linear": - out_tsv = tmp_out_dir / "QC_pet.tsv" - test_input = [ - "pet-linear", - str(input_dir / "caps_pet"), - str(out_tsv), - "--tracer ", - "18FFDG", - "-suvr ", - "cerebellumPons2", - "--threshold", - "0.5", - ] - else: - raise NotImplementedError( - f"Quality check test on {test_name} is not implemented ." 
- ) - - flag_error = not system(f"clinicadl quality-check " + " ".join(test_input)) - assert flag_error - - if test_name == "t1-linear": - ref_tsv = join(ref_dir, "QC.tsv") - ref_df = pd.read_csv(ref_tsv, sep="\t") - ref_df.reset_index(inplace=True) - - out_df = pd.read_csv(out_tsv, sep="\t") - out_df.reset_index(inplace=True) - - out_df["pass_probability"] = round(out_df["pass_probability"], 2) - ref_df["pass_probability"] = round(ref_df["pass_probability"], 2) - - system(f"diff {out_tsv} {ref_tsv} ") - assert out_df.equals(ref_df) - - elif test_name == "t1-volume": - assert compare_folders(out_dir, ref_dir / "QC_T1V", tmp_out_dir) - - elif test_name == "pet-linear": - out_df = pd.read_csv(out_tsv, sep="\t") - ref_tsv = join(ref_dir, "QC_pet.tsv") - ref_df = pd.read_csv(ref_tsv, sep="\t") - out_df.reset_index(inplace=True) - ref_df.reset_index(inplace=True) - - out_df = pd.read_csv(out_tsv, sep="\t") - out_df.reset_index(inplace=True) - - out_df["pass_probability"] = round(out_df["pass_probability"], 2) - ref_df["pass_probability"] = round(ref_df["pass_probability"], 2) - - system(f"diff {out_tsv} {ref_tsv} ") - assert out_df.equals(ref_df) diff --git a/tests/test_random_search.py b/tests/test_random_search.py deleted file mode 100644 index 864f8b1fa..000000000 --- a/tests/test_random_search.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding: utf8 - -import os -import shutil -from os.path import join -from pathlib import Path - -import pytest - -from .testing_tools import compare_folders, modify_toml - - -# random searxh for ROI with CNN -@pytest.fixture( - params=[ - "rs_roi_cnn", - ] -) -def test_name(request): - return request.param - - -def test_random_search(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "randomSearch" / "in" - ref_dir = base_dir / "randomSearch" / "ref" - tmp_out_dir = tmp_path / "randomSearch" / "out" - - if os.path.exists(tmp_out_dir): - shutil.rmtree(tmp_out_dir) - tmp_out_dir.mkdir(parents=True) - - if test_name == "rs_roi_cnn": - toml_path = join(input_dir / "random_search.toml") - generate_input = ["random-search", str(tmp_out_dir), "job-1"] - else: - raise NotImplementedError(f"Test {test_name} is not implemented.") - - # Write random_search.toml file - shutil.copy(toml_path, tmp_out_dir) - - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: - modify_toml( - toml_path=tmp_out_dir / "random_search.toml", - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) - - flag_error_generate = not os.system("clinicadl " + " ".join(generate_input)) - performances_flag = os.path.exists( - tmp_out_dir / "job-1" / "split-0" / "best-loss" / "train" - ) - assert flag_error_generate - assert performances_flag - - assert compare_folders( - tmp_out_dir / "job-1" / "groups", - ref_dir / "job-1" / "groups", - tmp_out_dir, - ) - assert compare_folders( - tmp_out_dir / "job-1" / "split-0" / "best-loss", - ref_dir / "job-1" / "split-0" / "best-loss", - tmp_out_dir, - ) diff --git a/tests/test_resume.py b/tests/test_resume.py deleted file mode 100644 index 1598267d8..000000000 --- a/tests/test_resume.py +++ /dev/null @@ -1,75 +0,0 @@ -# coding: utf8 -import json -import shutil -from os import system -from pathlib import Path - -import pytest - -from clinicadl.maps_manager.maps_manager import MapsManager -from clinicadl.splitter.config import SplitterConfig -from clinicadl.splitter.splitter import Splitter - -from .testing_tools import modify_maps - - -@pytest.fixture( - params=[ - "stopped_1", - "stopped_2", - 
"stopped_3", - ] -) -def test_name(request): - return request.param - - -def test_resume(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "resume" / "in" - ref_dir = base_dir / "resume" / "ref" - tmp_out_dir = tmp_path / "resume" / "out" - tmp_out_dir.mkdir(parents=True) - - shutil.copytree(input_dir / test_name, tmp_out_dir / test_name) - maps_stopped = tmp_out_dir / test_name - - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: # modify the input MAPS - with open(maps_stopped / "maps.json", "r") as f: - config = json.load(f) - config = modify_maps( - maps=config, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) - with open(maps_stopped / "maps.json", "w") as f: - json.dump(config, f, skipkeys=True, indent=4) - - flag_error = not system(f"clinicadl -vv train resume {maps_stopped}") - assert flag_error - - maps_manager = MapsManager(maps_stopped) - splitter_config = SplitterConfig(**maps_manager.parameters) - split_manager = Splitter(splitter_config) - - for split in split_manager.split_iterator(): - performances_flag = ( - maps_stopped / f"split-{split}" / "best-loss" / "train" - ).exists() - assert performances_flag - - with open(maps_stopped / "maps.json", "r") as out: - json_data_out = json.load(out) - with open(ref_dir / "maps_image_cnn" / "maps.json", "r") as ref: - json_data_ref = json.load(ref) - - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: - json_data_ref = modify_maps( - maps=json_data_ref, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) - - assert json_data_ref == json_data_out diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py deleted file mode 100644 index d4611e188..000000000 --- a/tests/test_train_ae.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf8 - -import json -import os -import shutil -from pathlib import Path - -import pytest - -from .testing_tools import clean_folder, compare_folders, modify_maps - - -@pytest.fixture( - params=[ - "image_ae", - "patch_multi_ae", - "roi_ae", - "slice_ae", - ] -) -def test_name(request): - return request.param - - -def test_train_ae(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "train" / "in" - ref_dir = base_dir / "train" / "ref" - tmp_out_dir = tmp_path / "train" / "out" - tmp_out_dir.mkdir(parents=True) - - clean_folder(tmp_out_dir, recreate=True) - - labels_path = str(input_dir / "labels_list" / "2_fold") - config_path = str(input_dir / "train_config.toml") - split = 0 - - if test_name == "image_ae": - split = 1 - test_input = [ - "train", - "reconstruction", - str(input_dir / "caps_image"), - "t1-linear_crop-True_mode-image.json", - labels_path, - str(tmp_out_dir), - "-c", - config_path, - "--split", - str(split), - ] - elif test_name == "patch_multi_ae": - test_input = [ - "train", - "reconstruction", - str(input_dir / "caps_patch"), - "t1-linear_crop-True_mode-patch.json", - labels_path, - str(tmp_out_dir), - "-c", - config_path, - "--multi_network", - ] - elif test_name == "roi_ae": - test_input = [ - "train", - "reconstruction", - str(input_dir / "caps_roi"), - "t1-linear_crop-True_mode-roi.json", - labels_path, - str(tmp_out_dir), - "-c", - config_path, - ] - elif test_name == "slice_ae": - test_input = [ - "train", - "reconstruction", - str(input_dir / "caps_slice"), - "t1-linear_crop-True_mode-slice.json", - labels_path, - str(tmp_out_dir), - "-c", - config_path, - ] - else: - raise NotImplementedError(f"Test {test_name} is not 
implemented.") - - if cmdopt["no-gpu"]: - test_input.append("--no-gpu") - - if tmp_out_dir.is_dir(): - shutil.rmtree(tmp_out_dir) - - flag_error = not os.system("clinicadl " + " ".join(test_input)) - assert flag_error - - with open(tmp_out_dir / "maps.json", "r") as out: - json_data_out = json.load(out) - with open(ref_dir / ("maps_" + test_name) / "maps.json", "r") as ref: - json_data_ref = json.load(ref) - - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: - json_data_ref = modify_maps( - maps=json_data_ref, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) - json_data_out = modify_maps( - maps=json_data_out, - base_dir=base_dir, - ssda=True, - ) - assert json_data_out == json_data_ref # ["mode"] == mode - - assert compare_folders( - tmp_out_dir / "groups", - ref_dir / ("maps_" + test_name) / "groups", - tmp_path, - ) - assert compare_folders( - tmp_out_dir / f"split-{split}" / "best-loss", - ref_dir / ("maps_" + test_name) / f"split-{split}" / "best-loss", - tmp_path, - ) diff --git a/tests/test_train_cnn.py b/tests/test_train_cnn.py deleted file mode 100644 index 2a29a3166..000000000 --- a/tests/test_train_cnn.py +++ /dev/null @@ -1,148 +0,0 @@ -# coding: utf8 - -import json -import os -import shutil -from pathlib import Path - -import pytest - -from .testing_tools import compare_folders, modify_maps - - -@pytest.fixture( - params=[ - "slice_cnn", - "image_cnn", - "patch_cnn", - "patch_multi_cnn", - "roi_cnn", - ] -) -def test_name(request): - return request.param - - -def test_train_cnn(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "train" / "in" - ref_dir = base_dir / "train" / "ref" - tmp_out_dir = tmp_path / "train" / "out" - tmp_out_dir.mkdir(parents=True) - - labels_path = input_dir / "labels_list" / "2_fold" - config_path = input_dir / "train_config.toml" - split = "0" - - if test_name == "slice_cnn": - split_ref = 0 - test_input = [ - "train", - "classification", - str(input_dir / "caps_slice"), - "t1-linear_crop-True_mode-slice.json", - str(labels_path), - str(tmp_out_dir), - "-c", - str(config_path), - ] - elif test_name == "image_cnn": - split_ref = 1 - test_input = [ - "train", - "regression", - str(input_dir / "caps_image"), - "t1-linear_crop-True_mode-image.json", - str(labels_path), - str(tmp_out_dir), - "-c", - str(config_path), - ] - elif test_name == "patch_cnn": - split_ref = 0 - test_input = [ - "train", - "classification", - str(input_dir / "caps_patch"), - "t1-linear_crop-True_mode-patch.json", - str(labels_path), - str(tmp_out_dir), - "-c", - str(config_path), - "--split", - split, - ] - elif test_name == "patch_multi_cnn": - split_ref = 0 - test_input = [ - "train", - "classification", - str(input_dir / "caps_patch"), - "t1-linear_crop-True_mode-patch.json", - str(labels_path), - str(tmp_out_dir), - "-c", - str(config_path), - "--multi_network", - ] - elif test_name == "roi_cnn": - split_ref = 0 - test_input = [ - "train", - "classification", - str(input_dir / "caps_roi"), - "t1-linear_crop-True_mode-roi.json", - str(labels_path), - str(tmp_out_dir), - "-c", - str(config_path), - ] - else: - raise NotImplementedError(f"Test {test_name} is not implemented.") - - if cmdopt["no-gpu"]: - test_input.append("--no-gpu") - - if tmp_out_dir.is_dir(): - shutil.rmtree(tmp_out_dir) - - flag_error = not os.system("clinicadl " + " ".join(test_input)) - assert flag_error - - performances_flag = ( - tmp_out_dir / f"split-{split}" / "best-loss" / "train" - ).exists() - assert 
performances_flag - - with open(tmp_out_dir / "maps.json", "r") as out: - json_data_out = json.load(out) - with open(ref_dir / ("maps_" + test_name) / "maps.json", "r") as ref: - json_data_ref = json.load(ref) - - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: - json_data_ref = modify_maps( - maps=json_data_ref, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ssda=True, - ) - json_data_out = modify_maps( - maps=json_data_out, - base_dir=base_dir, - ssda=True, - ) - assert json_data_out == json_data_ref # ["mode"] == mode - - assert compare_folders( - tmp_out_dir / "groups", - ref_dir / ("maps_" + test_name) / "groups", - tmp_path, - ) - assert compare_folders( - tmp_out_dir / "split-0" / "best-loss", - ref_dir / ("maps_" + test_name) / f"split-{split_ref}" / "best-loss", - tmp_path, - ) - - shutil.rmtree(tmp_out_dir) diff --git a/tests/test_train_from_json.py b/tests/test_train_from_json.py deleted file mode 100644 index f1bdaff01..000000000 --- a/tests/test_train_from_json.py +++ /dev/null @@ -1,86 +0,0 @@ -import json -import shutil -from os import system -from pathlib import Path - -from .testing_tools import compare_folders_with_hashes, create_hashes_dict, modify_maps - - -def test_json_compatibility(cmdopt, tmp_path): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "train_from_json" / "in" - tmp_out_dir = tmp_path / "train_from_json" / "out" - tmp_out_dir.mkdir(parents=True) - - split = "0" - config_json = input_dir / "maps_roi_cnn/maps.json" - reproduced_maps_dir = tmp_out_dir / "maps_reproduced" - - if reproduced_maps_dir.exists(): - shutil.rmtree(reproduced_maps_dir) - - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: # virtually modify the input MAPS - with open(config_json, "r") as f: - config = json.load(f) - config_json = tmp_out_dir / "modified_maps.json" - config = modify_maps( - maps=config, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) - with open(config_json, "w+") as f: - json.dump(config, f, skipkeys=True, indent=4) - - flag_error = not system( - f"clinicadl train from_json {str(config_json)} {str(reproduced_maps_dir)} -s {split}" - ) - assert flag_error - - -def test_determinism(cmdopt, tmp_path): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "train_from_json" / "in" - tmp_out_dir = tmp_path / "train_from_json" / "out" - tmp_out_dir.mkdir(parents=True) - - maps_dir = tmp_out_dir / "maps_roi_cnn" - reproduced_maps_dir = tmp_out_dir / "reproduced_MAPS" - if maps_dir.exists(): - shutil.rmtree(maps_dir) - if reproduced_maps_dir.exists(): - shutil.rmtree(reproduced_maps_dir) - test_input = [ - "train", - "classification", - str(input_dir / "caps_roi"), - "t1-linear_mode-roi.json", - str(input_dir / "labels_list" / "2_fold"), - str(maps_dir), - "-c", - str(input_dir / "reproducibility_config.toml"), - ] - - if cmdopt["no-gpu"]: - test_input.append("--no-gpu") - - # Run first experiment - flag_error = not system("clinicadl " + " ".join(test_input)) - assert flag_error - input_hashes = create_hashes_dict( - maps_dir, - ignore_pattern_list=["tensorboard", ".log", "training.tsv", "maps.json"], - ) - - # Reproduce experiment (train from json) - config_json = tmp_out_dir / "maps_roi_cnn/maps.json" - - flag_error = not system( - f"clinicadl train from_json {str(config_json)} {str(reproduced_maps_dir)} -s 0" - ) - assert flag_error - compare_folders_with_hashes( - reproduced_maps_dir, - input_hashes, - ignore_pattern_list=["tensorboard", ".log", "training.tsv", 
"maps.json"], - ) diff --git a/tests/test_transfer_learning.py b/tests/test_transfer_learning.py deleted file mode 100644 index 6a7850f9b..000000000 --- a/tests/test_transfer_learning.py +++ /dev/null @@ -1,185 +0,0 @@ -import json -import os -import shutil -from pathlib import Path - -import pytest - -from .testing_tools import compare_folders, modify_maps - - -# Everything is tested on roi except for cnn --> multicnn (patch) as multicnn is not implemented for roi. -@pytest.fixture( - params=[ - "transfer_ae_ae", - "transfer_ae_cnn", - "transfer_cnn_cnn", - "transfer_cnn_multicnn", - ] -) -def test_name(request): - return request.param - - -def test_transfer_learning(cmdopt, tmp_path, test_name): - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "transferLearning" / "in" - ref_dir = base_dir / "transferLearning" / "ref" - tmp_out_dir = tmp_path / "transferLearning" / "out" - tmp_target_dir = tmp_path / "transferLearning" / "target" - tmp_out_dir.mkdir(parents=True) - - caps_roi_path = input_dir / "caps_roi" - extract_roi_str = "t1-linear_mode-roi.json" - labels_path = input_dir / "labels_list" / "2_fold" - config_path = input_dir / "train_config.toml" - if test_name == "transfer_ae_ae": - source_task = [ - "train", - "reconstruction", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_out_dir / "maps_roi_ae"), - "-c", - str(config_path), - ] - target_task = [ - "train", - "reconstruction", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_target_dir), - "-c", - str(config_path), - "--transfer_path", - str(tmp_out_dir / "maps_roi_ae"), - ] - name = "aeTOae" - elif test_name == "transfer_ae_cnn": - source_task = [ - "train", - "reconstruction", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_out_dir / "maps_roi_ae"), - "-c", - str(config_path), - ] - target_task = [ - "train", - "classification", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_target_dir), - "-c", - str(config_path), - "--transfer_path", - str(tmp_out_dir / "maps_roi_ae"), - ] - name = "aeTOcnn" - elif test_name == "transfer_cnn_cnn": - source_task = [ - "train", - "classification", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_out_dir / "maps_roi_cnn"), - "-c", - str(config_path), - ] - target_task = [ - "train", - "classification", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_target_dir), - "-c", - str(config_path), - "--transfer_path", - str(tmp_out_dir / "maps_roi_cnn"), - ] - name = "cnnTOcnn" - elif test_name == "transfer_cnn_multicnn": - source_task = [ - "train", - "classification", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_out_dir / "maps_roi_cnn"), - "-c", - str(config_path), - ] - target_task = [ - "train", - "classification", - str(caps_roi_path), - extract_roi_str, - str(labels_path), - str(tmp_target_dir), - "-c", - str(config_path), - "--transfer_path", - str(tmp_out_dir / "maps_roi_cnn"), - "--multi_network", - ] - name = "cnnTOmulticnn" - else: - raise NotImplementedError(f"Test {test_name} is not implemented.") - - if cmdopt["no-gpu"]: - source_task.append("--no-gpu") - target_task.append("--no-gpu") - - if tmp_out_dir.exists(): - shutil.rmtree(tmp_out_dir) - if tmp_target_dir.exists(): - shutil.rmtree(tmp_target_dir) - - flag_source = not os.system("clinicadl -vvv " + " ".join(source_task)) - flag_target = not os.system("clinicadl -vvv " + " ".join(target_task)) - assert flag_source - assert flag_target - - with open(tmp_target_dir 
/ "maps.json", "r") as out: - json_data_out = json.load(out) - with open(ref_dir / ("maps_roi_" + name) / "maps.json", "r") as ref: - json_data_ref = json.load(ref) - - # TODO : uncomment when CI data are correct - # ref_source_dir = Path(json_data_ref["transfer_path"]).parent - # json_data_ref["transfer_path"] = str( - # tmp_out_dir / Path(json_data_ref["transfer_path"]).relative_to(ref_source_dir) - # ) - # if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: - # json_data_ref = modify_maps( - # maps=json_data_ref, - # base_dir=base_dir, - # no_gpu=cmdopt["no-gpu"], - # adapt_base_dir=cmdopt["adapt-base-dir"], - # ) - # TODO: remove and update data - json_data_ref["caps_directory"] = json_data_out["caps_directory"] - json_data_ref["gpu"] = json_data_out["gpu"] - json_data_ref["transfer_path"] = json_data_out["transfer_path"] - json_data_ref["tsv_path"] = json_data_out["tsv_path"] - json_data_out["ssda_network"] = json_data_ref["ssda_network"] - ### - assert json_data_out == json_data_ref # ["mode"] == mode - - assert compare_folders( - tmp_target_dir / "groups", - ref_dir / ("maps_roi_" + name) / "groups", - tmp_path, - ) - assert compare_folders( - tmp_target_dir / "split-0" / "best-loss", - ref_dir / ("maps_roi_" + name) / "split-0" / "best-loss", - tmp_path, - ) diff --git a/tests/test_tsvtools.py b/tests/test_tsvtools.py deleted file mode 100644 index f77a333ac..000000000 --- a/tests/test_tsvtools.py +++ /dev/null @@ -1,284 +0,0 @@ -import os -import shutil -from pathlib import Path - -import pandas as pd -import pytest - -from clinicadl.tsvtools.tsvtools_utils import extract_baseline -from tests.testing_tools import compare_folders - -""" -Check the absence of data leakage - 1) Baseline datasets contain only one scan per subject - 2) No intersection between train and test sets - 3) Absence of MCI train subjects in test sets of subcategories of MCI -""" - - -@pytest.fixture( - params=[ - "test_getlabels", - "test_split", - "test_analysis", - "test_get_progression", - "test_prepare_experiment", - "test_get_metadata", - ] -) -def test_name(request): - return request.param - - -def test_tsvtools(cmdopt, tmp_path, test_name): - if test_name == "test_getlabels": - run_test_getlabels(cmdopt, tmp_path) - elif test_name == "test_split": - run_test_split(cmdopt, tmp_path) - elif test_name == "test_analysis": - run_test_analysis(cmdopt, tmp_path) - elif test_name == "test_prepare_experiment": - run_test_prepare_experiment(cmdopt, tmp_path) - elif test_name == "test_get_progression": - run_test_get_progression(cmdopt, tmp_path) - elif test_name == "test_get_metadata": - run_test_get_metadata(cmdopt, tmp_path) - - -def check_is_subject_unique(labels_path_baseline: Path): - print("Check subject uniqueness", labels_path_baseline) - - flag_is_unique = True - check_df = pd.read_csv(labels_path_baseline, sep="\t") - check_df.set_index(["participant_id", "session_id"], inplace=True) - if labels_path_baseline.name[-12:] != "baseline.tsv": - check_df = extract_baseline(check_df, set_index=False) - for _, subject_df in check_df.groupby(level=0): - if len(subject_df) > 1: - flag_is_unique = False - assert flag_is_unique - - -def check_is_independant( - train_path_baseline: Path, test_path_baseline: Path, subject_flag=True -): - print("Check independence") - - flag_is_independant = True - train_df = pd.read_csv(train_path_baseline, sep="\t") - train_df.set_index(["participant_id", "session_id"], inplace=True) - test_df = pd.read_csv(test_path_baseline, sep="\t") - test_df.set_index(["participant_id", 
"session_id"], inplace=True) - - for subject, session in train_df.index: - if (subject, session) in test_df.index: - flag_is_independant = False - - assert flag_is_independant - - -def run_test_suite(data_tsv, n_splits): - check_train = True - if n_splits == 0: - train_baseline_tsv = data_tsv / "train_baseline.tsv" - test_baseline_tsv = data_tsv / "test_baseline.tsv" - if not train_baseline_tsv.exists(): - check_train = False - - check_is_subject_unique(test_baseline_tsv) - if check_train: - check_is_subject_unique(train_baseline_tsv) - check_is_independant(train_baseline_tsv, test_baseline_tsv) - - else: - for split_number in range(n_splits): - for folder, _, files in os.walk(data_tsv / "split"): - folder = Path(folder) - - for file in files: - if file[-3:] == "tsv": - check_is_subject_unique(folder / file) - train_baseline_tsv = folder / "train_baseline.tsv" - test_baseline_tsv = folder / "test_baseline.tsv" - if train_baseline_tsv.exists(): - if test_baseline_tsv.exists(): - check_is_independant(train_baseline_tsv, test_baseline_tsv) - - -def run_test_getlabels(cmdopt, tmp_path): - """Checks that getlabels is working and that it is coherent with - previous version in reference_path.""" - - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "tsvtools" / "in" - ref_dir = base_dir / "tsvtools" / "ref" - tmp_out_dir = tmp_path / "tsvtools" / "out" - tmp_out_dir.mkdir(parents=True) - - import shutil - - bids_output = tmp_out_dir / "bids" - bids_directory = input_dir / "bids" - restrict_tsv = input_dir / "restrict.tsv" - output_tsv = tmp_out_dir - if tmp_out_dir.exists(): - shutil.rmtree(tmp_out_dir) - tmp_out_dir.mkdir(parents=True) - shutil.copytree(bids_directory, bids_output) - merged_tsv = input_dir / "merge-tsv.tsv" - missing_mods_directory = input_dir / "missing_mods" - - flag_getlabels = not os.system( - f"clinicadl -vvv tsvtools get-labels {str(bids_output)} {str(output_tsv)} " - f"-d AD -d MCI -d CN -d Dementia " - f"--merged_tsv {str(merged_tsv)} --missing_mods {str(missing_mods_directory)} " - f"--restriction_tsv {str(restrict_tsv)}" - ) - assert flag_getlabels - - out_df = pd.read_csv(tmp_out_dir / "labels.tsv", sep="\t") - ref_df = pd.read_csv(ref_dir / "labels.tsv", sep="\t") - assert out_df.equals(ref_df) - - -def run_test_split(cmdopt, tmp_path): - """Checks that: - - split and kfold are working - - the loading functions can find the output - - no data leakage is introduced in split and kfold. 
- """ - - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "tsvtools" / "in" - ref_dir = base_dir / "tsvtools" / "ref" - tmp_out_dir = tmp_path / "tsvtools" / "out" - tmp_out_dir.mkdir(parents=True) - - n_test = 10 - n_splits = 2 - train_tsv = tmp_out_dir / "split/train.tsv" - labels_tsv = tmp_out_dir / "labels.tsv" - shutil.copyfile(input_dir / "labels.tsv", labels_tsv) - - flag_split = not os.system( - f"clinicadl -vvv tsvtools split {str(labels_tsv)} --subset_name test --n_test {n_test}" - ) - flag_getmetadata = not os.system( - f"clinicadl -vvv tsvtools get-metadata {str(train_tsv)} {str(labels_tsv)} -voi age -voi sex -voi diagnosis" - ) - flag_kfold = not os.system( - f"clinicadl -vvv tsvtools kfold {str(train_tsv)} --n_splits {n_splits} --subset_name validation" - ) - assert flag_split - assert flag_getmetadata - assert flag_kfold - - assert compare_folders(tmp_out_dir / "split", ref_dir / "split", tmp_out_dir) - - run_test_suite(tmp_out_dir, n_splits) - - -def run_test_analysis(cmdopt, tmp_path): - """Checks that analysis can be performed.""" - - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "tsvtools" / "in" - ref_dir = base_dir / "tsvtools" / "ref" - tmp_out_dir = tmp_path / "tsvtools" / "out" - tmp_out_dir.mkdir(parents=True) - - merged_tsv = input_dir / "merge-tsv.tsv" - labels_tsv = input_dir / "labels.tsv" - output_tsv = tmp_out_dir / "analysis.tsv" - ref_analysis_tsv = ref_dir / "analysis.tsv" - - flag_analysis = not os.system( - f"clinicadl tsvtools analysis {str(merged_tsv)} {str(labels_tsv)} {str(output_tsv)} " - f"--diagnoses CN --diagnoses MCI --diagnoses Dementia" - ) - - assert flag_analysis - ref_df = pd.read_csv(ref_analysis_tsv, sep="\t") - out_df = pd.read_csv(output_tsv, sep="\t") - assert out_df.equals(ref_df) - - -def run_test_get_progression(cmdopt, tmp_path): - """Checks that get-progression can be performed""" - - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "tsvtools" / "in" - ref_dir = base_dir / "tsvtools" / "ref" - tmp_out_dir = tmp_path / "tsvtools" / "out" - tmp_out_dir.mkdir(parents=True) - - input_progression_tsv = input_dir / "labels.tsv" - progression_tsv = tmp_out_dir / "progression.tsv" - ref_progression_tsv = ref_dir / "progression.tsv" - shutil.copyfile(input_progression_tsv, progression_tsv) - - flag_get_progression = not os.system( - f"clinicadl tsvtools get-progression {str(progression_tsv)} " - ) - assert flag_get_progression - - ref_df = pd.read_csv(ref_progression_tsv, sep="\t") - out_df = pd.read_csv(progression_tsv, sep="\t") - assert out_df.equals(ref_df) - - -def run_test_prepare_experiment(cmdopt, tmp_path): - """Checks that: - - split and kfold are working - - the loading functions can find the output - - no data leakage is introduced in split and kfold. 
- """ - - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "tsvtools" / "in" - ref_dir = base_dir / "tsvtools" / "ref" - tmp_out_dir = tmp_path / "tsvtools" / "out" - tmp_out_dir.mkdir(parents=True) - - labels_tsv = tmp_out_dir / "labels.tsv" - shutil.copyfile(input_dir / "labels.tsv", labels_tsv) - - validation_type = "kfold" - n_valid = 2 - n_test = 10 - flag_prepare_experiment = not os.system( - f"clinicadl -vvv tsvtools prepare-experiment {str(labels_tsv)} --n_test {n_test} --validation_type {validation_type} --n_validation {n_valid}" - ) - - assert flag_prepare_experiment - - assert compare_folders(tmp_out_dir / "split", ref_dir / "split", tmp_out_dir) - - run_test_suite(tmp_out_dir, n_valid) - - -def run_test_get_metadata(cmdopt, tmp_path): - """Checks that get-metadata can be performed""" - base_dir = Path(cmdopt["input"]) - input_dir = base_dir / "tsvtools" / "in" - ref_dir = base_dir / "tsvtools" / "ref" - tmp_out_dir = tmp_path / "tsvtools" / "out" - tmp_out_dir.mkdir(parents=True) - - input_metadata_tsv = input_dir / "restrict.tsv" - metadata_tsv = tmp_out_dir / "metadata.tsv" - input_labels_tsv = input_dir / "labels.tsv" - labels_tsv = tmp_out_dir / "labels.tsv" - ref_metadata_tsv = ref_dir / "metadata.tsv" - - shutil.copyfile(input_metadata_tsv, metadata_tsv) - shutil.copyfile(input_labels_tsv, labels_tsv) - - flag_get_metadata = not os.system( - f"clinicadl tsvtools get-metadata {str(metadata_tsv)} {str(labels_tsv)} -voi diagnosis -voi sex -voi age" - ) - assert flag_get_metadata - - ref_df = pd.read_csv(ref_metadata_tsv, sep="\t") - out_df = pd.read_csv(metadata_tsv, sep="\t") - assert out_df.equals(ref_df) diff --git a/tests/testing_tools.py b/tests/testing_tools.py deleted file mode 100644 index 885096374..000000000 --- a/tests/testing_tools.py +++ /dev/null @@ -1,263 +0,0 @@ -import pathlib -from os import PathLike -from pathlib import Path -from typing import Any, Dict, List - - -def ignore_pattern(file_path: pathlib.Path, ignore_pattern_list: List[str]) -> bool: - if not ignore_pattern_list: - return False - - for pattern in ignore_pattern_list: - if pattern in file_path.__str__(): - return True - return False - - -def create_hashes_dict( - path_folder: pathlib.Path, ignore_pattern_list: List[str] = None -) -> Dict[str, str]: - """ - Computes a dictionary of files with their corresponding hashes - - Args: - path_folder: starting point for the tree listing. - ignore_pattern_list: list of patterns to be ignored to create hash dictionary. - - Returns: - all_files: a dictionary of the form {/path/to/file.extension: hash(file.extension)} - """ - import hashlib - - def file_as_bytes(input_file): - with input_file: - return input_file.read() - - all_files = [] - for file in path_folder.rglob("*"): - if not ignore_pattern(file, ignore_pattern_list) and file.is_file(): - all_files.append(file) - - dict_hashes = { - fname.relative_to(path_folder).__str__(): str( - hashlib.md5(file_as_bytes(open(fname, "rb"))).digest() - ) - for fname in all_files - } - return dict_hashes - - -def compare_folders_with_hashes( - path_folder: pathlib.Path, - hashes_dict: Dict[str, str], - ignore_pattern_list: List[str] = None, -): - """ - Compares the files of a folder against a reference - - Args: - path_folder: starting point for the tree listing. - hashes_dict: a dictionary of the form {/path/to/file.extension: hash(file.extension)} - ignore_pattern_list: list of patterns to be ignored to create hash dictionary. 
- """ - hashes_new = create_hashes_dict(path_folder, ignore_pattern_list) - - if hashes_dict != hashes_new: - error_message1 = "" - error_message2 = "" - for key in hashes_dict: - if key not in hashes_new: - error_message1 += "{0} not found !\n".format(key) - elif hashes_dict[key] != hashes_new[key]: - error_message2 += "{0} does not match the reference file !\n".format( - key - ) - raise ValueError(error_message1 + error_message2) - - -def models_equal(state_dict_1, state_dict_2, epsilon=0): - import torch - - for key_item_1, key_item_2 in zip(state_dict_1.items(), state_dict_2.items()): - if torch.mean(torch.abs(key_item_1[1] - key_item_2[1])) > epsilon: - print(f"Not equivalent: {key_item_1[0]} != {key_item_2[0]}") - return False - return True - - -def tree(dir_: PathLike, file_out: PathLike): - """Creates a file (file_out) with a visual tree representing the file - hierarchy at a given directory - - .. note:: - Does not display empty directories. - - """ - from pathlib import Path - - if not dir_.is_dir(): - raise FileNotFoundError(f"No directory found at {dir_}.") - - file_content = "" - - for path in sorted(Path(dir_).rglob("*")): - if path.is_dir() and not any(path.iterdir()): - continue - depth = len(path.relative_to(dir_).parts) - spacer = " " * depth - file_content = file_content + f"{spacer}+ {path.name}\n" - - Path(file_out).write_text(file_content) - - -def compare_folders(outdir: PathLike, refdir: PathLike, tmp_path: PathLike) -> bool: - """ - Compares the file hierarchy of two folders. - - Args: - outdir: path to the first fodler. - refdir: path to the second folder. - tmp_path: path to a temporary folder. - """ - - from filecmp import cmp - from pathlib import PurePath - - file_out = PurePath(tmp_path) / "file_out.txt" - file_ref = PurePath(tmp_path) / "file_ref.txt" - tree(outdir, file_out) - tree(refdir, file_ref) - if not cmp(file_out, file_ref): - with open(file_out, "r") as fin: - out_message = fin.read() - with open(file_ref, "r") as fin: - ref_message = fin.read() - raise ValueError( - "Comparison of out and ref directories shows mismatch :\n " - "OUT :\n" + out_message + "\n REF :\n" + ref_message - ) - return True - - -def compare_folder_with_files(folder: str, file_list: List[str]) -> bool: - """Compare file existing in two folders - Args: - folder: path to a folder - file_list: list of files which must be found in folder - Returns: - True if files in file_list were all found in folder. - """ - import os - - folder_list = [] - for root, dirs, files in os.walk(folder): - folder_list.extend(files) - - print(f"Missing files {set(file_list) - set(folder_list)}") - return set(file_list).issubset(set(folder_list)) - - -def clean_folder(path, recreate=True): - from os import makedirs - from os.path import abspath, exists - from shutil import rmtree - - abs_path = abspath(path) - if exists(abs_path): - rmtree(abs_path) - if recreate: - makedirs(abs_path) - - -def modify_maps( - maps: Dict[str, Any], - base_dir: Path, - no_gpu: bool = False, - adapt_base_dir: bool = False, - modify_split: bool = False, - ssda: bool = False, -) -> Dict[str, Any]: - """ - Modifies a MAPS dictionary if the user passed --no-gpu or --adapt-base-dir flags. - - Parameters - ---------- - maps : Dict[str, Any] - The MAPS dictionary. - base_dir : Path - The base directory, where CI data are stored. - no_gpu : bool (optional, default=False) - Whether the user activated the --no-gpu flag. - adapt_base_dir : bool (optional, default=False) - Whether the user activated the --adapt-base-dir flag. 
- - Returns - ------- - Dict[str, Any] - The modified MAPS dictionary. - """ - if no_gpu: - maps["gpu"] = False - if adapt_base_dir: - base_dir = base_dir.resolve() - ref_base_dir = Path(maps["caps_directory"]).parents[2] - maps["caps_directory"] = str( - base_dir / Path(maps["caps_directory"]).relative_to(ref_base_dir) - ) - try: - maps["tsv_path"] = str( - base_dir / Path(maps["tsv_path"]).relative_to(ref_base_dir) - ) - except KeyError: # maps with only caps directory - pass - - if modify_split: - maps["split"] = (0,) - - if ssda: - maps["ssda_network"] = False - return maps - - -def modify_toml( - toml_path: Path, - base_dir: Path, - no_gpu: bool = False, - adapt_base_dir: bool = False, -) -> None: - """ - Modifies a TOML file if the user passed --no-gpu or --adapt-base-dir flags. - - Parameters - ---------- - toml_path : Path - The path of the TOML file. - base_dir : Path - The base directory, where CI data are stored. - no_gpu : bool (optional, default=False) - Whether the user activated the --no-gpu flag. - adapt_base_dir : bool (optional, default=False) - Whether the user activated the --adapt-base-dir flag. - """ - import toml - - config = toml.load(toml_path) - if no_gpu: - try: - config["Computational"]["gpu"] = False - except KeyError: - config["Computational"] = {"gpu": False} - if adapt_base_dir: - random_search_config = config["Random_Search"] - base_dir = base_dir.resolve() - ref_base_dir = Path(random_search_config["caps_directory"]).parents[2] - random_search_config["caps_directory"] = str( - base_dir - / Path(random_search_config["caps_directory"]).relative_to(ref_base_dir) - ) - random_search_config["tsv_path"] = str( - base_dir / Path(random_search_config["tsv_path"]).relative_to(ref_base_dir) - ) - f = open(toml_path, "w") - toml.dump(config, f) - f.close() diff --git a/tests/unittests/monai_metrics/config/test_classification.py b/tests/unittests/monai_metrics/config/test_classification.py index e4192a254..16941099d 100644 --- a/tests/unittests/monai_metrics/config/test_classification.py +++ b/tests/unittests/monai_metrics/config/test_classification.py @@ -1,11 +1,11 @@ import pytest from pydantic import ValidationError -from clinicadl.monai_metrics.config.classification import ( +from clinicadl.metrics.config.classification import ( ROCAUCConfig, create_confusion_matrix_config, ) -from clinicadl.monai_metrics.config.enum import ConfusionMatrixMetric +from clinicadl.metrics.config.enum import ConfusionMatrixMetric # ROCAUC diff --git a/tests/unittests/monai_metrics/config/test_factory.py b/tests/unittests/monai_metrics/config/test_factory.py index 5eed6f459..97be7453d 100644 --- a/tests/unittests/monai_metrics/config/test_factory.py +++ b/tests/unittests/monai_metrics/config/test_factory.py @@ -1,6 +1,6 @@ import pytest -from clinicadl.monai_metrics.config import ImplementedMetrics, create_metric_config +from clinicadl.metrics.config import ImplementedMetrics, create_metric_config def test_create_training_config(): diff --git a/tests/unittests/monai_metrics/config/test_generation.py b/tests/unittests/monai_metrics/config/test_generation.py index 4e1691567..1c55fb221 100644 --- a/tests/unittests/monai_metrics/config/test_generation.py +++ b/tests/unittests/monai_metrics/config/test_generation.py @@ -1,7 +1,7 @@ import pytest from pydantic import ValidationError -from clinicadl.monai_metrics.config.generation import MMDMetricConfig +from clinicadl.metrics.config.generation import MMDMetricConfig def test_fails_validation(): diff --git 
a/tests/unittests/monai_metrics/config/test_reconstruction.py b/tests/unittests/monai_metrics/config/test_reconstruction.py index 521c2717e..4ade04f5c 100644 --- a/tests/unittests/monai_metrics/config/test_reconstruction.py +++ b/tests/unittests/monai_metrics/config/test_reconstruction.py @@ -1,7 +1,7 @@ import pytest from pydantic import ValidationError -from clinicadl.monai_metrics.config.reconstruction import ( +from clinicadl.metrics.config.reconstruction import ( MultiScaleSSIMConfig, PSNRConfig, SSIMConfig, diff --git a/tests/unittests/monai_metrics/config/test_regression.py b/tests/unittests/monai_metrics/config/test_regression.py index f95f20b6a..7c4407e30 100644 --- a/tests/unittests/monai_metrics/config/test_regression.py +++ b/tests/unittests/monai_metrics/config/test_regression.py @@ -1,7 +1,7 @@ import pytest from pydantic import ValidationError -from clinicadl.monai_metrics.config.regression import ( +from clinicadl.metrics.config.regression import ( MAEConfig, MSEConfig, RMSEConfig, diff --git a/tests/unittests/monai_metrics/config/test_segmentation.py b/tests/unittests/monai_metrics/config/test_segmentation.py index 537f289c9..52fa8c501 100644 --- a/tests/unittests/monai_metrics/config/test_segmentation.py +++ b/tests/unittests/monai_metrics/config/test_segmentation.py @@ -1,7 +1,7 @@ import pytest from pydantic import ValidationError -from clinicadl.monai_metrics.config.segmentation import ( +from clinicadl.metrics.config.segmentation import ( DiceConfig, GeneralizedDiceConfig, HausdorffDistanceConfig, diff --git a/tests/unittests/monai_metrics/test_factory.py b/tests/unittests/monai_metrics/test_factory.py index 5d265e416..3896c6bbc 100644 --- a/tests/unittests/monai_metrics/test_factory.py +++ b/tests/unittests/monai_metrics/test_factory.py @@ -6,8 +6,8 @@ def test_get_metric(): from monai.metrics import SSIMMetric - from clinicadl.monai_metrics import get_metric - from clinicadl.monai_metrics.config import ImplementedMetrics, create_metric_config + from clinicadl.metrics import get_metric + from clinicadl.metrics.config import ImplementedMetrics, create_metric_config for metric_name in [e.value for e in ImplementedMetrics if e != "Loss"]: if ( @@ -53,7 +53,7 @@ def loss_fn_bis(y_pred: Tensor) -> Tensor: def test_loss_to_metric(): from torch import randn - from clinicadl.monai_metrics import loss_to_metric + from clinicadl.metrics import loss_to_metric y_pred = randn(10, 5, 5) y_true = randn(10, 5, 5) diff --git a/tests/unittests/monai_networks/config/test_config.py b/tests/unittests/monai_networks/config/test_config.py index 9da6756f5..f4ef7c65e 100644 --- a/tests/unittests/monai_networks/config/test_config.py +++ b/tests/unittests/monai_networks/config/test_config.py @@ -1,24 +1,24 @@ import pytest -from clinicadl.monai_networks.config.densenet import ( +from clinicadl.networks.config.densenet import ( DenseNet121Config, DenseNet161Config, DenseNet169Config, DenseNet201Config, ) -from clinicadl.monai_networks.config.resnet import ( +from clinicadl.networks.config.resnet import ( ResNet18Config, ResNet34Config, ResNet50Config, ResNet101Config, ResNet152Config, ) -from clinicadl.monai_networks.config.senet import ( +from clinicadl.networks.config.senet import ( SEResNet50Config, SEResNet101Config, SEResNet152Config, ) -from clinicadl.monai_networks.config.vit import ( +from clinicadl.networks.config.vit import ( ViTB16Config, ViTB32Config, ViTL16Config, @@ -77,7 +77,7 @@ def test_sota_vit_config(config_class): def test_autoencoder_config(): - from 
clinicadl.monai_networks.config.autoencoder import AutoEncoderConfig + from clinicadl.networks.config.autoencoder import AutoEncoderConfig config = AutoEncoderConfig( in_shape=(1, 10, 10), @@ -92,7 +92,7 @@ def test_autoencoder_config(): def test_vae_config(): - from clinicadl.monai_networks.config.autoencoder import VAEConfig + from clinicadl.networks.config.autoencoder import VAEConfig config = VAEConfig( in_shape=(1, 10), @@ -107,7 +107,7 @@ def test_vae_config(): def test_cnn_config(): - from clinicadl.monai_networks.config.cnn import CNNConfig + from clinicadl.networks.config.cnn import CNNConfig config = CNNConfig( in_shape=(2, 10, 10, 10), num_outputs=1, conv_args={"channels": [1]} @@ -118,7 +118,7 @@ def test_cnn_config(): def test_generator_config(): - from clinicadl.monai_networks.config.generator import GeneratorConfig + from clinicadl.networks.config.generator import GeneratorConfig config = GeneratorConfig( start_shape=(2, 10, 10), latent_size=2, conv_args={"channels": [1]} @@ -129,7 +129,7 @@ def test_generator_config(): def test_conv_decoder_config(): - from clinicadl.monai_networks.config.conv_decoder import ConvDecoderConfig + from clinicadl.networks.config.conv_decoder import ConvDecoderConfig config = ConvDecoderConfig( in_channels=1, spatial_dims=2, channels=[1, 2], kernel_size=(3, 4) @@ -140,7 +140,7 @@ def test_conv_decoder_config(): def test_conv_encoder_config(): - from clinicadl.monai_networks.config.conv_encoder import ConvEncoderConfig + from clinicadl.networks.config.conv_encoder import ConvEncoderConfig config = ConvEncoderConfig( in_channels=1, spatial_dims=2, channels=[1, 2], kernel_size=[(3, 4), (4, 5)] @@ -151,7 +151,7 @@ def test_conv_encoder_config(): def test_mlp_config(): - from clinicadl.monai_networks.config.mlp import MLPConfig + from clinicadl.networks.config.mlp import MLPConfig config = MLPConfig( in_channels=1, out_channels=1, hidden_channels=[2, 3], dropout=0.1 @@ -162,7 +162,7 @@ def test_mlp_config(): def test_resnet_config(): - from clinicadl.monai_networks.config.resnet import ResNetConfig + from clinicadl.networks.config.resnet import ResNetConfig config = ResNetConfig( spatial_dims=1, in_channels=1, num_outputs=None, block_type="bottleneck" @@ -173,7 +173,7 @@ def test_resnet_config(): def test_seresnet_config(): - from clinicadl.monai_networks.config.senet import SEResNetConfig + from clinicadl.networks.config.senet import SEResNetConfig config = SEResNetConfig( spatial_dims=1, @@ -189,7 +189,7 @@ def test_seresnet_config(): def test_densenet_config(): - from clinicadl.monai_networks.config.densenet import DenseNetConfig + from clinicadl.networks.config.densenet import DenseNetConfig config = DenseNetConfig( spatial_dims=1, in_channels=1, num_outputs=2, n_dense_layers=(1, 2) @@ -200,7 +200,7 @@ def test_densenet_config(): def test_vit_config(): - from clinicadl.monai_networks.config.vit import ViTConfig + from clinicadl.networks.config.vit import ViTConfig config = ViTConfig(in_shape=(1, 10), patch_size=2, num_outputs=1, embedding_dim=42) assert config.num_outputs == 1 @@ -209,7 +209,7 @@ def test_vit_config(): def test_unet_config(): - from clinicadl.monai_networks.config.unet import UNetConfig + from clinicadl.networks.config.unet import UNetConfig config = UNetConfig(spatial_dims=1, in_channels=1, out_channels=1, channels=(4, 8)) assert config.out_channels == 1 @@ -218,7 +218,7 @@ def test_unet_config(): def test_att_unet_config(): - from clinicadl.monai_networks.config.unet import AttentionUNetConfig + from 
clinicadl.networks.config.unet import AttentionUNetConfig config = AttentionUNetConfig( spatial_dims=1, diff --git a/tests/unittests/monai_networks/config/test_factory.py b/tests/unittests/monai_networks/config/test_factory.py index 9dcd7fdc1..3f91b52e1 100644 --- a/tests/unittests/monai_networks/config/test_factory.py +++ b/tests/unittests/monai_networks/config/test_factory.py @@ -1,4 +1,4 @@ -from clinicadl.monai_networks.config import ImplementedNetworks, create_network_config +from clinicadl.networks.config import ImplementedNetworks, create_network_config def test_create_training_config(): diff --git a/tests/unittests/monai_networks/nn/test_att_unet.py b/tests/unittests/monai_networks/nn/test_att_unet.py index 711f11142..6f5786828 100644 --- a/tests/unittests/monai_networks/nn/test_att_unet.py +++ b/tests/unittests/monai_networks/nn/test_att_unet.py @@ -1,8 +1,8 @@ import pytest import torch -from clinicadl.monai_networks.nn import AttentionUNet -from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn import AttentionUNet +from clinicadl.networks.nn.layers.utils import ActFunction INPUT_1D = torch.randn(2, 1, 16) INPUT_2D = torch.randn(2, 2, 32, 64) diff --git a/tests/unittests/monai_networks/nn/test_autoencoder.py b/tests/unittests/monai_networks/nn/test_autoencoder.py index c59874353..c3d54b458 100644 --- a/tests/unittests/monai_networks/nn/test_autoencoder.py +++ b/tests/unittests/monai_networks/nn/test_autoencoder.py @@ -2,8 +2,8 @@ import torch from torch.nn import GELU, Sigmoid, Tanh -from clinicadl.monai_networks.nn import AutoEncoder -from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn import AutoEncoder +from clinicadl.networks.nn.layers.utils import ActFunction @pytest.mark.parametrize( diff --git a/tests/unittests/monai_networks/nn/test_cnn.py b/tests/unittests/monai_networks/nn/test_cnn.py index 095c8da5d..a1c2d5585 100644 --- a/tests/unittests/monai_networks/nn/test_cnn.py +++ b/tests/unittests/monai_networks/nn/test_cnn.py @@ -2,7 +2,7 @@ import torch from torch.nn import Flatten, Linear, Softmax -from clinicadl.monai_networks.nn import CNN, MLP, ConvEncoder +from clinicadl.networks.nn import CNN, MLP, ConvEncoder INPUT_1D = torch.randn(3, 1, 16) INPUT_2D = torch.randn(3, 1, 15, 16) diff --git a/tests/unittests/monai_networks/nn/test_conv_decoder.py b/tests/unittests/monai_networks/nn/test_conv_decoder.py index 73576918e..44b0a76c2 100644 --- a/tests/unittests/monai_networks/nn/test_conv_decoder.py +++ b/tests/unittests/monai_networks/nn/test_conv_decoder.py @@ -2,8 +2,8 @@ import torch from torch.nn import ELU, ConvTranspose2d, Dropout, InstanceNorm2d, Upsample -from clinicadl.monai_networks.nn import ConvDecoder -from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn import ConvDecoder +from clinicadl.networks.nn.layers.utils import ActFunction @pytest.fixture diff --git a/tests/unittests/monai_networks/nn/test_conv_encoder.py b/tests/unittests/monai_networks/nn/test_conv_encoder.py index 3a21859b8..7239a7530 100644 --- a/tests/unittests/monai_networks/nn/test_conv_encoder.py +++ b/tests/unittests/monai_networks/nn/test_conv_encoder.py @@ -11,8 +11,8 @@ MaxPool2d, ) -from clinicadl.monai_networks.nn import ConvEncoder -from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn import ConvEncoder +from clinicadl.networks.nn.layers.utils import ActFunction @pytest.fixture diff --git 
a/tests/unittests/monai_networks/nn/test_densenet.py b/tests/unittests/monai_networks/nn/test_densenet.py index 303a22a6e..b7fdea50f 100644 --- a/tests/unittests/monai_networks/nn/test_densenet.py +++ b/tests/unittests/monai_networks/nn/test_densenet.py @@ -1,9 +1,9 @@ import pytest import torch -from clinicadl.monai_networks.nn import DenseNet, get_densenet -from clinicadl.monai_networks.nn.densenet import SOTADenseNet -from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn import DenseNet, get_densenet +from clinicadl.networks.nn.densenet import SOTADenseNet +from clinicadl.networks.nn.layers.utils import ActFunction INPUT_1D = torch.randn(3, 1, 16) INPUT_2D = torch.randn(3, 2, 15, 16) diff --git a/tests/unittests/monai_networks/nn/test_generator.py b/tests/unittests/monai_networks/nn/test_generator.py index 1fa4fffa7..0bc918a7d 100644 --- a/tests/unittests/monai_networks/nn/test_generator.py +++ b/tests/unittests/monai_networks/nn/test_generator.py @@ -2,7 +2,7 @@ import torch from torch.nn import Flatten, Linear -from clinicadl.monai_networks.nn import MLP, ConvDecoder, Generator +from clinicadl.networks.nn import MLP, ConvDecoder, Generator @pytest.fixture diff --git a/tests/unittests/monai_networks/nn/test_mlp.py b/tests/unittests/monai_networks/nn/test_mlp.py index 5eb3105a8..91ad682d1 100644 --- a/tests/unittests/monai_networks/nn/test_mlp.py +++ b/tests/unittests/monai_networks/nn/test_mlp.py @@ -2,8 +2,8 @@ import torch from torch.nn import ELU, Dropout, InstanceNorm1d, Linear -from clinicadl.monai_networks.nn import MLP -from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn import MLP +from clinicadl.networks.nn.layers.utils import ActFunction @pytest.fixture diff --git a/tests/unittests/monai_networks/nn/test_resnet.py b/tests/unittests/monai_networks/nn/test_resnet.py index 20ed028d0..a99ea06dc 100644 --- a/tests/unittests/monai_networks/nn/test_resnet.py +++ b/tests/unittests/monai_networks/nn/test_resnet.py @@ -1,10 +1,10 @@ import pytest import torch -from clinicadl.monai_networks.nn import ResNet, get_resnet -from clinicadl.monai_networks.nn.layers.resnet import ResNetBlock, ResNetBottleneck -from clinicadl.monai_networks.nn.layers.utils import ActFunction -from clinicadl.monai_networks.nn.resnet import SOTAResNet +from clinicadl.networks.nn import ResNet, get_resnet +from clinicadl.networks.nn.layers.resnet import ResNetBlock, ResNetBottleneck +from clinicadl.networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn.resnet import SOTAResNet INPUT_1D = torch.randn(3, 1, 16) INPUT_2D = torch.randn(3, 2, 15, 16) diff --git a/tests/unittests/monai_networks/nn/test_senet.py b/tests/unittests/monai_networks/nn/test_senet.py index b46eb663a..6e3527b38 100644 --- a/tests/unittests/monai_networks/nn/test_senet.py +++ b/tests/unittests/monai_networks/nn/test_senet.py @@ -1,10 +1,10 @@ import pytest import torch -from clinicadl.monai_networks.nn import SEResNet, get_seresnet -from clinicadl.monai_networks.nn.layers.senet import SEResNetBlock, SEResNetBottleneck -from clinicadl.monai_networks.nn.layers.utils import ActFunction -from clinicadl.monai_networks.nn.senet import SOTAResNet +from clinicadl.networks.nn import SEResNet, get_seresnet +from clinicadl.networks.nn.layers.senet import SEResNetBlock, SEResNetBottleneck +from clinicadl.networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn.senet import SOTAResNet INPUT_1D = torch.randn(3, 1, 16) INPUT_2D = torch.randn(3, 2, 
15, 16) diff --git a/tests/unittests/monai_networks/nn/test_unet.py b/tests/unittests/monai_networks/nn/test_unet.py index b8f06faa8..b7f6349fc 100644 --- a/tests/unittests/monai_networks/nn/test_unet.py +++ b/tests/unittests/monai_networks/nn/test_unet.py @@ -1,8 +1,8 @@ import pytest import torch -from clinicadl.monai_networks.nn import UNet -from clinicadl.monai_networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn import UNet +from clinicadl.networks.nn.layers.utils import ActFunction INPUT_1D = torch.randn(2, 1, 16) INPUT_2D = torch.randn(2, 2, 32, 64) diff --git a/tests/unittests/monai_networks/nn/test_vae.py b/tests/unittests/monai_networks/nn/test_vae.py index ca2fb24b8..6f2d0f279 100644 --- a/tests/unittests/monai_networks/nn/test_vae.py +++ b/tests/unittests/monai_networks/nn/test_vae.py @@ -3,7 +3,7 @@ from numpy import isclose from torch.nn import ReLU -from clinicadl.monai_networks.nn import VAE +from clinicadl.networks.nn import VAE @pytest.mark.parametrize( diff --git a/tests/unittests/monai_networks/nn/test_vit.py b/tests/unittests/monai_networks/nn/test_vit.py index 741d6e5f8..b8b5938b8 100644 --- a/tests/unittests/monai_networks/nn/test_vit.py +++ b/tests/unittests/monai_networks/nn/test_vit.py @@ -2,9 +2,9 @@ import pytest import torch -from clinicadl.monai_networks.nn import ViT, get_vit -from clinicadl.monai_networks.nn.layers.utils import ActFunction -from clinicadl.monai_networks.nn.vit import SOTAViT +from clinicadl.networks.nn import ViT, get_vit +from clinicadl.networks.nn.layers.utils import ActFunction +from clinicadl.networks.nn.vit import SOTAViT INPUT_1D = torch.randn(2, 1, 16) INPUT_2D = torch.randn(2, 2, 15, 16) diff --git a/tests/unittests/monai_networks/nn/utils/test_checks.py b/tests/unittests/monai_networks/nn/utils/test_checks.py index 27cc234f5..b90368662 100644 --- a/tests/unittests/monai_networks/nn/utils/test_checks.py +++ b/tests/unittests/monai_networks/nn/utils/test_checks.py @@ -1,6 +1,6 @@ import pytest -from clinicadl.monai_networks.nn.utils.checks import ( +from clinicadl.networks.nn.utils.checks import ( _check_conv_parameter, check_adn_ordering, check_conv_args, diff --git a/tests/unittests/monai_networks/nn/utils/test_shapes.py b/tests/unittests/monai_networks/nn/utils/test_shapes.py index b7ae2d444..a0116ffe8 100644 --- a/tests/unittests/monai_networks/nn/utils/test_shapes.py +++ b/tests/unittests/monai_networks/nn/utils/test_shapes.py @@ -1,7 +1,7 @@ import pytest import torch -from clinicadl.monai_networks.nn.utils.shapes import ( +from clinicadl.networks.nn.utils.shapes import ( _calculate_adaptivepool_out_shape, _calculate_avgpool_out_shape, _calculate_maxpool_out_shape, diff --git a/tests/unittests/monai_networks/test_factory.py b/tests/unittests/monai_networks/test_factory.py index 961238111..d52f67871 100644 --- a/tests/unittests/monai_networks/test_factory.py +++ b/tests/unittests/monai_networks/test_factory.py @@ -1,13 +1,13 @@ import pytest -from clinicadl.monai_networks import ( +from clinicadl.networks import ( ImplementedNetworks, get_network, get_network_from_config, ) -from clinicadl.monai_networks.config.autoencoder import AutoEncoderConfig -from clinicadl.monai_networks.factory import _update_config_with_defaults -from clinicadl.monai_networks.nn import AutoEncoder +from clinicadl.networks.config.autoencoder import AutoEncoderConfig +from clinicadl.networks.factory import _update_config_with_defaults +from clinicadl.networks.nn import AutoEncoder tested = [] diff --git 
a/tests/unittests/nn/blocks/test_decoder.py b/tests/unittests/nn/blocks/test_decoder.py index 01bf7aef1..38a2a9e28 100644 --- a/tests/unittests/nn/blocks/test_decoder.py +++ b/tests/unittests/nn/blocks/test_decoder.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.nn.blocks.decoder as decoder +import clinicadl.networks.old_network.nn.blocks.decoder as decoder @pytest.fixture diff --git a/tests/unittests/nn/blocks/test_encoder.py b/tests/unittests/nn/blocks/test_encoder.py index dcb676f96..9149b731a 100644 --- a/tests/unittests/nn/blocks/test_encoder.py +++ b/tests/unittests/nn/blocks/test_encoder.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.nn.blocks.encoder as encoder +import clinicadl.networks.old_network.nn.blocks.encoder as encoder @pytest.fixture diff --git a/tests/unittests/nn/blocks/test_residual.py b/tests/unittests/nn/blocks/test_residual.py index 302051ee3..7db9800ed 100644 --- a/tests/unittests/nn/blocks/test_residual.py +++ b/tests/unittests/nn/blocks/test_residual.py @@ -1,6 +1,6 @@ import torch -from clinicadl.nn.blocks import ResBlock +from clinicadl.networks.old_network.nn.blocks import ResBlock def test_resblock(): diff --git a/tests/unittests/nn/blocks/test_se.py b/tests/unittests/nn/blocks/test_se.py index 2444bcc3a..fba558ade 100644 --- a/tests/unittests/nn/blocks/test_se.py +++ b/tests/unittests/nn/blocks/test_se.py @@ -8,7 +8,7 @@ def input_3d(): def test_SE_Block(input_3d): - from clinicadl.nn.blocks import SE_Block + from clinicadl.networks.old_network.nn.blocks import SE_Block layer = SE_Block(num_channels=input_3d.shape[1], ratio_channel=4) out = layer(input_3d) @@ -16,7 +16,7 @@ def test_SE_Block(input_3d): def test_ResBlock_SE(input_3d): - from clinicadl.nn.blocks import ResBlock_SE + from clinicadl.networks.old_network.nn.blocks import ResBlock_SE layer = ResBlock_SE( num_channels=input_3d.shape[1], diff --git a/tests/unittests/nn/blocks/test_unet.py b/tests/unittests/nn/blocks/test_unet.py index 4e7170d77..e1f11ab5d 100644 --- a/tests/unittests/nn/blocks/test_unet.py +++ b/tests/unittests/nn/blocks/test_unet.py @@ -13,7 +13,7 @@ def skip_input(): def test_UNetDown(input_3d): - from clinicadl.nn.blocks import UNetDown + from clinicadl.networks.old_network.nn.blocks import UNetDown layer = UNetDown(in_size=input_3d.shape[1], out_size=8) out = layer(input_3d) @@ -21,7 +21,7 @@ def test_UNetDown(input_3d): def test_UNetUp(input_3d, skip_input): - from clinicadl.nn.blocks import UNetUp + from clinicadl.networks.old_network.nn.blocks import UNetUp layer = UNetUp(in_size=input_3d.shape[1] * 2, out_size=2) out = layer(input_3d, skip_input=skip_input) @@ -29,7 +29,7 @@ def test_UNetUp(input_3d, skip_input): def test_UNetFinalLayer(input_3d, skip_input): - from clinicadl.nn.blocks import UNetFinalLayer + from clinicadl.networks.old_network.nn.blocks import UNetFinalLayer layer = UNetFinalLayer(in_size=input_3d.shape[1] * 2, out_size=2) out = layer(input_3d, skip_input=skip_input) diff --git a/tests/unittests/nn/layers/factory/test_factories.py b/tests/unittests/nn/layers/factory/test_factories.py index 7036cc724..0c1af2da5 100644 --- a/tests/unittests/nn/layers/factory/test_factories.py +++ b/tests/unittests/nn/layers/factory/test_factories.py @@ -3,7 +3,7 @@ def test_get_conv_layer(): - from clinicadl.nn.layers.factory import get_conv_layer + from clinicadl.networks.old_network.nn.layers.factory import get_conv_layer assert get_conv_layer(2) == nn.Conv2d assert get_conv_layer(3) == nn.Conv3d @@ -12,7 +12,7 @@ def test_get_conv_layer(): 
def test_get_norm_layer(): - from clinicadl.nn.layers.factory import get_norm_layer + from clinicadl.networks.old_network.nn.layers.factory import get_norm_layer assert get_norm_layer("InstanceNorm", 2) == nn.InstanceNorm2d assert get_norm_layer("BatchNorm", 3) == nn.BatchNorm3d @@ -20,8 +20,8 @@ def test_get_norm_layer(): def test_get_pool_layer(): - from clinicadl.nn.layers import PadMaxPool3d - from clinicadl.nn.layers.factory import get_pool_layer + from clinicadl.networks.old_network.nn.layers import PadMaxPool3d + from clinicadl.networks.old_network.nn.layers.factory import get_pool_layer assert get_pool_layer("MaxPool", 2) == nn.MaxPool2d assert get_pool_layer("PadMaxPool", 3) == PadMaxPool3d diff --git a/tests/unittests/nn/layers/test_layers.py b/tests/unittests/nn/layers/test_layers.py index e07eb1cf6..633beb423 100644 --- a/tests/unittests/nn/layers/test_layers.py +++ b/tests/unittests/nn/layers/test_layers.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.nn.layers as layers +import clinicadl.networks.old_network.nn.layers as layers @pytest.fixture diff --git a/tests/unittests/nn/networks/factory/test_ae_factory.py b/tests/unittests/nn/networks/factory/test_ae_factory.py index a4fe1a762..8f997b874 100644 --- a/tests/unittests/nn/networks/factory/test_ae_factory.py +++ b/tests/unittests/nn/networks/factory/test_ae_factory.py @@ -2,7 +2,7 @@ import torch import torch.nn as nn -from clinicadl.nn.layers import ( +from clinicadl.networks.old_network.nn.layers import ( PadMaxPool2d, PadMaxPool3d, ) @@ -58,8 +58,8 @@ def __init__(self, input_size): @pytest.mark.parametrize("input, cnn", [("input_3d", "cnn3d"), ("input_2d", "cnn2d")]) def test_autoencoder_from_cnn(input, cnn, request): - from clinicadl.nn.networks.ae import AE - from clinicadl.nn.networks.factory import autoencoder_from_cnn + from clinicadl.networks.old_network.nn.networks.ae import AE + from clinicadl.networks.old_network.nn.networks.factory import autoencoder_from_cnn input_ = request.getfixturevalue(input) cnn = request.getfixturevalue(cnn)(input_size=input_.shape[1:]) diff --git a/tests/unittests/nn/networks/factory/test_resnet_factory.py b/tests/unittests/nn/networks/factory/test_resnet_factory.py index 1468d37ad..ed2d17610 100644 --- a/tests/unittests/nn/networks/factory/test_resnet_factory.py +++ b/tests/unittests/nn/networks/factory/test_resnet_factory.py @@ -5,7 +5,7 @@ def test_ResNetDesigner(): from torchvision.models.resnet import BasicBlock - from clinicadl.nn.networks.factory import ResNetDesigner + from clinicadl.networks.old_network.nn.networks.factory import ResNetDesigner input_ = torch.randn(2, 3, 100, 100) @@ -43,7 +43,7 @@ def forward(self, x): def test_ResNetDesigner3D(): - from clinicadl.nn.networks.factory import ResNetDesigner3D + from clinicadl.networks.old_network.nn.networks.factory import ResNetDesigner3D input_ = torch.randn(2, 3, 100, 100, 100) diff --git a/tests/unittests/nn/networks/factory/test_secnn_factory.py b/tests/unittests/nn/networks/factory/test_secnn_factory.py index 96be92620..c5650dfc7 100644 --- a/tests/unittests/nn/networks/factory/test_secnn_factory.py +++ b/tests/unittests/nn/networks/factory/test_secnn_factory.py @@ -3,7 +3,7 @@ def test_SECNNDesigner3D(): - from clinicadl.nn.networks.factory import SECNNDesigner3D + from clinicadl.networks.old_network.nn.networks.factory import SECNNDesigner3D input_ = torch.randn(2, 3, 100, 100, 100) diff --git a/tests/unittests/nn/networks/test_ae.py b/tests/unittests/nn/networks/test_ae.py index 9c6152d35..0f86ad24d 
100644 --- a/tests/unittests/nn/networks/test_ae.py +++ b/tests/unittests/nn/networks/test_ae.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.nn.networks.ae as ae +import clinicadl.networks.old_network.nn.networks.ae as ae @pytest.mark.parametrize("network", [net.value for net in ae.AE2d]) diff --git a/tests/unittests/nn/networks/test_cnn.py b/tests/unittests/nn/networks/test_cnn.py index 3f6a0cb87..b09ff7bbf 100644 --- a/tests/unittests/nn/networks/test_cnn.py +++ b/tests/unittests/nn/networks/test_cnn.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.nn.networks.cnn as cnn +import clinicadl.networks.old_network.nn.networks.cnn as cnn @pytest.fixture diff --git a/tests/unittests/nn/networks/test_unet.py b/tests/unittests/nn/networks/test_unet.py index ba0408cdb..4279205b8 100644 --- a/tests/unittests/nn/networks/test_unet.py +++ b/tests/unittests/nn/networks/test_unet.py @@ -1,6 +1,6 @@ import torch -from clinicadl.nn.networks.unet import UNet +from clinicadl.networks.old_network.nn.networks.unet import UNet def test_UNet(): diff --git a/tests/unittests/nn/networks/test_vae.py b/tests/unittests/nn/networks/test_vae.py index 308a2f185..890b0eacc 100644 --- a/tests/unittests/nn/networks/test_vae.py +++ b/tests/unittests/nn/networks/test_vae.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.nn.networks.vae as vae +import clinicadl.networks.old_network.nn.networks.vae as vae @pytest.fixture diff --git a/tests/unittests/nn/test_utils.py b/tests/unittests/nn/test_utils.py index bcd379613..f70b8c518 100644 --- a/tests/unittests/nn/test_utils.py +++ b/tests/unittests/nn/test_utils.py @@ -3,7 +3,7 @@ def test_compute_output_size(): - from clinicadl.nn.utils import compute_output_size + from clinicadl.networks.old_network.nn.utils import compute_output_size input_2d = torch.randn(3, 2, 100, 100) input_3d = torch.randn(3, 1, 100, 100, 100) diff --git a/tests/unittests/optim/early_stopping/test_config.py b/tests/unittests/optim/early_stopping/test_config.py index 574887a6f..4c12d8208 100644 --- a/tests/unittests/optim/early_stopping/test_config.py +++ b/tests/unittests/optim/early_stopping/test_config.py @@ -1,7 +1,7 @@ import pytest from pydantic import ValidationError -from clinicadl.optim.early_stopping import EarlyStoppingConfig +from clinicadl.optimization.early_stopping import EarlyStoppingConfig def test_EarlyStoppingConfig(): diff --git a/tests/unittests/optim/early_stopping/test_early_stopper.py b/tests/unittests/optim/early_stopping/test_early_stopper.py index 13f0d9f9c..91bbd7e09 100644 --- a/tests/unittests/optim/early_stopping/test_early_stopper.py +++ b/tests/unittests/optim/early_stopping/test_early_stopper.py @@ -1,6 +1,6 @@ import numpy as np -from clinicadl.optim.early_stopping import EarlyStopping, EarlyStoppingConfig +from clinicadl.optimization.early_stopping import EarlyStopping, EarlyStoppingConfig def test_EarlyStopping(): diff --git a/tests/unittests/optim/lr_scheduler/test_config.py b/tests/unittests/optim/lr_scheduler/test_config.py index dbf96ccc8..a2233055c 100644 --- a/tests/unittests/optim/lr_scheduler/test_config.py +++ b/tests/unittests/optim/lr_scheduler/test_config.py @@ -1,7 +1,7 @@ import pytest from pydantic import ValidationError -from clinicadl.optim.lr_scheduler.config import ( +from clinicadl.optimization.lr_scheduler.config import ( ConstantLRConfig, LinearLRConfig, MultiStepLRConfig, diff --git a/tests/unittests/optim/lr_scheduler/test_factory.py b/tests/unittests/optim/lr_scheduler/test_factory.py index 
cffb3d138..559f078ab 100644 --- a/tests/unittests/optim/lr_scheduler/test_factory.py +++ b/tests/unittests/optim/lr_scheduler/test_factory.py @@ -4,7 +4,7 @@ from torch.optim import SGD from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau -from clinicadl.optim.lr_scheduler import ( +from clinicadl.optimization.lr_scheduler import ( ImplementedLRScheduler, create_lr_scheduler_config, get_lr_scheduler, diff --git a/tests/unittests/optim/optimizer/test_config.py b/tests/unittests/optim/optimizer/test_config.py index bf1dbcd8f..7888f4eb8 100644 --- a/tests/unittests/optim/optimizer/test_config.py +++ b/tests/unittests/optim/optimizer/test_config.py @@ -1,7 +1,7 @@ import pytest from pydantic import ValidationError -from clinicadl.optim.optimizer.config import ( +from clinicadl.optimization.optimizer.config import ( AdadeltaConfig, AdagradConfig, AdamConfig, diff --git a/tests/unittests/optim/optimizer/test_factory.py b/tests/unittests/optim/optimizer/test_factory.py index 47b44a00a..12507134c 100644 --- a/tests/unittests/optim/optimizer/test_factory.py +++ b/tests/unittests/optim/optimizer/test_factory.py @@ -5,12 +5,12 @@ import torch.nn as nn from torch.optim import Adagrad -from clinicadl.optim.optimizer import ( +from clinicadl.optimization.optimizer import ( ImplementedOptimizer, create_optimizer_config, get_optimizer, ) -from clinicadl.optim.optimizer.factory import ( +from clinicadl.optimization.optimizer.factory import ( _get_params_in_group, _get_params_not_in_group, _regroup_args, diff --git a/tests/unittests/optim/optimizer/test_utils.py b/tests/unittests/optim/optimizer/test_utils.py index afa06a5d0..ba480af94 100644 --- a/tests/unittests/optim/optimizer/test_utils.py +++ b/tests/unittests/optim/optimizer/test_utils.py @@ -27,7 +27,7 @@ def network(): def test_get_params_in_groups(network): import torch - from clinicadl.optim.optimizer.utils import get_params_in_groups + from clinicadl.optimization.optimizer.utils import get_params_in_groups iterator, list_layers = get_params_in_groups(network, "dense1") assert next(iter(iterator)).shape == torch.Size((10, 10)) @@ -77,7 +77,7 @@ def test_get_params_in_groups(network): def test_find_params_not_in_group(network): import torch - from clinicadl.optim.optimizer.utils import get_params_not_in_groups + from clinicadl.optimization.optimizer.utils import get_params_not_in_groups iterator, list_layers = get_params_not_in_groups( network, diff --git a/tests/unittests/optim/test_config.py b/tests/unittests/optim/test_config.py index 9b980bb84..c74a13265 100644 --- a/tests/unittests/optim/test_config.py +++ b/tests/unittests/optim/test_config.py @@ -1,4 +1,4 @@ -from clinicadl.optim import OptimizationConfig +from clinicadl.optimization import OptimizationConfig def test_OptimizationConfig(): diff --git a/tests/unittests/train/trainer/test_training_config.py b/tests/unittests/train/trainer/test_training_config.py index c6b130cb8..061c64afe 100644 --- a/tests/unittests/train/trainer/test_training_config.py +++ b/tests/unittests/train/trainer/test_training_config.py @@ -3,9 +3,9 @@ import pytest from pydantic import ValidationError -from clinicadl.caps_dataset.data_config import DataConfig -from clinicadl.caps_dataset.dataloader_config import DataLoaderConfig -from clinicadl.network.config import NetworkConfig +from clinicadl.dataset.data_config import DataConfig +from clinicadl.dataset.dataloader_config import DataLoaderConfig +from clinicadl.networks.old_network.config import NetworkConfig from clinicadl.predictor.validation import 
ValidationConfig
 from clinicadl.trainer.transfer_learning import TransferLearningConfig
 from clinicadl.transforms.config import TransformsConfig
 
@@ -116,12 +116,10 @@ def network_task(self) -> str:
     params=[
         {"gpu": "abc"},
         {"n_splits": -1},
-        {"optimizer": "abc"},
         {"data_augmentation": ("abc",)},
         {"diagnoses": "AD"},
         {"batch_size": 0},
         {"size_reduction_factor": 1},
-        {"learning_rate": 0.0},
         {"split": [-1]},
         {"tolerance": -0.01},
     ]
@@ -135,7 +133,6 @@ def good_inputs(dummy_arguments):
     options = {
         "gpu": False,
         "n_splits": 7,
-        "optimizer": "Adagrad",
         "data_augmentation": ("Smoothing",),
         "diagnoses": ("AD",),
         "batch_size": 1,
@@ -156,12 +153,10 @@ def test_passes_validations(good_inputs, training_config):
     c = training_config(**good_inputs)
     assert not c.computational.gpu
     assert c.split.n_splits == 7
-    assert c.optimizer.optimizer == "Adagrad"
     assert c.transforms.data_augmentation == ("Smoothing",)
     assert c.data.diagnoses == ("AD",)
     assert c.dataloader.batch_size == 1
     assert c.transforms.size_reduction_factor == 5
-    assert c.optimizer.learning_rate == 1e-1
     assert c.split.split == (0,)
     assert c.early_stopping.tolerance == 0.0
 
diff --git a/tests/unittests/utils/test_clinica_utils.py b/tests/unittests/utils/test_clinica_utils.py
index 7b87ceacb..087441ff3 100644
--- a/tests/unittests/utils/test_clinica_utils.py
+++ b/tests/unittests/utils/test_clinica_utils.py
@@ -21,8 +21,8 @@ def test_pet_linear_nii(
     tracer, suvr_reference_region, uncropped_image, expected_pattern
 ):
-    from clinicadl.caps_dataset.preprocessing.config import PETPreprocessingConfig
-    from clinicadl.caps_dataset.preprocessing.utils import pet_linear_nii
+    from clinicadl.dataset.config.preprocessing import PETPreprocessingConfig
+    from clinicadl.dataset.utils import pet_linear_nii
     from clinicadl.utils.iotools.clinica_utils import FileType
 
     config = PETPreprocessingConfig(

From c68eebdf7cf1c7b6d2d7cb85bcf9c473ca4f999a Mon Sep 17 00:00:00 2001
From: camillebrianceau
Date: Thu, 7 Nov 2024 09:37:03 +0100
Subject: [PATCH 10/16] API ideas

---
 clinicadl/API/complicated_case.py | 135 ++++++++++++++++++++++++++++++
 clinicadl/API/cross_val.py        |  64 ++++++++++++++
 clinicadl/API/single_split.py     |  89 ++++++++++++++++++++
 3 files changed, 288 insertions(+)
 create mode 100644 clinicadl/API/complicated_case.py
 create mode 100644 clinicadl/API/cross_val.py
 create mode 100644 clinicadl/API/single_split.py

diff --git a/clinicadl/API/complicated_case.py b/clinicadl/API/complicated_case.py
new file mode 100644
index 000000000..b71862eff
--- /dev/null
+++ b/clinicadl/API/complicated_case.py
@@ -0,0 +1,135 @@
+from pathlib import Path
+
+import torchio.transforms as transforms
+
+from clinicadl.dataset.caps_reader import CapsReader
+from clinicadl.dataset.concat import ConcatDataset
+from clinicadl.dataset.config.extraction import ExtractionConfig, ExtractionImageConfig, ExtractionPatchConfig, ExtractionSliceConfig
+from clinicadl.dataset.config.preprocessing import (
+    PreprocessingConfig,
+    T1PreprocessingConfig,
+)
+from clinicadl.dataset.old_caps_dataset import (
+    CapsDatasetPatch,
+    CapsDatasetRoi,
+    CapsDatasetSlice,
+)
+from clinicadl.experiment_manager.experiment_manager import ExperimentManager
+from clinicadl.losses.config import CrossEntropyLossConfig
+from clinicadl.losses.factory import get_loss_function
+from clinicadl.model.clinicadl_model import ClinicaDLModel
+from clinicadl.networks.config import ImplementedNetworks
+from clinicadl.networks.factory import (
+    ConvEncoderOptions,
+    create_network_config,
+    get_network_from_config,
+)
+from clinicadl.optimization.optimizer.config import AdamConfig, OptimizerConfig
+from clinicadl.optimization.optimizer.factory import get_optimizer
+from clinicadl.predictor.predictor import Predictor
+from clinicadl.splitter.kfold import KFolder
+from clinicadl.splitter.split import get_single_split, split_tsv
+from clinicadl.trainer.trainer import Trainer
+from clinicadl.transforms.config import TransformsConfig
+
+# Create the Maps Manager / Read/write manager /
+maps_path = Path("/")
+manager = ExperimentManager(
+    maps_path, overwrite=False
+)  # to add in the manager: mlflow / profiler / etc.
+
+caps_directory = Path("caps_directory")  # output of clinica pipelines
+caps_reader = CapsReader(caps_directory, manager=manager)
+
+preprocessing_1 = caps_reader.get_preprocessing("t1-linear")
+caps_reader.prepare_data(
+    preprocessing=preprocessing_1, data_tsv=Path(""), n_proc=2
+)  # returns nothing -> just extracts the image tensors and computes some information for each image
+
+
+transforms_1 = TransformsConfig(
+    object_augmentation=[transforms.Crop, transforms.Transform],
+    image_augmentation=[transforms.Crop, transforms.Transform],
+    extraction=ExtractionPatchConfig(patch_size=3),
+    image_transforms=[transforms.Blur, transforms.Ghosting],
+    object_transforms=[transforms.BiasField, transforms.Motion],
+)  # not mandatory
+
+preprocessing_2 = caps_reader.get_preprocessing("pet-linear")
+transforms_2 = TransformsConfig(
+    object_augmentation=[transforms.Crop, transforms.Transform],
+    image_augmentation=[transforms.Crop, transforms.Transform],
+    extraction=ExtractionSliceConfig(),
+    image_transforms=[transforms.Blur, transforms.Ghosting],
+    object_transforms=[transforms.BiasField, transforms.Motion],
+)
+
+sub_ses_tsv = Path("")
+split_dir = split_tsv(sub_ses_tsv)  # -> creates a test.tsv and a train.tsv
+
+dataset_t1_roi = caps_reader.get_dataset(
+    preprocessing=preprocessing_1,
+    sub_ses_tsv=split_dir / "train.tsv",
+    transforms=transforms_1,
+)  # do we give a config or an object for transforms?
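+
+# Hedged usage sketch, not part of the proposed API file: assuming the objects
+# returned by `get_dataset` follow the torch Dataset protocol (__len__/__getitem__),
+# as the CapsDataset* type names suggest, they should plug into a standard
+# DataLoader. The import placement and batch size are illustrative only.
+from torch.utils.data import DataLoader
+
+loader_t1_roi = DataLoader(dataset_t1_roi, batch_size=4, shuffle=True)
+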
+dataset_pet_patch = caps_reader.get_dataset(
+    preprocessing=preprocessing_2,
+    sub_ses_tsv=split_dir / "train.tsv",
+    transforms=transforms_2,
+)
+
+dataset_multi_modality_multi_extract = ConcatDataset(
+    [
+        dataset_t1_roi,
+        dataset_pet_patch,
+        caps_reader.get_dataset_from_json(json_path=Path(""), sub_ses_tsv=sub_ses_tsv),
+    ]
+)  # two train.tsv files as input that must be concatenated; same care needed for the transforms
+
+# TODO : think about adding transforms in extract_json
+
+
+config_file = Path("config_file")
+trainer = Trainer.from_json(config_file=config_file, manager=manager)
+
+# CROSS-VALIDATION CASE
+splitter = KFolder(
+    n_splits=3, caps_dataset=dataset_multi_modality_multi_extract, manager=manager
+)
+
+for split in splitter.split_iterator(split_list=[0, 1]):
+    # clearly define what is in the split object
+
+    network_config = create_network_config(ImplementedNetworks.CNN)(
+        in_shape=[2, 2, 2],
+        num_outputs=1,
+        conv_args=ConvEncoderOptions(channels=[3, 2, 2]),
+    )
+    model = ClinicaDLModelClassif.from_config(
+        network_config=network_config,
+        loss_config=CrossEntropyLossConfig(),
+        optimizer_config=AdamConfig(),
+    )
+
+    trainer.train(model, split)
+    # the trainer will instantiate a predictor/validator in train or in init
+
+# TEST
+
+preprocessing_test = caps_reader.get_preprocessing("pet-linear")
+transforms_test = TransformsConfig(
+    object_augmentation=[transforms.Crop, transforms.Transform],
+    image_augmentation=[transforms.Crop, transforms.Transform],
+    extraction=ExtractionImageConfig(),
+    image_transforms=[transforms.Blur, transforms.Ghosting],
+    object_transforms=[transforms.BiasField, transforms.Motion],
+)
+
+dataset_test = caps_reader.get_dataset(
+    preprocessing=preprocessing_test,
+    sub_ses_tsv=split_dir / "test.tsv",
+    transforms=transforms_test,
+)
+
+predictor = Predictor(manager=manager)
+predictor.predict(dataset_test=dataset_test, split_number=2)
diff --git a/clinicadl/API/cross_val.py b/clinicadl/API/cross_val.py
new file mode 100644
index 000000000..f2a6b90fe
--- /dev/null
+++ b/clinicadl/API/cross_val.py
@@ -0,0 +1,64 @@
+from pathlib import Path
+
+import torchio.transforms as transforms
+
+from clinicadl.dataset.caps_reader import CapsReader
+from clinicadl.dataset.concat import ConcatDataset
+from clinicadl.dataset.config.extraction import ExtractionConfig
+from clinicadl.dataset.config.preprocessing import (
+    PreprocessingConfig,
+    T1PreprocessingConfig,
+)
+from clinicadl.dataset.old_caps_dataset import (
+    CapsDatasetPatch,
+    CapsDatasetRoi,
+    CapsDatasetSlice,
+)
+from clinicadl.experiment_manager.experiment_manager import ExperimentManager
+from clinicadl.losses.config import CrossEntropyLossConfig
+from clinicadl.losses.factory import get_loss_function
+from clinicadl.model.clinicadl_model import ClinicaDLModel
+from clinicadl.networks.config import ImplementedNetworks
+from clinicadl.networks.factory import (
+    ConvEncoderOptions,
+    create_network_config,
+    get_network_from_config,
+)
+from clinicadl.optimization.optimizer.config import AdamConfig, OptimizerConfig
+from clinicadl.optimization.optimizer.factory import get_optimizer
+from clinicadl.predictor.predictor import Predictor
+from clinicadl.splitter.kfold import KFolder
+from clinicadl.splitter.split import get_single_split, split_tsv
+from clinicadl.trainer.trainer import Trainer
+from clinicadl.transforms.config import TransformsConfig
+
+# SIMPLE EXPERIMENT WITH A CAPS ALREADY EXISTING
+
+maps_path = Path("/")
+manager = ExperimentManager(maps_path, overwrite=False)
+
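+# Assumption, for illustration only: `CapsDatasetPatch.from_json` is read here as
+# rebuilding a dataset from extraction parameters serialized during `prepare_data`,
+# i.e. the JSON below would hold something like
+# {"extraction": "patch", "patch_size": ..., "stride_size": ...}.
+# The exact keys are hypothetical; only the paths come from this sketch.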
+dataset_t1_image = CapsDatasetPatch.from_json(
+    extraction=Path("test.json"),
+    sub_ses_tsv=Path("split_dir") / "train.tsv",
+)
+config_file = Path("config_file")
+trainer = Trainer.from_json(config_file=config_file, manager=manager)
+
+# CROSS-VALIDATION CASE
+splitter = KFolder(n_splits=3, caps_dataset=dataset_t1_image, manager=manager)
+
+for split in splitter.split_iterator(split_list=[0, 1]):
+    # clearly define what is in the split object
+
+    loss, loss_config = get_loss_function(CrossEntropyLossConfig())
+    network_config = create_network_config(ImplementedNetworks.CNN)(
+        in_shape=[2, 2, 2],
+        num_outputs=1,
+        conv_args=ConvEncoderOptions(channels=[3, 2, 2]),
+    )
+    network, _ = get_network_from_config(network_config)
+    optimizer, _ = get_optimizer(network, AdamConfig())
+    model = ClinicaDLModel(network=network, loss=loss, optimizer=optimizer)
+
+    trainer.train(model, split)
+    # the trainer will instantiate a predictor/validator in train or in init
diff --git a/clinicadl/API/single_split.py b/clinicadl/API/single_split.py
new file mode 100644
index 000000000..4074a7c0b
--- /dev/null
+++ b/clinicadl/API/single_split.py
@@ -0,0 +1,89 @@
+from pathlib import Path
+
+import torchio.transforms as transforms
+
+from clinicadl.dataset.caps_reader import CapsReader
+from clinicadl.dataset.concat import ConcatDataset
+from clinicadl.dataset.config.extraction import ExtractionConfig, ExtractionPatchConfig
+from clinicadl.dataset.config.preprocessing import (
+    PreprocessingConfig,
+    T1PreprocessingConfig,
+)
+from clinicadl.experiment_manager.experiment_manager import ExperimentManager
+from clinicadl.losses.config import CrossEntropyLossConfig
+from clinicadl.losses.factory import get_loss_function
+from clinicadl.model.clinicadl_model import ClinicaDLModel
+from clinicadl.networks.config import ImplementedNetworks
+from clinicadl.networks.factory import (
+    ConvEncoderOptions,
+    create_network_config,
+    get_network_from_config,
+)
+from clinicadl.optimization.optimizer.config import AdamConfig, OptimizerConfig
+from clinicadl.optimization.optimizer.factory import get_optimizer
+from clinicadl.predictor.predictor import Predictor
+from clinicadl.splitter.kfold import KFolder
+from clinicadl.splitter.split import get_single_split, split_tsv
+from clinicadl.trainer.trainer import Trainer
+from clinicadl.transforms.config import TransformsConfig
+from clinicadl.transforms.transforms import Transforms
+from clinicadl.utils.enum import ExtractionMethod
+
+# SIMPLE EXPERIMENT
+
+
+maps_path = Path("/")
+manager = ExperimentManager(maps_path, overwrite=False)
+
+caps_directory = Path("caps_directory")  # output of clinica pipelines
+caps_reader = CapsReader(
+    caps_directory, manager=manager
+)  # a bit odd to pass a maps_path here via the manager, since we don't necessarily want to run a training??
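+# One possible answer, purely illustrative and not part of the proposed API:
+# make `manager` optional so the reader can be built outside of training, e.g.
+# caps_reader = CapsReader(caps_directory, manager=None)  # hypothetical read-only use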
+
+preprocessing_t1 = caps_reader.get_preprocessing("t1-linear")
+caps_reader.prepare_data(
+    preprocessing=preprocessing_t1,
+    data_tsv=Path(""),
+    n_proc=2,
+    use_uncropped_images=False,
+)
+transforms_1 = Transforms(
+    object_augmentation=[transforms.RandomMotion()],  # default = no transforms
+    image_augmentation=[transforms.RandomMotion()],  # default = no transforms
+    object_transforms=[transforms.Blur((0.4, 0.5, 0.6))],  # default = none
+    image_transforms=[transforms.Noise(0.2, 0.5, 3)],  # default = MiniMax
+    extraction=ExtractionPatchConfig(patch_size=30, stride_size=20),  # default = Image
+)  # not mandatory
+
+sub_ses_tsv = Path("")
+split_dir = split_tsv(sub_ses_tsv)  # -> creates a test.tsv and a train.tsv
+
+# for the cli I think we should have a splitter for the single split case so we can have the same behaviour for single and kfold
+
+dataset_t1_image = caps_reader.get_dataset(
+    preprocessing=preprocessing_t1,
+    sub_ses_tsv=split_dir / "train.tsv",
+    transforms=transforms_1,
+)  # do we give a config or an object for transforms?
+
+
+# SINGLE SPLIT CASE
+split = get_single_split(
+    n_subject_validation=0,
+    caps_dataset=dataset_t1_image,
+    manager=manager,
+)
+config_file = Path("config_file")
+trainer = Trainer.from_json(config_file=config_file, manager=manager)
+
+
+loss, loss_config = get_loss_function(CrossEntropyLossConfig())
+network_config = create_network_config(ImplementedNetworks.CNN)(
+    in_shape=[2, 2, 2], num_outputs=1, conv_args=ConvEncoderOptions(channels=[3, 2, 2])
+)
+network, _ = get_network_from_config(network_config)
+optimizer, _ = get_optimizer(network, AdamConfig())
+model = ClinicaDLModelClassif(network=network, loss=loss, optimizer=optimizer)
+
+trainer.train(model, split)
+# the trainer will instantiate a predictor/validator in train or in init

From 418ccffb99281a989e36a0a1c1bcdba5f470b367 Mon Sep 17 00:00:00 2001
From: thibaultdvx
Date: Wed, 16 Oct 2024 15:07:45 +0200
Subject: [PATCH 11/16] fix issue with enum

---
 clinicadl/dataset/caps_dataset.py |  4 ++--
 clinicadl/trainer/tasks_utils.py  | 22 ++++++----------------
 2 files changed, 8 insertions(+), 18 deletions(-)

diff --git a/clinicadl/dataset/caps_dataset.py b/clinicadl/dataset/caps_dataset.py
index d45dc5aa6..f03894a95 100644
--- a/clinicadl/dataset/caps_dataset.py
+++ b/clinicadl/dataset/caps_dataset.py
@@ -580,11 +580,11 @@ def _get_mask_paths_and_tensors(
     else:
         for template_ in Template:
             if preprocessing_.name == template_.name:
-                template_name = template_
+                template_name = template_.value
 
         for pattern_ in Pattern:
             if preprocessing_.name == pattern_.name:
-                pattern = pattern_
+                pattern = pattern_.value
 
     mask_location = caps_directory / "masks" / f"tpl-{template_name}"
 
diff --git a/clinicadl/trainer/tasks_utils.py b/clinicadl/trainer/tasks_utils.py
index a14bfa4a9..e17ab44c2 100644
--- a/clinicadl/trainer/tasks_utils.py
+++ b/clinicadl/trainer/tasks_utils.py
@@ -1,31 +1,21 @@
-from abc import abstractmethod
 from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
 
 import numpy as np
 import pandas as pd
 import torch
-import torch.distributed as dist
-from pydantic import (
-    BaseModel,
-    ConfigDict,
-    computed_field,
-    model_validator,
-)
 from torch import Tensor, nn
-from torch.amp import autocast
 from torch.nn.functional import softmax
-from torch.nn.modules.loss import _Loss
-from torch.utils.data import DataLoader, Sampler, sampler
+from torch.utils.data import Sampler, sampler
 from torch.utils.data.distributed import DistributedSampler
 
 from clinicadl.dataset.caps_dataset import CapsDataset
 from 
clinicadl.metrics.old_metrics.metric_module import MetricModule from clinicadl.networks.old_network.network import Network from clinicadl.trainer.config.train import TrainConfig -from clinicadl.utils import cluster from clinicadl.utils.enum import ( ClassificationLoss, ClassificationMetric, + Mode, ReconstructionLoss, ReconstructionMetric, RegressionLoss, @@ -249,7 +239,7 @@ def save_outputs(network_task: Union[str, Task]): def generate_test_row( network_task: Union[str, Task], - mode: str, + mode: Mode, metrics_module, n_classes: int, idx: int, @@ -274,7 +264,7 @@ def generate_test_row( [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode}_id"][idx].item(), + data[f"{mode.value}_id"][idx].item(), data["label"][idx].item(), prediction, ] @@ -286,7 +276,7 @@ def generate_test_row( [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode}_id"][idx].item(), + data[f"{mode.value}_id"][idx].item(), data["label"][idx].item(), outputs[idx].item(), ] @@ -298,7 +288,7 @@ def generate_test_row( row = [ data["participant_id"][idx], data["session_id"][idx], - data[f"{mode}_id"][idx].item(), + data[f"{mode.value}_id"][idx].item(), ] for metric in evaluation_metrics(Task.RECONSTRUCTION): From 8e122ab39e173b5a39be42c160469f345c14c033 Mon Sep 17 00:00:00 2001 From: thibaultdvx Date: Wed, 16 Oct 2024 15:11:27 +0200 Subject: [PATCH 12/16] fix numpy issue --- clinicadl/networks/old_network/cnn/random.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clinicadl/networks/old_network/cnn/random.py b/clinicadl/networks/old_network/cnn/random.py index 221fee3f5..38f889b0d 100644 --- a/clinicadl/networks/old_network/cnn/random.py +++ b/clinicadl/networks/old_network/cnn/random.py @@ -208,7 +208,7 @@ def fc_dict_design(n_fcblocks, convolutions, initial_shape, n_classes=2): out_channels = last_conv["out_channels"] flattened_shape = np.ceil(np.array(initial_shape) / 2**n_conv) flattened_shape[0] = out_channels - in_features = np.product(flattened_shape) + in_features = np.prod(flattened_shape) # Sample number of FC layers ratio = (in_features / n_classes) ** (1 / n_fcblocks) From 255cb22265785d3201c353dbf9307ce44669516a Mon Sep 17 00:00:00 2001 From: Thibault de Varax <154365476+thibaultdvx@users.noreply.github.com> Date: Wed, 16 Oct 2024 18:09:00 +0200 Subject: [PATCH 13/16] Revert unwanted merge (#672) --- clinicadl/dataset/caps_dataset.py | 4 ++-- clinicadl/networks/old_network/cnn/random.py | 2 +- clinicadl/trainer/tasks_utils.py | 22 ++++++++++++++------ 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/clinicadl/dataset/caps_dataset.py b/clinicadl/dataset/caps_dataset.py index f03894a95..d45dc5aa6 100644 --- a/clinicadl/dataset/caps_dataset.py +++ b/clinicadl/dataset/caps_dataset.py @@ -580,11 +580,11 @@ def _get_mask_paths_and_tensors( else: for template_ in Template: if preprocessing_.name == template_.name: - template_name = template_.value + template_name = template_ for pattern_ in Pattern: if preprocessing_.name == pattern_.name: - pattern = pattern_.value + pattern = pattern_ mask_location = caps_directory / "masks" / f"tpl-{template_name}" diff --git a/clinicadl/networks/old_network/cnn/random.py b/clinicadl/networks/old_network/cnn/random.py index 38f889b0d..221fee3f5 100644 --- a/clinicadl/networks/old_network/cnn/random.py +++ b/clinicadl/networks/old_network/cnn/random.py @@ -208,7 +208,7 @@ def fc_dict_design(n_fcblocks, convolutions, initial_shape, n_classes=2): out_channels = last_conv["out_channels"] flattened_shape = 
np.ceil(np.array(initial_shape) / 2**n_conv)
     flattened_shape[0] = out_channels
-    in_features = np.prod(flattened_shape)
+    in_features = np.product(flattened_shape)
 
     # Sample number of FC layers
     ratio = (in_features / n_classes) ** (1 / n_fcblocks)
diff --git a/clinicadl/trainer/tasks_utils.py b/clinicadl/trainer/tasks_utils.py
index e17ab44c2..a14bfa4a9 100644
--- a/clinicadl/trainer/tasks_utils.py
+++ b/clinicadl/trainer/tasks_utils.py
@@ -1,21 +1,31 @@
+from abc import abstractmethod
 from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
 
 import numpy as np
 import pandas as pd
 import torch
+import torch.distributed as dist
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    computed_field,
+    model_validator,
+)
 from torch import Tensor, nn
+from torch.amp import autocast
 from torch.nn.functional import softmax
-from torch.utils.data import Sampler, sampler
+from torch.nn.modules.loss import _Loss
+from torch.utils.data import DataLoader, Sampler, sampler
 from torch.utils.data.distributed import DistributedSampler
 
 from clinicadl.dataset.caps_dataset import CapsDataset
 from clinicadl.metrics.old_metrics.metric_module import MetricModule
 from clinicadl.networks.old_network.network import Network
 from clinicadl.trainer.config.train import TrainConfig
+from clinicadl.utils import cluster
 from clinicadl.utils.enum import (
     ClassificationLoss,
     ClassificationMetric,
-    Mode,
     ReconstructionLoss,
     ReconstructionMetric,
     RegressionLoss,
@@ -239,7 +249,7 @@ def save_outputs(network_task: Union[str, Task]):
 
 def generate_test_row(
     network_task: Union[str, Task],
-    mode: Mode,
+    mode: str,
     metrics_module,
     n_classes: int,
     idx: int,
@@ -264,7 +274,7 @@ def generate_test_row(
         [
             data["participant_id"][idx],
             data["session_id"][idx],
-            data[f"{mode.value}_id"][idx].item(),
+            data[f"{mode}_id"][idx].item(),
             data["label"][idx].item(),
             prediction,
         ]
@@ -276,7 +286,7 @@ def generate_test_row(
         [
             data["participant_id"][idx],
             data["session_id"][idx],
-            data[f"{mode.value}_id"][idx].item(),
+            data[f"{mode}_id"][idx].item(),
             data["label"][idx].item(),
             outputs[idx].item(),
         ]
@@ -288,7 +298,7 @@ def generate_test_row(
         row = [
             data["participant_id"][idx],
             data["session_id"][idx],
-            data[f"{mode.value}_id"][idx].item(),
+            data[f"{mode}_id"][idx].item(),
         ]
 
     for metric in evaluation_metrics(Task.RECONSTRUCTION):

From bd05e513586a7582993c051bf6165ff397a8761a Mon Sep 17 00:00:00 2001
From: Thibault de Varax <154365476+thibaultdvx@users.noreply.github.com>
Date: Fri, 18 Oct 2024 12:54:58 +0200
Subject: [PATCH 14/16] Basic neural networks (#660)

---

* add customizable networks (MLP, ConvEncoder, ConvDecoder, CNN, Generator, AutoEncoder, VAE)
* add sota networks (ResNet, DenseNet, SE-ResNet, UNet, Attention-UNet, Vision Transformer)
* update config classes
* update factory function
---
 clinicadl/monai_networks/config/cnn.py       |  24 +
 .../monai_networks/config/conv_decoder.py    |  65 ++
 .../monai_networks/config/conv_encoder.py    |  64 ++
 clinicadl/monai_networks/config/mlp.py       |  52 ++
 clinicadl/monai_networks/config/senet.py     |  60 ++
 clinicadl/monai_networks/nn/__init__.py      |  13 +
 clinicadl/monai_networks/nn/att_unet.py      | 207 +++++++
 clinicadl/monai_networks/nn/autoencoder.py   | 416 +++++++++++++
 clinicadl/monai_networks/nn/cnn.py           | 124 ++++
 clinicadl/monai_networks/nn/conv_decoder.py  | 388 ++++++++++++
 clinicadl/monai_networks/nn/conv_encoder.py  | 392 ++++++++++++
 clinicadl/monai_networks/nn/densenet.py      | 312 ++++++++++
 clinicadl/monai_networks/nn/generator.py     | 131 ++++
 .../monai_networks/nn/layers/__init__.py     |   0
clinicadl/monai_networks/nn/layers/resnet.py | 124 ++++ clinicadl/monai_networks/nn/layers/senet.py | 142 +++++ clinicadl/monai_networks/nn/layers/unet.py | 102 ++++ clinicadl/monai_networks/nn/layers/unpool.py | 87 +++ .../nn/layers/utils/__init__.py | 19 + .../monai_networks/nn/layers/utils/enum.py | 65 ++ .../monai_networks/nn/layers/utils/types.py | 37 ++ clinicadl/monai_networks/nn/layers/vit.py | 94 +++ clinicadl/monai_networks/nn/mlp.py | 146 +++++ clinicadl/monai_networks/nn/resnet.py | 566 ++++++++++++++++++ clinicadl/monai_networks/nn/senet.py | 214 +++++++ clinicadl/monai_networks/nn/unet.py | 250 ++++++++ clinicadl/monai_networks/nn/utils/__init__.py | 14 + clinicadl/monai_networks/nn/utils/checks.py | 167 ++++++ clinicadl/monai_networks/nn/utils/shapes.py | 203 +++++++ clinicadl/monai_networks/nn/vae.py | 200 +++++++ clinicadl/monai_networks/nn/vit.py | 420 +++++++++++++ 31 files changed, 5098 insertions(+) create mode 100644 clinicadl/monai_networks/config/cnn.py create mode 100644 clinicadl/monai_networks/config/conv_decoder.py create mode 100644 clinicadl/monai_networks/config/conv_encoder.py create mode 100644 clinicadl/monai_networks/config/mlp.py create mode 100644 clinicadl/monai_networks/config/senet.py create mode 100644 clinicadl/monai_networks/nn/__init__.py create mode 100644 clinicadl/monai_networks/nn/att_unet.py create mode 100644 clinicadl/monai_networks/nn/autoencoder.py create mode 100644 clinicadl/monai_networks/nn/cnn.py create mode 100644 clinicadl/monai_networks/nn/conv_decoder.py create mode 100644 clinicadl/monai_networks/nn/conv_encoder.py create mode 100644 clinicadl/monai_networks/nn/densenet.py create mode 100644 clinicadl/monai_networks/nn/generator.py create mode 100644 clinicadl/monai_networks/nn/layers/__init__.py create mode 100644 clinicadl/monai_networks/nn/layers/resnet.py create mode 100644 clinicadl/monai_networks/nn/layers/senet.py create mode 100644 clinicadl/monai_networks/nn/layers/unet.py create mode 100644 clinicadl/monai_networks/nn/layers/unpool.py create mode 100644 clinicadl/monai_networks/nn/layers/utils/__init__.py create mode 100644 clinicadl/monai_networks/nn/layers/utils/enum.py create mode 100644 clinicadl/monai_networks/nn/layers/utils/types.py create mode 100644 clinicadl/monai_networks/nn/layers/vit.py create mode 100644 clinicadl/monai_networks/nn/mlp.py create mode 100644 clinicadl/monai_networks/nn/resnet.py create mode 100644 clinicadl/monai_networks/nn/senet.py create mode 100644 clinicadl/monai_networks/nn/unet.py create mode 100644 clinicadl/monai_networks/nn/utils/__init__.py create mode 100644 clinicadl/monai_networks/nn/utils/checks.py create mode 100644 clinicadl/monai_networks/nn/utils/shapes.py create mode 100644 clinicadl/monai_networks/nn/vae.py create mode 100644 clinicadl/monai_networks/nn/vit.py diff --git a/clinicadl/monai_networks/config/cnn.py b/clinicadl/monai_networks/config/cnn.py new file mode 100644 index 000000000..a7d2043db --- /dev/null +++ b/clinicadl/monai_networks/config/cnn.py @@ -0,0 +1,24 @@ +from typing import Optional, Sequence, Union + +from pydantic import PositiveInt, computed_field + +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig +from .conv_encoder import ConvEncoderOptions +from .mlp import MLPOptions + + +class CNNConfig(NetworkConfig): + """Config class for CNN.""" + + in_shape: Sequence[PositiveInt] + num_outputs: PositiveInt + conv_args: ConvEncoderOptions + mlp_args: Union[Optional[MLPOptions], 
DefaultFromLibrary] = DefaultFromLibrary.YES + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.CNN diff --git a/clinicadl/monai_networks/config/conv_decoder.py b/clinicadl/monai_networks/config/conv_decoder.py new file mode 100644 index 000000000..5dc78dfec --- /dev/null +++ b/clinicadl/monai_networks/config/conv_decoder.py @@ -0,0 +1,65 @@ +from typing import Optional, Sequence, Union + +from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field + +from clinicadl.monai_networks.nn.layers.utils import ( + ActivationParameters, + ConvNormalizationParameters, + ConvParameters, + UnpoolingParameters, +) +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig + + +class ConvDecoderOptions(BaseModel): + """ + Config class for ConvDecoder when it is a submodule. + See for example: :py:class:`clinicadl.monai_networks.nn.generator.Generator` + """ + + channels: Sequence[PositiveInt] + kernel_size: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + stride: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + output_padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + dilation: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + unpooling: Union[ + Optional[UnpoolingParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + unpooling_indices: Union[ + Optional[Sequence[int]], DefaultFromLibrary + ] = DefaultFromLibrary.YES + act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + norm: Union[ + Optional[ConvNormalizationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES + bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES + adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES + + # pydantic config + model_config = ConfigDict( + validate_assignment=True, + use_enum_values=True, + validate_default=True, + ) + + +class ConvDecoderConfig(NetworkConfig, ConvDecoderOptions): + """Config class for ConvDecoder.""" + + spatial_dims: PositiveInt + in_channels: PositiveInt + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.CONV_DECODER diff --git a/clinicadl/monai_networks/config/conv_encoder.py b/clinicadl/monai_networks/config/conv_encoder.py new file mode 100644 index 000000000..499f69b19 --- /dev/null +++ b/clinicadl/monai_networks/config/conv_encoder.py @@ -0,0 +1,64 @@ +from typing import Optional, Sequence, Union + +from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field + +from clinicadl.monai_networks.nn.layers.utils import ( + ActivationParameters, + ConvNormalizationParameters, + ConvParameters, + PoolingParameters, +) +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig + + +class ConvEncoderOptions(BaseModel): + """ + Config class for ConvEncoder when it is a submodule. 
+ See for example: :py:class:`clinicadl.monai_networks.nn.cnn.CNN` + """ + + channels: Sequence[PositiveInt] + kernel_size: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + stride: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + dilation: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES + pooling: Union[ + Optional[PoolingParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + pooling_indices: Union[ + Optional[Sequence[int]], DefaultFromLibrary + ] = DefaultFromLibrary.YES + act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + norm: Union[ + Optional[ConvNormalizationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES + bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES + adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES + + # pydantic config + model_config = ConfigDict( + validate_assignment=True, + use_enum_values=True, + validate_default=True, + ) + + +class ConvEncoderConfig(NetworkConfig, ConvEncoderOptions): + """Config class for ConvEncoder.""" + + spatial_dims: PositiveInt + in_channels: PositiveInt + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.CONV_ENCODER diff --git a/clinicadl/monai_networks/config/mlp.py b/clinicadl/monai_networks/config/mlp.py new file mode 100644 index 000000000..5d12f303f --- /dev/null +++ b/clinicadl/monai_networks/config/mlp.py @@ -0,0 +1,52 @@ +from typing import Optional, Sequence, Union + +from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field + +from clinicadl.monai_networks.nn.layers.utils import ( + ActivationParameters, + NormalizationParameters, +) +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkConfig + + +class MLPOptions(BaseModel): + """ + Config class for MLP when it is a submodule. 
+ See for example: :py:class:`clinicadl.monai_networks.nn.cnn.CNN` + """ + + hidden_channels: Sequence[PositiveInt] + act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + output_act: Union[ + Optional[ActivationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + norm: Union[ + Optional[NormalizationParameters], DefaultFromLibrary + ] = DefaultFromLibrary.YES + dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES + bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES + adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES + + # pydantic config + model_config = ConfigDict( + validate_assignment=True, + use_enum_values=True, + validate_default=True, + ) + + +class MLPConfig(NetworkConfig, MLPOptions): + """Config class for Multi Layer Perceptron.""" + + in_channels: PositiveInt + out_channels: PositiveInt + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.MLP diff --git a/clinicadl/monai_networks/config/senet.py b/clinicadl/monai_networks/config/senet.py new file mode 100644 index 000000000..79a356726 --- /dev/null +++ b/clinicadl/monai_networks/config/senet.py @@ -0,0 +1,60 @@ +from typing import Union + +from pydantic import PositiveInt, computed_field + +from clinicadl.utils.factories import DefaultFromLibrary + +from .base import ImplementedNetworks, NetworkType, PreTrainedConfig +from .resnet import ResNetConfig + + +class SEResNetConfig(ResNetConfig): + """Config class for Squeeze-and-Excitation ResNet.""" + + se_reduction: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.SE_RESNET + + +class PreTrainedSEResNetConfig(PreTrainedConfig): + """Base config class for SOTA SE-ResNets.""" + + @computed_field + @property + def _type(self) -> NetworkType: + """To know where to look for the network.""" + return NetworkType.SE_RESNET + + +class SEResNet50Config(PreTrainedSEResNetConfig): + """Config class for SE-ResNet-50.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.SE_RESNET_50 + + +class SEResNet101Config(PreTrainedSEResNetConfig): + """Config class for SE-ResNet-101.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.SE_RESNET_101 + + +class SEResNet152Config(PreTrainedSEResNetConfig): + """Config class for SE-ResNet-152.""" + + @computed_field + @property + def name(self) -> ImplementedNetworks: + """The name of the network.""" + return ImplementedNetworks.SE_RESNET_152 diff --git a/clinicadl/monai_networks/nn/__init__.py b/clinicadl/monai_networks/nn/__init__.py new file mode 100644 index 000000000..0e1c7054a --- /dev/null +++ b/clinicadl/monai_networks/nn/__init__.py @@ -0,0 +1,13 @@ +from .att_unet import AttentionUNet +from .autoencoder import AutoEncoder +from .cnn import CNN +from .conv_decoder import ConvDecoder +from .conv_encoder import ConvEncoder +from .densenet import DenseNet, get_densenet +from .generator import Generator +from .mlp import MLP +from .resnet import ResNet, get_resnet +from .senet import SEResNet, get_seresnet +from .unet import UNet +from .vae import VAE +from .vit import ViT, get_vit diff --git a/clinicadl/monai_networks/nn/att_unet.py 
b/clinicadl/monai_networks/nn/att_unet.py new file mode 100644 index 000000000..77ef02081 --- /dev/null +++ b/clinicadl/monai_networks/nn/att_unet.py @@ -0,0 +1,207 @@ +from typing import Any + +import torch +from monai.networks.nets.attentionunet import AttentionBlock + +from .layers.unet import ConvBlock, UpSample +from .unet import BaseUNet + + +class AttentionUNet(BaseUNet): + """ + Attention-UNet based on [Attention U-Net: Learning Where to Look for the Pancreas](https://arxiv.org/pdf/1804.03999). + + The user can customize the number of encoding blocks, the number of channels in each block, as well as other parameters + like the activation function. + + .. warning:: AttentionUNet works only with images whose dimensions are high enough powers of 2. More precisely, if n is the + number of max pooling operation in your AttentionUNet (which is equal to `len(channels)-1`), the image must have :math:`2^{k}` + pixels in each dimension, with :math:`k \\geq n` (e.g. shape (:math:`2^{n}`, :math:`2^{n+3}`) for a 2D image). + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. + out_channels : int + number of output channels. + kwargs : Any + any optional argument accepted by (:py:class:`clinicadl.monai_networks.nn.unet.UNet`). + + Examples + -------- + >>> AttentionUNet( + spatial_dims=2, + in_channels=1, + out_channels=2, + channels=(4, 8), + act="elu", + output_act=("softmax", {"dim": 1}), + dropout=0.1, + ) + AttentionUNet( + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(1, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (down1): DownBlock( + (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(4, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + ) + (upsample1): UpSample( + (0): Upsample(scale_factor=2.0, mode='nearest') + (1): Convolution( + (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (attention1): AttentionBlock( + (W_g): Sequential( + (0): Convolution( + (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) + ) + (1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + (W_x): Sequential( + (0): Convolution( + (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) + ) + (1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + (psi): Sequential( + 
(0): Convolution(
+                    (conv): Conv2d(2, 1, kernel_size=(1, 1), stride=(1, 1))
+                )
+                (1): BatchNorm2d(1, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+                (2): Sigmoid()
+            )
+            (relu): ReLU()
+        )
+        (doubleconv1): ConvBlock(
+            (0): Convolution(
+                (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
+                (adn): ADN(
+                    (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+                    (D): Dropout(p=0.1, inplace=False)
+                    (A): ELU(alpha=1.0)
+                )
+            )
+            (1): Convolution(
+                (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
+                (adn): ADN(
+                    (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+                    (D): Dropout(p=0.1, inplace=False)
+                    (A): ELU(alpha=1.0)
+                )
+            )
+        )
+        (reduce_channels): Convolution(
+            (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1))
+        )
+        (output_act): Softmax(dim=1)
+    )
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        out_channels: int,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            spatial_dims=spatial_dims,
+            in_channels=in_channels,
+            out_channels=out_channels,
+            **kwargs,
+        )
+
+    def _build_decoder(self):
+        for i in range(len(self.channels) - 1, 0, -1):
+            self.add_module(
+                f"upsample{i}",
+                UpSample(
+                    spatial_dims=self.spatial_dims,
+                    in_channels=self.channels[i],
+                    out_channels=self.channels[i - 1],
+                    act=self.act,
+                    dropout=self.dropout,
+                ),
+            )
+            self.add_module(
+                f"attention{i}",
+                AttentionBlock(
+                    spatial_dims=self.spatial_dims,
+                    f_l=self.channels[i - 1],
+                    f_g=self.channels[i - 1],
+                    f_int=self.channels[i - 1] // 2,
+                    dropout=self.dropout,
+                ),
+            )
+            self.add_module(
+                f"doubleconv{i}",
+                ConvBlock(
+                    spatial_dims=self.spatial_dims,
+                    in_channels=self.channels[i - 1] * 2,
+                    out_channels=self.channels[i - 1],
+                    act=self.act,
+                    dropout=self.dropout,
+                ),
+            )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x_history = [self.doubleconv(x)]
+
+        for i in range(1, len(self.channels)):
+            x = self.get_submodule(f"down{i}")(x_history[-1])
+            x_history.append(x)
+
+        x_history.pop()  # the output of the bottleneck is not used as a gating signal
+        for i in range(len(self.channels) - 1, 0, -1):
+            up = self.get_submodule(f"upsample{i}")(x)
+            att_res = self.get_submodule(f"attention{i}")(g=x_history.pop(), x=up)
+            merged = torch.cat((att_res, up), dim=1)
+            x = self.get_submodule(f"doubleconv{i}")(merged)
+
+        out = self.reduce_channels(x)
+
+        if self.output_act is not None:
+            out = self.output_act(out)
+
+        return out
diff --git a/clinicadl/monai_networks/nn/autoencoder.py b/clinicadl/monai_networks/nn/autoencoder.py
new file mode 100644
index 000000000..5cf823eeb
--- /dev/null
+++ b/clinicadl/monai_networks/nn/autoencoder.py
@@ -0,0 +1,416 @@
+from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import torch.nn as nn
+
+from .cnn import CNN
+from .conv_encoder import ConvEncoder
+from .generator import Generator
+from .layers.utils import (
+    ActivationParameters,
+    PoolingLayer,
+    SingleLayerPoolingParameters,
+    SingleLayerUnpoolingParameters,
+    UnpoolingLayer,
+    UnpoolingMode,
+)
+from .mlp import MLP
+from .utils import (
+    calculate_conv_out_shape,
+    calculate_convtranspose_out_shape,
+    calculate_pool_out_shape,
+)
+
+
+class AutoEncoder(nn.Sequential):
+    """
+    An autoencoder with convolutional and fully connected layers.
+
+    The user must pass the arguments to build an encoder, from its convolutional and
+    fully connected parts, and the decoder will be automatically built by taking the
+    symmetrical network.
+
+    More precisely, to build the decoder, the order of the encoding layers is reverted, convolutions are
+    replaced by transposed convolutions and pooling layers are replaced by either upsampling or transposed
+    convolution layers.
+    Please note that the order of `Activation`, `Dropout` and `Normalization`, defined with the
+    argument `adn_ordering` in `conv_args`, is the same for the encoder and the decoder.
+
+    Note that an `AutoEncoder` is an aggregation of a `CNN` (:py:class:`clinicadl.monai_networks.nn.
+    cnn.CNN`) and a `Generator` (:py:class:`clinicadl.monai_networks.nn.generator.Generator`).
+
+    Parameters
+    ----------
+    in_shape : Sequence[int]
+        sequence of integers stating the dimension of the input tensor (minus batch dimension).
+    latent_size : int
+        size of the latent vector.
+    conv_args : Dict[str, Any]
+        the arguments for the convolutional part of the encoder. The arguments are those accepted
+        by :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape` that
+        is specified here. So, the only mandatory argument is `channels`.
+    mlp_args : Optional[Dict[str, Any]] (optional, default=None)
+        the arguments for the MLP part of the encoder. The arguments are those accepted by
+        :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is inferred
+        from the output of the convolutional part, and `out_channels` that is set to `latent_size`.
+        So, the only mandatory argument is `hidden_channels`.\n
+        If None, the MLP part will be reduced to a single linear layer.
+    out_channels : Optional[int] (optional, default=None)
+        number of output channels. If None, the output will have the same number of channels as the
+        input.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.
+    unpooling_mode : Union[str, UnpoolingMode] (optional, default=UnpoolingMode.NEAREST)
+        type of unpooling. Can be either `"nearest"`, `"linear"`, `"bilinear"`, `"bicubic"`, `"trilinear"` or
+        `"convtranspose"`.\n
+        - `nearest`: unpooling is performed by upsampling with the :italic:`nearest` algorithm (see [PyTorch's Upsample layer]
+        (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html)).
+        - `linear`: unpooling is performed by upsampling with the :italic:`linear` algorithm. Only works with 1D images (excluding the
+        channel dimension).
+        - `bilinear`: unpooling is performed by upsampling with the :italic:`bilinear` algorithm. Only works with 2D images.
+        - `bicubic`: unpooling is performed by upsampling with the :italic:`bicubic` algorithm. Only works with 2D images.
+        - `trilinear`: unpooling is performed by upsampling with the :italic:`trilinear` algorithm. Only works with 3D images.
+        - `convtranspose`: unpooling is performed with a transposed convolution, whose parameters (kernel size, stride, etc.) are
+        computed to reverse the pooling operation.
+
+    Examples
+    --------
+    >>> AutoEncoder(
+            in_shape=(1, 16, 16),
+            latent_size=8,
+            conv_args={
+                "channels": [2, 4],
+                "pooling_indices": [0],
+                "pooling": ("avg", {"kernel_size": 2}),
+            },
+            mlp_args={"hidden_channels": [32], "output_act": "relu"},
+            out_channels=2,
+            output_act="sigmoid",
+            unpooling_mode="bilinear",
+        )
+    AutoEncoder(
+        (encoder): CNN(
+            (convolutions): ConvEncoder(
+                (layer0): Convolution(
+                    (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1))
+                    (adn): ADN(
+                        (N): InstanceNorm2d(2, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)
+                        (A): PReLU(num_parameters=1)
+                    )
+                )
+                (pool0): AvgPool2d(kernel_size=2, stride=2, padding=0)
+                (layer1): Convolution(
+                    (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1))
+                )
+            )
+            (mlp): MLP(
+                (flatten): Flatten(start_dim=1, end_dim=-1)
+                (hidden0): Sequential(
+                    (linear): Linear(in_features=100, out_features=32, bias=True)
+                    (adn): ADN(
+                        (N): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+                        (A): PReLU(num_parameters=1)
+                    )
+                )
+                (output): Sequential(
+                    (linear): Linear(in_features=32, out_features=8, bias=True)
+                    (output_act): ReLU()
+                )
+            )
+        )
+        (decoder): Generator(
+            (mlp): MLP(
+                (flatten): Flatten(start_dim=1, end_dim=-1)
+                (hidden0): Sequential(
+                    (linear): Linear(in_features=8, out_features=32, bias=True)
+                    (adn): ADN(
+                        (N): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+                        (A): PReLU(num_parameters=1)
+                    )
+                )
+                (output): Sequential(
+                    (linear): Linear(in_features=32, out_features=100, bias=True)
+                    (output_act): ReLU()
+                )
+            )
+            (reshape): Reshape()
+            (convolutions): ConvDecoder(
+                (layer0): Convolution(
+                    (conv): ConvTranspose2d(4, 4, kernel_size=(3, 3), stride=(1, 1))
+                    (adn): ADN(
+                        (N): InstanceNorm2d(4, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False)
+                        (A): PReLU(num_parameters=1)
+                    )
+                )
+                (unpool0): Upsample(size=(14, 14), mode=<UnpoolingMode.BILINEAR: 'bilinear'>)
+                (layer1): Convolution(
+                    (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1))
+                )
+                (output_act): Sigmoid()
+            )
+        )
+    )
+
+    """
+
+    def __init__(
+        self,
+        in_shape: Sequence[int],
+        latent_size: int,
+        conv_args: Dict[str, Any],
+        mlp_args: Optional[Dict[str, Any]] = None,
+        out_channels: Optional[int] = None,
+        output_act: Optional[ActivationParameters] = None,
+        unpooling_mode: Union[str, UnpoolingMode] = UnpoolingMode.NEAREST,
+    ) -> None:
+        super().__init__()
+        self.in_shape = in_shape
+        self.latent_size = latent_size
+        self.out_channels = out_channels if out_channels else self.in_shape[0]
+        self._output_act = output_act
+        self.unpooling_mode = self._check_unpooling_mode(unpooling_mode)
+        self.spatial_dims = len(in_shape[1:])
+
+        self.encoder = CNN(
+            in_shape=self.in_shape,
+            num_outputs=latent_size,
+            conv_args=conv_args,
+            mlp_args=mlp_args,
+        )
+        inter_channels = (
+            conv_args["channels"][-1] if len(conv_args["channels"]) > 0 else in_shape[0]
+        )
+        inter_shape = (inter_channels, *self.encoder.convolutions.final_size)
+        self.decoder = Generator(
+            latent_size=latent_size,
+            start_shape=inter_shape,
+            conv_args=self._invert_conv_args(conv_args, self.encoder.convolutions),
+            mlp_args=self._invert_mlp_args(mlp_args, self.encoder.mlp),
+        )
+
+    @classmethod
+    def _invert_mlp_args(cls, args: Dict[str, Any], mlp: MLP) -> Dict[str, Any]:
+        """
+        Inverts arguments passed for the MLP part of the encoder, to 
get the MLP part of + the decoder. + """ + if args is None: + args = {} + args["hidden_channels"] = cls._invert_list_arg(mlp.hidden_channels) + + return args + + def _invert_conv_args( + self, args: Dict[str, Any], conv: ConvEncoder + ) -> Dict[str, Any]: + """ + Inverts arguments passed for the convolutional part of the encoder, to get the convolutional + part of the decoder. + """ + if len(args["channels"]) == 0: + args["channels"] = [] + else: + args["channels"] = self._invert_list_arg(conv.channels[:-1]) + [ + self.out_channels + ] + args["kernel_size"] = self._invert_list_arg(conv.kernel_size) + args["stride"] = self._invert_list_arg(conv.stride) + args["dilation"] = self._invert_list_arg(conv.dilation) + args["padding"], args["output_padding"] = self._get_paddings_list(conv) + + args["unpooling_indices"] = ( + conv.n_layers - np.array(conv.pooling_indices) - 2 + ).astype(int) + args["unpooling"] = [] + sizes_before_pooling = [ + size + for size, (layer_name, _) in zip(conv.size_details, conv.named_children()) + if "pool" in layer_name + ] + for size, pooling in zip(sizes_before_pooling[::-1], conv.pooling[::-1]): + args["unpooling"].append(self._invert_pooling_layer(size, pooling)) + + if "pooling" in args: + del args["pooling"] + if "pooling_indices" in args: + del args["pooling_indices"] + + args["output_act"] = self._output_act if self._output_act else None + + return args + + @classmethod + def _invert_list_arg(cls, arg: Union[Any, List[Any]]) -> Union[Any, List[Any]]: + """ + Reverses lists. + """ + return list(arg[::-1]) if isinstance(arg, Sequence) else arg + + def _invert_pooling_layer( + self, + size_before_pool: Sequence[int], + pooling: SingleLayerPoolingParameters, + ) -> SingleLayerUnpoolingParameters: + """ + Gets the unpooling layer. + """ + if self.unpooling_mode == UnpoolingMode.CONV_TRANS: + return ( + UnpoolingLayer.CONV_TRANS, + self._invert_pooling_with_convtranspose(size_before_pool, pooling), + ) + else: + return ( + UnpoolingLayer.UPSAMPLE, + {"size": size_before_pool, "mode": self.unpooling_mode}, + ) + + @classmethod + def _invert_pooling_with_convtranspose( + cls, + size_before_pool: Sequence[int], + pooling: SingleLayerPoolingParameters, + ) -> Dict[str, Any]: + """ + Computes the arguments of the transposed convolution, based on the pooling layer. 
+ """ + pooling_mode, pooling_args = pooling + if ( + pooling_mode == PoolingLayer.ADAPT_AVG + or pooling_mode == PoolingLayer.ADAPT_MAX + ): + input_size_np = np.array(size_before_pool) + output_size_np = np.array(pooling_args["output_size"]) + stride_np = input_size_np // output_size_np # adaptive pooling formulas + kernel_size_np = ( + input_size_np - (output_size_np - 1) * stride_np + ) # adaptive pooling formulas + args = { + "kernel_size": tuple(int(k) for k in kernel_size_np), + "stride": tuple(int(s) for s in stride_np), + } + padding, output_padding = cls._find_convtranspose_paddings( + pooling_mode, + size_before_pool, + output_size=pooling_args["output_size"], + **args, + ) + + elif pooling_mode == PoolingLayer.MAX or pooling_mode == PoolingLayer.AVG: + if "stride" not in pooling_args: + pooling_args["stride"] = pooling_args["kernel_size"] + args = { + arg: value + for arg, value in pooling_args.items() + if arg in ["kernel_size", "stride", "padding", "dilation"] + } + padding, output_padding = cls._find_convtranspose_paddings( + pooling_mode, + size_before_pool, + **pooling_args, + ) + + args["padding"] = padding # pylint: disable=possibly-used-before-assignment + args["output_padding"] = output_padding # pylint: disable=possibly-used-before-assignment + + return args + + @classmethod + def _get_paddings_list(cls, conv: ConvEncoder) -> List[Tuple[int, ...]]: + """ + Finds output padding list. + """ + padding = [] + output_padding = [] + size_before_convs = [ + size + for size, (layer_name, _) in zip(conv.size_details, conv.named_children()) + if "layer" in layer_name + ] + for size, k, s, p, d in zip( + size_before_convs, + conv.kernel_size, + conv.stride, + conv.padding, + conv.dilation, + ): + p, out_p = cls._find_convtranspose_paddings( + "conv", size, kernel_size=k, stride=s, padding=p, dilation=d + ) + padding.append(p) + output_padding.append(out_p) + + return cls._invert_list_arg(padding), cls._invert_list_arg(output_padding) + + @classmethod + def _find_convtranspose_paddings( + cls, + layer_type: Union[Literal["conv"], PoolingLayer], + in_shape: Union[Sequence[int], int], + padding: Union[Sequence[int], int] = 0, + **kwargs, + ) -> Tuple[Tuple[int, ...], Tuple[int, ...]]: + """ + Finds padding and output padding necessary to recover the right image size after + a transposed convolution. + """ + if layer_type == "conv": + layer_out_shape = calculate_conv_out_shape(in_shape, **kwargs) + elif layer_type in list(PoolingLayer): + layer_out_shape = calculate_pool_out_shape(layer_type, in_shape, **kwargs) + + convt_out_shape = calculate_convtranspose_out_shape(layer_out_shape, **kwargs) # pylint: disable=possibly-used-before-assignment + output_padding = np.atleast_1d(in_shape) - np.atleast_1d(convt_out_shape) + + if ( + output_padding < 0 + ).any(): # can happen with ceil_mode=True for maxpool. 
Then, add some padding + padding = np.atleast_1d(padding) * np.ones_like( + output_padding + ) # to have the same shape as output_padding + padding[output_padding < 0] += np.maximum(np.abs(output_padding) // 2, 1)[ + output_padding < 0 + ] # //2 because 2*padding pixels are removed + + convt_out_shape = calculate_convtranspose_out_shape( + layer_out_shape, padding=padding, **kwargs + ) + output_padding = np.atleast_1d(in_shape) - np.atleast_1d(convt_out_shape) + padding = tuple(int(s) for s in padding) + + return padding, tuple(int(s) for s in output_padding) + + def _check_unpooling_mode( + self, unpooling_mode: Union[str, UnpoolingMode] + ) -> UnpoolingMode: + """ + Checks consistency between data shape and unpooling mode. + """ + unpooling_mode = UnpoolingMode(unpooling_mode) + if unpooling_mode == UnpoolingMode.LINEAR and len(self.in_shape) != 2: + raise ValueError( + f"unpooling mode `linear` only works with 2D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + elif unpooling_mode == UnpoolingMode.BILINEAR and len(self.in_shape) != 3: + raise ValueError( + f"unpooling mode `bilinear` only works with 3D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + elif unpooling_mode == UnpoolingMode.BICUBIC and len(self.in_shape) != 3: + raise ValueError( + f"unpooling mode `bicubic` only works with 3D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + elif unpooling_mode == UnpoolingMode.TRILINEAR and len(self.in_shape) != 4: + raise ValueError( + f"unpooling mode `trilinear` only works with 4D data (counting the channel dimension). " + f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." + ) + + return unpooling_mode diff --git a/clinicadl/monai_networks/nn/cnn.py b/clinicadl/monai_networks/nn/cnn.py new file mode 100644 index 000000000..1479ecaea --- /dev/null +++ b/clinicadl/monai_networks/nn/cnn.py @@ -0,0 +1,124 @@ +from typing import Any, Dict, Optional, Sequence + +import numpy as np +import torch.nn as nn + +from .conv_encoder import ConvEncoder +from .mlp import MLP +from .utils import check_conv_args, check_mlp_args + + +class CNN(nn.Sequential): + """ + A regressor/classifier with first convolutional layers and then fully connected layers. + + This network is a simple aggregation of a Fully Convolutional Network (:py:class:`clinicadl. + monai_networks.nn.conv_encoder.ConvEncoder`) and a Multi Layer Perceptron (:py:class:`clinicadl. + monai_networks.nn.mlp.MLP`). + + Parameters + ---------- + in_shape : Sequence[int] + sequence of integers stating the dimension of the input tensor (minus batch dimension). + num_outputs : int + number of variables to predict. + conv_args : Dict[str, Any] + the arguments for the convolutional part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape` + that is specified here. So, the only mandatory argument is `channels`. + mlp_args : Optional[Dict[str, Any]] (optional, default=None) + the arguments for the MLP part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is inferred + from the output of the convolutional part, and `out_channels` that is set to `num_outputs`. 
+ So, the only mandatory argument is `hidden_channels`.\n + If None, the MLP part will be reduced to a single linear layer. + + Examples + -------- + # a classifier + >>> CNN( + in_shape=(1, 10, 10), + num_outputs=2, + conv_args={"channels": [2, 4], "norm": None, "act": None}, + mlp_args={"hidden_channels": [5], "act": "elu", "norm": None, "output_act": "softmax"}, + ) + CNN( + (convolutions): ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=144, out_features=5, bias=True) + (adn): ADN( + (A): ELU(alpha=1.0) + ) + ) + (output): Sequential( + (linear): Linear(in_features=5, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + ) + + # a regressor + >>> CNN( + in_shape=(1, 10, 10), + num_outputs=2, + conv_args={"channels": [2, 4], "norm": None, "act": None}, + ) + CNN( + (convolutions): ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (output): Linear(in_features=144, out_features=2, bias=True) + ) + ) + """ + + def __init__( + self, + in_shape: Sequence[int], + num_outputs: int, + conv_args: Dict[str, Any], + mlp_args: Optional[Dict[str, Any]] = None, + ) -> None: + super().__init__() + check_conv_args(conv_args) + check_mlp_args(mlp_args) + self.in_shape = in_shape + self.num_outputs = num_outputs + + in_channels, *input_size = in_shape + spatial_dims = len(input_size) + + self.convolutions = ConvEncoder( + in_channels=in_channels, + spatial_dims=spatial_dims, + _input_size=tuple(input_size), + **conv_args, + ) + + n_channels = ( + conv_args["channels"][-1] if len(conv_args["channels"]) > 0 else in_shape[0] + ) + flatten_shape = int(np.prod(self.convolutions.final_size) * n_channels) + if mlp_args is None: + mlp_args = {"hidden_channels": []} + self.mlp = MLP( + in_channels=flatten_shape, + out_channels=num_outputs, + **mlp_args, + ) diff --git a/clinicadl/monai_networks/nn/conv_decoder.py b/clinicadl/monai_networks/nn/conv_decoder.py new file mode 100644 index 000000000..28c9be96f --- /dev/null +++ b/clinicadl/monai_networks/nn/conv_decoder.py @@ -0,0 +1,388 @@ +from typing import Callable, Optional, Sequence, Tuple + +import torch.nn as nn +from monai.networks.blocks import Convolution +from monai.networks.layers.utils import get_act_layer +from monai.utils.misc import ensure_tuple + +from .layers.unpool import get_unpool_layer +from .layers.utils import ( + ActFunction, + ActivationParameters, + ConvNormalizationParameters, + ConvNormLayer, + ConvParameters, + NormLayer, + SingleLayerUnpoolingParameters, + UnpoolingLayer, + UnpoolingParameters, +) +from .utils import ( + calculate_convtranspose_out_shape, + calculate_unpool_out_shape, + check_adn_ordering, + check_norm_layer, + check_pool_indices, + ensure_list_of_tuples, +) + + +class ConvDecoder(nn.Sequential): + """ + Fully convolutional decoder network with transposed convolutions, unpooling, normalization, activation + and dropout layers. + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. 
+ channels : Sequence[int] + sequence of integers stating the output channels of each transposed convolution. Thus, this + parameter also controls the number of transposed convolutions. + kernel_size : ConvParameters (optional, default=3) + the kernel size of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the kernel sizes for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + stride : ConvParameters (optional, default=1) + the stride of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the strides for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + padding : ConvParameters (optional, default=0) + the padding of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the paddings for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + output_padding : ConvParameters (optional, default=0) + the output padding of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the output paddings for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + dilation : ConvParameters (optional, default=1) + the dilation factor of the transposed convolutions. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the dilations for each layer. + The length of the list must be equal to the number of transposed convolution layers (i.e. + `len(channels)`). + unpooling : Optional[UnpoolingParameters] (optional, default=(UnpoolingLayer.UPSAMPLE, {"scale_factor": 2})) + the unpooling mode and the arguments of the unpooling layer, passed as `(unpooling_mode, arguments)`. + If None, no unpooling will be performed in the network.\n + `unpooling_mode` can be either `upsample` or `convtranspose`. 
Please refer to PyTorch's [Upsample]
+ (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html) or [ConvTranspose](https://
+ pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html) to know the mandatory and optional
+ arguments.\n
+ If a list is passed, it will be understood as `(unpooling_mode, arguments)` for each unpooling layer.\n
+ Note: no need to pass `in_channels` and `out_channels` for `convtranspose` because the unpooling
+ layers are not intended to modify the number of channels.
+ unpooling_indices : Optional[Sequence[int]] (optional, default=None)
+ indices of the transposed convolution layers after which unpooling should be performed.
+ If None, no unpooling will be performed. An index equal to -1 will be understood as an unpooling layer before
+ the first transposed convolution.
+ act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU)
+ the activation function used after a transposed convolution layer, and optionally its arguments.
+ Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+ `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+ `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+ (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+ arguments for each of them.
+ output_act : Optional[ActivationParameters] (optional, default=None)
+ a potential activation layer applied to the output of the network. Should be passed in the same way as `act`.
+ If None, no last activation will be applied.
+ norm : Optional[ConvNormalizationParameters] (optional, default=ConvNormLayer.INSTANCE)
+ the normalization type used after a transposed convolution layer, and optionally the arguments of the normalization
+ layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be
+ performed.\n
+ `norm_type` can be any value in {`batch`, `group`, `instance`, `syncbatch`}. Please refer to PyTorch's
+ [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and
+ optional arguments for each of them.\n
+ Please note that arguments `num_channels` and `num_features` of the normalization layer
+ should not be passed, as they are automatically inferred from the output of the previous layer in the network.
+ dropout : Optional[float] (optional, default=None)
+ dropout ratio. If None, no dropout.
+ bias : bool (optional, default=True)
+ whether to have a bias term in transposed convolutions.
+ adn_ordering : str (optional, default="NDA")
+ order of operations `Activation`, `Dropout` and `Normalization` after a transposed convolutional layer (except the
+ last one).\n
+ For example, if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n
+ Note: ADN will not be applied after the last convolution.
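+
+ Note: spatial sizes follow the standard transposed-convolution arithmetic,
+ `out = (in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1`.
+ For instance (illustrative values), an input of size 8 with `kernel_size=3`, `stride=2`,
+ `padding=0` and `output_padding=1` gives an output of size 18.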
+ + Examples + -------- + >>> ConvDecoder( + in_channels=16, + spatial_dims=2, + channels=[8, 4, 1], + kernel_size=(3, 5), + stride=2, + padding=[1, 0, 0], + output_padding=[0, 0, (1, 2)], + dilation=1, + unpooling=[("upsample", {"scale_factor": 2}), ("upsample", {"size": (32, 32)})], + unpooling_indices=[0, 1], + act="elu", + output_act="relu", + norm=("batch", {"eps": 1e-05}), + dropout=0.1, + bias=True, + adn_ordering="NDA", + ) + ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(16, 8, kernel_size=(3, 5), stride=(2, 2), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (unpool0): Upsample(scale_factor=2.0, mode='nearest') + (layer1): Convolution( + (conv): ConvTranspose2d(8, 4, kernel_size=(3, 5), stride=(2, 2)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (unpool1): Upsample(size=(32, 32), mode='nearest') + (layer2): Convolution( + (conv): ConvTranspose2d(4, 1, kernel_size=(3, 5), stride=(2, 2), output_padding=(1, 2)) + ) + (output_act): ReLU() + ) + + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + channels: Sequence[int], + kernel_size: ConvParameters = 3, + stride: ConvParameters = 1, + padding: ConvParameters = 0, + output_padding: ConvParameters = 0, + dilation: ConvParameters = 1, + unpooling: Optional[UnpoolingParameters] = ( + UnpoolingLayer.UPSAMPLE, + {"scale_factor": 2}, + ), + unpooling_indices: Optional[Sequence[int]] = None, + act: Optional[ActivationParameters] = ActFunction.PRELU, + output_act: Optional[ActivationParameters] = None, + norm: Optional[ConvNormalizationParameters] = ConvNormLayer.INSTANCE, + dropout: Optional[float] = None, + bias: bool = True, + adn_ordering: str = "NDA", + _input_size: Optional[Sequence[int]] = None, + ) -> None: + super().__init__() + + self._current_size = _input_size if _input_size else None + + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.channels = ensure_tuple(channels) + self.n_layers = len(self.channels) + + self.kernel_size = ensure_list_of_tuples( + kernel_size, self.spatial_dims, self.n_layers, "kernel_size" + ) + self.stride = ensure_list_of_tuples( + stride, self.spatial_dims, self.n_layers, "stride" + ) + self.padding = ensure_list_of_tuples( + padding, self.spatial_dims, self.n_layers, "padding" + ) + self.output_padding = ensure_list_of_tuples( + output_padding, self.spatial_dims, self.n_layers, "output_padding" + ) + self.dilation = ensure_list_of_tuples( + dilation, self.spatial_dims, self.n_layers, "dilation" + ) + + self.unpooling_indices = check_pool_indices(unpooling_indices, self.n_layers) + self.unpooling = self._check_unpool_layers(unpooling) + self.act = act + self.norm = check_norm_layer(norm) + if self.norm == NormLayer.LAYER: + raise ValueError("Layer normalization not implemented in ConvDecoder.") + self.dropout = dropout + self.bias = bias + self.adn_ordering = check_adn_ordering(adn_ordering) + + n_unpoolings = 0 + if self.unpooling and -1 in self.unpooling_indices: + unpooling_layer = self._get_unpool_layer( + self.unpooling[n_unpoolings], n_channels=self.in_channels + ) + self.add_module("init_unpool", unpooling_layer) + n_unpoolings += 1 + + echannel = self.in_channels + for i, (c, k, s, p, o_p, d) in enumerate( + zip( + self.channels, + self.kernel_size, + self.stride, + 
self.padding, + self.output_padding, + self.dilation, + ) + ): + conv_layer = self._get_convtranspose_layer( + in_channels=echannel, + out_channels=c, + kernel_size=k, + stride=s, + padding=p, + output_padding=o_p, + dilation=d, + is_last=(i == len(channels) - 1), + ) + self.add_module(f"layer{i}", conv_layer) + echannel = c # use the output channel number as the input for the next loop + if self.unpooling and i in self.unpooling_indices: + unpooling_layer = self._get_unpool_layer( + self.unpooling[n_unpoolings], n_channels=c + ) + self.add_module(f"unpool{i}", unpooling_layer) + n_unpoolings += 1 + + self.output_act = get_act_layer(output_act) if output_act else None + + @property + def final_size(self): + """ + To know the size of an image at the end of the network. + """ + return self._current_size + + @final_size.setter + def final_size(self, fct: Callable[[Tuple[int, ...]], Tuple[int, ...]]): + """ + Takes as input the function used to update the current image size. + """ + if self._current_size is not None: + self._current_size = fct(self._current_size) + + def _get_convtranspose_layer( + self, + in_channels: int, + out_channels: int, + kernel_size: Tuple[int, ...], + stride: Tuple[int, ...], + padding: Tuple[int, ...], + output_padding: Tuple[int, ...], + dilation: Tuple[int, ...], + is_last: bool, + ) -> Convolution: + """ + Gets the parametrized TransposedConvolution-ADN block and updates the current output size. + """ + self.final_size = lambda size: calculate_convtranspose_out_shape( + size, kernel_size, stride, padding, output_padding, dilation + ) + + return Convolution( + is_transposed=True, + conv_only=is_last, + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=stride, + kernel_size=kernel_size, + padding=padding, + output_padding=output_padding, + dilation=dilation, + act=self.act, + norm=self.norm, + dropout=self.dropout, + bias=self.bias, + adn_ordering=self.adn_ordering, + ) + + def _get_unpool_layer( + self, unpooling: SingleLayerUnpoolingParameters, n_channels: int + ) -> nn.Module: + """ + Gets the parametrized unpooling layer and updates the current output size. + """ + unpool_layer = get_unpool_layer( + unpooling, + spatial_dims=self.spatial_dims, + in_channels=n_channels, + out_channels=n_channels, + ) + self.final_size = lambda size: calculate_unpool_out_shape( + unpool_mode=unpooling[0], + in_shape=size, + **unpool_layer.__dict__, + ) + return unpool_layer + + @classmethod + def _check_single_unpool_layer( + cls, unpooling: SingleLayerUnpoolingParameters + ) -> SingleLayerUnpoolingParameters: + """ + Checks unpooling arguments for a single pooling layer. + """ + if not isinstance(unpooling, tuple) or len(unpooling) != 2: + raise ValueError( + "unpooling must be double (or a list of doubles) with first the type of unpooling and then the parameters of " + f"the unpooling layer in a dict. Got {unpooling}" + ) + _ = UnpoolingLayer(unpooling[0]) # check unpooling mode + args = unpooling[1] + if not isinstance(args, dict): + raise ValueError( + f"The arguments of the unpooling layer must be passed in a dict. Got {args}" + ) + + return unpooling + + def _check_unpool_layers( + self, unpooling: UnpoolingParameters + ) -> UnpoolingParameters: + """ + Checks argument unpooling. 
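+ Accepted forms are None, a single double `(unpooling_mode, args)` (then repeated for each
+ unpooling index), or a list of such doubles, one per unpooling index.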
+ """ + if unpooling is None: + return unpooling + if isinstance(unpooling, list): + for unpool_layer in unpooling: + self._check_single_unpool_layer(unpool_layer) + if len(unpooling) != len(self.unpooling_indices): + raise ValueError( + "If you pass a list for unpooling, the size of that list must match " + f"the size of unpooling_indices. Got: unpooling={unpooling} and " + f"unpooling_indices={self.unpooling_indices}" + ) + elif isinstance(unpooling, tuple): + self._check_single_unpool_layer(unpooling) + unpooling = (unpooling,) * len(self.unpooling_indices) + else: + raise ValueError( + f"unpooling can be either None, a double (string, dictionary) or a list of such doubles. Got {unpooling}" + ) + + return unpooling diff --git a/clinicadl/monai_networks/nn/conv_encoder.py b/clinicadl/monai_networks/nn/conv_encoder.py new file mode 100644 index 000000000..f3ec66484 --- /dev/null +++ b/clinicadl/monai_networks/nn/conv_encoder.py @@ -0,0 +1,392 @@ +from typing import Callable, List, Optional, Sequence, Tuple + +import numpy as np +import torch.nn as nn +from monai.networks.blocks import Convolution +from monai.networks.layers.utils import get_act_layer, get_pool_layer +from monai.utils.misc import ensure_tuple + +from .layers.utils import ( + ActFunction, + ActivationParameters, + ConvNormalizationParameters, + ConvNormLayer, + ConvParameters, + NormLayer, + PoolingLayer, + PoolingParameters, + SingleLayerPoolingParameters, +) +from .utils import ( + calculate_conv_out_shape, + calculate_pool_out_shape, + check_adn_ordering, + check_norm_layer, + check_pool_indices, + ensure_list_of_tuples, +) + + +class ConvEncoder(nn.Sequential): + """ + Fully convolutional encoder network with convolutional, pooling, normalization, activation + and dropout layers. + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. + channels : Sequence[int] + sequence of integers stating the output channels of each convolutional layer. Thus, this + parameter also controls the number of convolutional layers. + kernel_size : ConvParameters (optional, default=3) + the kernel size of the convolutional layers. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the kernel sizes for each layer. + The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). + stride : ConvParameters (optional, default=1) + the stride of the convolutional layers. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. These values + will be used for all the layers.\n + If list (of tuples or integers), it will be interpreted as the strides for each layer. + The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). + padding : ConvParameters (optional, default=0) + the padding of the convolutional layers. Can be an integer, a tuple or a list.\n + If integer, the value will be used for all layers and all dimensions.\n + If tuple (of integers), it will be interpreted as the values for each dimension. 
These values
+ will be used for all the layers.\n
+ If list (of tuples or integers), it will be interpreted as the paddings for each layer.
+ The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`).
+ dilation : ConvParameters (optional, default=1)
+ the dilation factor of the convolutional layers. Can be an integer, a tuple or a list.\n
+ If integer, the value will be used for all layers and all dimensions.\n
+ If tuple (of integers), it will be interpreted as the values for each dimension. These values
+ will be used for all the layers.\n
+ If list (of tuples or integers), it will be interpreted as the dilations for each layer.
+ The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`).
+ pooling : Optional[PoolingParameters] (optional, default=(PoolingLayer.MAX, {"kernel_size": 2}))
+ the pooling mode and the arguments of the pooling layer, passed as `(pooling_mode, arguments)`.
+ If None, no pooling will be performed in the network.\n
+ `pooling_mode` can be either `max`, `avg`, `adaptivemax` or `adaptiveavg`. Please refer to PyTorch's [documentation]
+ (https://pytorch.org/docs/stable/nn.html#pooling-layers) to know the mandatory and optional arguments.\n
+ If a list is passed, it will be understood as `(pooling_mode, arguments)` for each pooling layer.
+ pooling_indices : Optional[Sequence[int]] (optional, default=None)
+ indices of the convolutional layers after which pooling should be performed.
+ If None, no pooling will be performed. An index equal to -1 will be understood as a pooling layer before
+ the first convolution.
+ act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU)
+ the activation function used after a convolutional layer, and optionally its arguments.
+ Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+ `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+ `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+ (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+ arguments for each of them.
+ output_act : Optional[ActivationParameters] (optional, default=None)
+ a potential activation layer applied to the output of the network. Should be passed in the same way as `act`.
+ If None, no last activation will be applied.
+ norm : Optional[ConvNormalizationParameters] (optional, default=ConvNormLayer.INSTANCE)
+ the normalization type used after a convolutional layer, and optionally the arguments of the normalization
+ layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be
+ performed.\n
+ `norm_type` can be any value in {`batch`, `group`, `instance`, `syncbatch`}. Please refer to PyTorch's
+ [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and
+ optional arguments for each of them.\n
+ Please note that arguments `num_channels` and `num_features` of the normalization layer
+ should not be passed, as they are automatically inferred from the output of the previous layer in the network.
+ dropout : Optional[float] (optional, default=None)
+ dropout ratio. If None, no dropout.
+ bias : bool (optional, default=True)
+ whether to have a bias term in convolutions.
+ adn_ordering : str (optional, default="NDA") + order of operations `Activation`, `Dropout` and `Normalization` after a convolutional layer (except the last + one). + For example if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n + Note: ADN will not be applied after the last convolution. + + Examples + -------- + >>> ConvEncoder( + spatial_dims=2, + in_channels=1, + channels=[2, 4, 8], + kernel_size=(3, 5), + stride=1, + padding=[1, (0, 1), 0], + dilation=1, + pooling=[("max", {"kernel_size": 2}), ("avg", {"kernel_size": 2})], + pooling_indices=[0, 1], + act="elu", + output_act="relu", + norm=("batch", {"eps": 1e-05}), + dropout=0.1, + bias=True, + adn_ordering="NDA", + ) + ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 5), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (pool0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) + (layer1): Convolution( + (conv): Conv2d(2, 4, kernel_size=(3, 5), stride=(1, 1), padding=(0, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (pool1): AvgPool2d(kernel_size=2, stride=2, padding=0) + (layer2): Convolution( + (conv): Conv2d(4, 8, kernel_size=(3, 5), stride=(1, 1)) + ) + (output_act): ReLU() + ) + + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + channels: Sequence[int], + kernel_size: ConvParameters = 3, + stride: ConvParameters = 1, + padding: ConvParameters = 0, + dilation: ConvParameters = 1, + pooling: Optional[PoolingParameters] = ( + PoolingLayer.MAX, + {"kernel_size": 2}, + ), + pooling_indices: Optional[Sequence[int]] = None, + act: Optional[ActivationParameters] = ActFunction.PRELU, + output_act: Optional[ActivationParameters] = None, + norm: Optional[ConvNormalizationParameters] = ConvNormLayer.INSTANCE, + dropout: Optional[float] = None, + bias: bool = True, + adn_ordering: str = "NDA", + _input_size: Optional[Sequence[int]] = None, + ) -> None: + super().__init__() + + self._current_size = _input_size if _input_size else None + self._size_details = [self._current_size] if _input_size else None + + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.channels = ensure_tuple(channels) + self.n_layers = len(self.channels) + + self.kernel_size = ensure_list_of_tuples( + kernel_size, self.spatial_dims, self.n_layers, "kernel_size" + ) + self.stride = ensure_list_of_tuples( + stride, self.spatial_dims, self.n_layers, "stride" + ) + self.padding = ensure_list_of_tuples( + padding, self.spatial_dims, self.n_layers, "padding" + ) + self.dilation = ensure_list_of_tuples( + dilation, self.spatial_dims, self.n_layers, "dilation" + ) + + self.pooling_indices = check_pool_indices(pooling_indices, self.n_layers) + self.pooling = self._check_pool_layers(pooling) + self.act = act + self.norm = check_norm_layer(norm) + if self.norm == NormLayer.LAYER: + raise ValueError("Layer normalization not implemented in ConvEncoder.") + self.dropout = dropout + self.bias = bias + self.adn_ordering = check_adn_ordering(adn_ordering) + + n_poolings = 0 + if self.pooling and -1 in self.pooling_indices: + pooling_layer = self._get_pool_layer(self.pooling[n_poolings]) + self.add_module("init_pool", pooling_layer) + n_poolings += 1 + + echannel = 
self.in_channels + for i, (c, k, s, p, d) in enumerate( + zip( + self.channels, + self.kernel_size, + self.stride, + self.padding, + self.dilation, + ) + ): + conv_layer = self._get_conv_layer( + in_channels=echannel, + out_channels=c, + kernel_size=k, + stride=s, + padding=p, + dilation=d, + is_last=(i == len(channels) - 1), + ) + self.add_module(f"layer{i}", conv_layer) + echannel = c # use the output channel number as the input for the next loop + if self.pooling and i in self.pooling_indices: + pooling_layer = self._get_pool_layer(self.pooling[n_poolings]) + self.add_module(f"pool{i}", pooling_layer) + n_poolings += 1 + + self.output_act = get_act_layer(output_act) if output_act else None + + @property + def final_size(self): + """ + To know the size of an image at the end of the network. + """ + return self._current_size + + @property + def size_details(self): + """ + To know the sizes of intermediate images. + """ + return self._size_details + + @final_size.setter + def final_size(self, fct: Callable[[Tuple[int, ...]], Tuple[int, ...]]): + """ + Takes as input the function used to update the current image size. + """ + if self._current_size is not None: + self._current_size = fct(self._current_size) + self._size_details.append(self._current_size) + self._check_size() + + def _get_conv_layer( + self, + in_channels: int, + out_channels: int, + kernel_size: Tuple[int, ...], + stride: Tuple[int, ...], + padding: Tuple[int, ...], + dilation: Tuple[int, ...], + is_last: bool, + ) -> Convolution: + """ + Gets the parametrized Convolution-ADN block and updates the current output size. + """ + self.final_size = lambda size: calculate_conv_out_shape( + size, kernel_size, stride, padding, dilation + ) + + return Convolution( + conv_only=is_last, + spatial_dims=self.spatial_dims, + in_channels=in_channels, + out_channels=out_channels, + strides=stride, + kernel_size=kernel_size, + padding=padding, + dilation=dilation, + act=self.act, + norm=self.norm, + dropout=self.dropout, + bias=self.bias, + adn_ordering=self.adn_ordering, + ) + + def _get_pool_layer(self, pooling: SingleLayerPoolingParameters) -> nn.Module: + """ + Gets the parametrized pooling layer and updates the current output size. + """ + pool_layer = get_pool_layer(pooling, spatial_dims=self.spatial_dims) + old_size = self.final_size + self.final_size = lambda size: calculate_pool_out_shape( + pool_mode=pooling[0], in_shape=size, **pool_layer.__dict__ + ) + + if ( + self.final_size is not None + and (np.array(old_size) < np.array(self.final_size)).any() + ): + raise ValueError( + f"You passed {pooling} as a pooling layer. But before this layer, the size of the image " + f"was {old_size}. So, pooling can't be performed." + ) + + return pool_layer + + def _check_size(self) -> None: + """ + Checks that image size never reaches 0. + """ + if self._current_size is not None and (np.array(self._current_size) <= 0).any(): + raise ValueError( + f"Failed to build the network. An image of size 0 or less has been reached. Stopped at:\n {self}" + ) + + @classmethod + def _check_single_pool_layer( + cls, pooling: SingleLayerPoolingParameters + ) -> SingleLayerPoolingParameters: + """ + Checks pooling arguments for a single pooling layer. + """ + if not isinstance(pooling, tuple) or len(pooling) != 2: + raise ValueError( + "pooling must be a double (or a list of doubles) with first the type of pooling and then the parameters " + f"of the pooling layer in a dict. 
Got {pooling}" + ) + pooling_type = PoolingLayer(pooling[0]) + args = pooling[1] + if not isinstance(args, dict): + raise ValueError( + f"The arguments of the pooling layer must be passed in a dict. Got {args}" + ) + if ( + pooling_type == PoolingLayer.MAX or pooling_type == PoolingLayer.AVG + ) and "kernel_size" not in args: + raise ValueError( + f"For {pooling_type} pooling mode, `kernel_size` argument must be passed. " + f"Got {args}" + ) + elif ( + pooling_type == PoolingLayer.ADAPT_AVG + or pooling_type == PoolingLayer.ADAPT_MAX + ) and "output_size" not in args: + raise ValueError( + f"For {pooling_type} pooling mode, `output_size` argument must be passed. " + f"Got {args}" + ) + + def _check_pool_layers( + self, pooling: PoolingParameters + ) -> List[SingleLayerPoolingParameters]: + """ + Check argument pooling. + """ + if pooling is None: + return pooling + if isinstance(pooling, list): + for pool_layer in pooling: + self._check_single_pool_layer(pool_layer) + if len(pooling) != len(self.pooling_indices): + raise ValueError( + "If you pass a list for pooling, the size of that list must match " + f"the size of pooling_indices. Got: pooling={pooling} and " + f"pooling_indices={self.pooling_indices}" + ) + elif isinstance(pooling, tuple): + self._check_single_pool_layer(pooling) + pooling = [pooling] * len(self.pooling_indices) + else: + raise ValueError( + f"pooling can be either None, a double (string, dictionary) or a list of such doubles. Got {pooling}" + ) + + return pooling diff --git a/clinicadl/monai_networks/nn/densenet.py b/clinicadl/monai_networks/nn/densenet.py new file mode 100644 index 000000000..45d99cc71 --- /dev/null +++ b/clinicadl/monai_networks/nn/densenet.py @@ -0,0 +1,312 @@ +import re +from collections import OrderedDict +from enum import Enum +from typing import Any, Mapping, Optional, Sequence, Union + +import torch.nn as nn +from monai.networks.layers.utils import get_act_layer +from monai.networks.nets import DenseNet as BaseDenseNet +from torch.hub import load_state_dict_from_url +from torchvision.models.densenet import ( + DenseNet121_Weights, + DenseNet161_Weights, + DenseNet169_Weights, + DenseNet201_Weights, +) + +from .layers.utils import ActivationParameters + + +class DenseNet(nn.Sequential): + """ + DenseNet based on the [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993) paper. + Adapted from [MONAI's implementation](https://docs.monai.io/en/stable/networks.html#densenet). + + The user can customize the number of dense blocks, the number of dense layers in each block, as well as + other parameters like the growth rate. + + DenseNet is a fully convolutional network that can work with input of any size, provided that is it large + enough not to be reduced to a 1-pixel image (before the adaptative average pooling). + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + n_dense_layers : Sequence[int] (optional, default=(6, 12, 24, 16)) + number of dense layers in each dense block. Thus, this parameter also defines the number of dense blocks. + Default is set to DenseNet-121 parameter. + init_features : int (optional, default=64) + number of feature maps after the initial convolution. Default is set to 64, as in the original paper. 
+ growth_rate : int (optional, default=32)
+ how many feature maps to add at each dense layer. Default is set to 32, as in the original paper.
+ bottleneck_factor : int (optional, default=4)
+ multiplicative factor for bottleneck layers (1x1 convolutions). The output of these bottleneck layers will
+ have `bottleneck_factor * growth_rate` feature maps. Default is 4, as in the original paper.
+ act : ActivationParameters (optional, default=("relu", {"inplace": True}))
+ the activation function used in the convolutional part, and optionally its arguments.
+ Should be passed as `activation_name` or `(activation_name, arguments)`.
+ `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+ `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+ (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+ arguments for each of them.\n
+ Default is "relu", as in the original paper.
+ output_act : Optional[ActivationParameters] (optional, default=None)
+ if `num_outputs` is not None, a potential activation layer applied to the outputs of the network.
+ Should be passed in the same way as `act`.
+ If None, no last activation will be applied.
+ dropout : Optional[float] (optional, default=None)
+ dropout ratio. If None, no dropout.
+
+ Examples
+ --------
+ >>> DenseNet(spatial_dims=2, in_channels=1, num_outputs=2, output_act="softmax", n_dense_layers=(2, 2))
+ DenseNet(
+ (features): Sequential(
+ (conv0): Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
+ (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act0): ReLU(inplace=True)
+ (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
+ (denseblock1): _DenseBlock(
+ (denselayer1): _DenseLayer(
+ (layers): Sequential(
+ (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act1): ReLU(inplace=True)
+ (conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+ (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act2): ReLU(inplace=True)
+ (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ )
+ )
+ (denselayer2): _DenseLayer(
+ (layers): Sequential(
+ (norm1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act1): ReLU(inplace=True)
+ (conv1): Conv2d(96, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+ (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act2): ReLU(inplace=True)
+ (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ )
+ )
+ )
+ (transition1): _Transition(
+ (norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act): ReLU(inplace=True)
+ (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
+ (pool): AvgPool2d(kernel_size=2, stride=2, padding=0)
+ )
+ (denseblock2): _DenseBlock(
+ (denselayer1): _DenseLayer(
+ (layers): Sequential(
+ (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act1): ReLU(inplace=True)
+ (conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+ (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act2): ReLU(inplace=True)
+ (conv2): Conv2d(128, 32, kernel_size=(3, 3),
stride=(1, 1), padding=(1, 1), bias=False)
+ )
+ )
+ (denselayer2): _DenseLayer(
+ (layers): Sequential(
+ (norm1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act1): ReLU(inplace=True)
+ (conv1): Conv2d(96, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
+ (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ (act2): ReLU(inplace=True)
+ (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+ )
+ )
+ )
+ (norm5): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+ )
+ (fc): Sequential(
+ (act): ReLU(inplace=True)
+ (pool): AdaptiveAvgPool2d(output_size=1)
+ (flatten): Flatten(start_dim=1, end_dim=-1)
+ (out): Linear(in_features=128, out_features=2, bias=True)
+ (output_act): Softmax(dim=None)
+ )
+ )
+ """
+
+ def __init__(
+ self,
+ spatial_dims: int,
+ in_channels: int,
+ num_outputs: Optional[int],
+ n_dense_layers: Sequence[int] = (6, 12, 24, 16),
+ init_features: int = 64,
+ growth_rate: int = 32,
+ bottleneck_factor: int = 4,
+ act: ActivationParameters = ("relu", {"inplace": True}),
+ output_act: Optional[ActivationParameters] = None,
+ dropout: Optional[float] = None,
+ ) -> None:
+ super().__init__()
+ self.spatial_dims = spatial_dims
+ self.in_channels = in_channels
+ self.num_outputs = num_outputs
+ self.n_dense_layers = n_dense_layers
+ self.init_features = init_features
+ self.growth_rate = growth_rate
+ self.bottleneck_factor = bottleneck_factor
+ self.act = act
+ self.dropout = dropout
+
+ base_densenet = BaseDenseNet(
+ spatial_dims=spatial_dims,
+ in_channels=in_channels,
+ out_channels=num_outputs if num_outputs else 1,
+ init_features=init_features,
+ growth_rate=growth_rate,
+ block_config=n_dense_layers,
+ bn_size=bottleneck_factor,
+ act=act,
+ dropout_prob=dropout if dropout else 0.0,
+ )
+ self.features = base_densenet.features
+ self.fc = base_densenet.class_layers if num_outputs else None
+ if self.fc:
+ self.fc.output_act = get_act_layer(output_act) if output_act else None
+
+ self._rename_act(self)
+
+ @classmethod
+ def _rename_act(cls, module: nn.Module) -> None:
+ """
+ Rename activation layers from 'relu' to 'act'.
+ """
+ for name, layer in list(module.named_children()):
+ if "relu" in name:
+ module._modules = OrderedDict( # pylint: disable=protected-access
+ [
+ (key.replace("relu", "act"), sub_m)
+ for key, sub_m in module._modules.items() # pylint: disable=protected-access
+ ]
+ )
+ else:
+ cls._rename_act(layer)
+
+
+class SOTADenseNet(str, Enum):
+ """Supported DenseNet networks."""
+
+ DENSENET_121 = "DenseNet-121"
+ DENSENET_161 = "DenseNet-161"
+ DENSENET_169 = "DenseNet-169"
+ DENSENET_201 = "DenseNet-201"
+
+
+def get_densenet(
+ name: Union[str, SOTADenseNet],
+ num_outputs: Optional[int],
+ output_act: ActivationParameters = None,
+ pretrained: bool = False,
+) -> DenseNet:
+ """
+ To get a DenseNet implemented in the [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993)
+ paper.
+
+ Only the last fully connected layer will be changed to match `num_outputs`.
+
+ The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not
+ use pretrained weights, as it is task specific.
+
+ .. warning:: `DenseNet-121`, `DenseNet-161`, `DenseNet-169` and `DenseNet-201` only work with 2D images with 3 channels.
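+
+ Example (a minimal sketch; the chosen network and arguments are purely illustrative):
+
+ >>> densenet = get_densenet(SOTADenseNet.DENSENET_121, num_outputs=2, output_act="softmax", pretrained=False)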
+ + Notes: `torchvision` does not provide an implementation for `DenseNet-264` but provides a `DenseNet-161` that is not + mentioned in the paper. + + Parameters + ---------- + name : Union[str, SOTADenseNet] + The name of the DenseNet. Available networks are `DenseNet-121`, `DenseNet-161`, `DenseNet-169` and `DenseNet-201`. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + output_act : ActivationParameters (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + pretrained : bool (optional, default=False) + whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// + pytorch.org/vision/main/models/densenet.html). + + Returns + ------- + DenseNet + The network, with potentially pretrained weights. + """ + name = SOTADenseNet(name) + if name == SOTADenseNet.DENSENET_121: + n_dense_layers = (6, 12, 24, 16) + growth_rate = 32 + init_features = 64 + model_url = DenseNet121_Weights.DEFAULT.url + elif name == SOTADenseNet.DENSENET_161: + n_dense_layers = (6, 12, 36, 24) + growth_rate = 48 + init_features = 96 + model_url = DenseNet161_Weights.DEFAULT.url + elif name == SOTADenseNet.DENSENET_169: + n_dense_layers = (6, 12, 32, 32) + growth_rate = 32 + init_features = 64 + model_url = DenseNet169_Weights.DEFAULT.url + elif name == SOTADenseNet.DENSENET_201: + n_dense_layers = (6, 12, 48, 32) + growth_rate = 32 + init_features = 64 + model_url = DenseNet201_Weights.DEFAULT.url + + # pylint: disable=possibly-used-before-assignment + densenet = DenseNet( + spatial_dims=2, + in_channels=3, + num_outputs=num_outputs, + n_dense_layers=n_dense_layers, + growth_rate=growth_rate, + init_features=init_features, + output_act=output_act, + ) + if not pretrained: + return densenet + + pretrained_dict = load_state_dict_from_url(model_url, progress=True) + features_state_dict = { + k.replace("features.", ""): v + for k, v in pretrained_dict.items() + if "classifier" not in k + } + densenet.features.load_state_dict(_state_dict_adapter(features_state_dict)) + + return densenet + + +def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: + """ + To update the old nomenclature in the pretrained state dict. + Adapted from `_load_state_dict` in [torchvision.models.densenet](https://pytorch.org/vision/main + /_modules/torchvision/models/densenet.html). 
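+
+ For instance, an old-style key like `denseblock1.denselayer1.norm.1.weight` is renamed to
+ `denseblock1.denselayer1.layers.norm1.weight` (key names here are purely illustrative).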
+ """ + pattern = re.compile( + r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" + ) + + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + new_key = re.sub(r"^(.*denselayer\d+)\.", r"\1.layers.", new_key) + state_dict[new_key] = state_dict[key] + del state_dict[key] + + return state_dict diff --git a/clinicadl/monai_networks/nn/generator.py b/clinicadl/monai_networks/nn/generator.py new file mode 100644 index 000000000..5f68a2e58 --- /dev/null +++ b/clinicadl/monai_networks/nn/generator.py @@ -0,0 +1,131 @@ +from typing import Any, Dict, Optional, Sequence + +import numpy as np +import torch.nn as nn +from monai.networks.layers.simplelayers import Reshape + +from .conv_decoder import ConvDecoder +from .mlp import MLP +from .utils import check_conv_args, check_mlp_args + + +class Generator(nn.Sequential): + """ + A generator with first fully connected layers and then convolutional layers. + + This network is a simple aggregation of a Multi Layer Perceptron (:py:class: + `clinicadl.monai_networks.nn.mlp.MLP`) and a Fully Convolutional Network + (:py:class:`clinicadl.monai_networks.nn.conv_decoder.ConvDecoder`). + + Parameters + ---------- + latent_size : int + size of the latent vector. + start_shape : Sequence[int] + sequence of integers stating the initial shape of the image, i.e. the shape at the + beginning of the convolutional part (minus batch dimension, but including the number + of channels).\n + Thus, `start_shape` determines the dimension of the output of the generator (the exact + shape depends on the convolutional part and can be accessed via the class attribute + `output_shape`). + conv_args : Dict[str, Any] + the arguments for the convolutional part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.conv_decoder.ConvDecoder`, except `in_shape` that + is specified here via `start_shape`. So, the only mandatory argument is `channels`. + mlp_args : Optional[Dict[str, Any]] (optional, default=None) + the arguments for the MLP part. The arguments are those accepted by + :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is specified + here via `latent_size`, and `out_channels` that is inferred from `start_shape`. + So, the only mandatory argument is `hidden_channels`.\n + If None, the MLP part will be reduced to a single linear layer. 
+ + Examples + -------- + >>> Generator( + latent_size=8, + start_shape=(8, 2, 2), + conv_args={"channels": [4, 2], "norm": None, "act": None}, + mlp_args={"hidden_channels": [16], "act": "elu", "norm": None}, + ) + Generator( + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=8, out_features=16, bias=True) + (adn): ADN( + (A): ELU(alpha=1.0) + ) + ) + (output): Linear(in_features=16, out_features=32, bias=True) + ) + (reshape): Reshape() + (convolutions): ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(8, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + ) + + >>> Generator( + latent_size=8, + start_shape=(8, 2, 2), + conv_args={"channels": [4, 2], "norm": None, "act": None, "output_act": "relu"}, + ) + Generator( + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (output): Linear(in_features=8, out_features=32, bias=True) + ) + (reshape): Reshape() + (convolutions): ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(8, 4, kernel_size=(3, 3), stride=(1, 1)) + ) + (layer1): Convolution( + (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (output_act): ReLU() + ) + ) + """ + + def __init__( + self, + latent_size: int, + start_shape: Sequence[int], + conv_args: Dict[str, Any], + mlp_args: Optional[Dict[str, Any]] = None, + ) -> None: + super().__init__() + check_conv_args(conv_args) + check_mlp_args(mlp_args) + self.latent_size = latent_size + self.start_shape = start_shape + + flatten_shape = int(np.prod(start_shape)) + if mlp_args is None: + mlp_args = {"hidden_channels": []} + self.mlp = MLP( + in_channels=latent_size, + out_channels=flatten_shape, + **mlp_args, + ) + + self.reshape = Reshape(*start_shape) + inter_channels, *inter_size = start_shape + self.convolutions = ConvDecoder( + in_channels=inter_channels, + spatial_dims=len(inter_size), + _input_size=inter_size, + **conv_args, + ) + + n_channels = ( + conv_args["channels"][-1] + if len(conv_args["channels"]) > 0 + else start_shape[0] + ) + self.output_shape = (n_channels, *self.convolutions.final_size) diff --git a/clinicadl/monai_networks/nn/layers/__init__.py b/clinicadl/monai_networks/nn/layers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/monai_networks/nn/layers/resnet.py b/clinicadl/monai_networks/nn/layers/resnet.py new file mode 100644 index 000000000..c115da512 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/resnet.py @@ -0,0 +1,124 @@ +from collections.abc import Callable +from typing import Optional + +import torch +import torch.nn as nn +from monai.networks.layers.factories import Conv, Norm +from monai.networks.layers.utils import get_act_layer + +from .utils import ActivationParameters + + +class ResNetBlock(nn.Module): + """ + ResNet basic block. 
Adapted from MONAI's implementation: + https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ + monai/networks/nets/resnet.py#L71 + """ + + expansion = 1 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + act: ActivationParameters = ("relu", {"inplace": True}), + ) -> None: + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type( # pylint: disable=not-callable + in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False + ) + self.norm1 = norm_type(planes) # pylint: disable=not-callable + self.act1 = get_act_layer(name=act) + self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False) # pylint: disable=not-callable + self.norm2 = norm_type(planes) # pylint: disable=not-callable + self.downsample = downsample + self.act2 = get_act_layer(name=act) + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out: torch.Tensor = self.conv1(x) + out = self.norm1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.act2(out) + + return out + + +class ResNetBottleneck(nn.Module): + """ + ResNet bottleneck block. Adapted from MONAI's implementation: + https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ + monai/networks/nets/resnet.py#L124 + """ + + expansion = 4 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + act: ActivationParameters = ("relu", {"inplace": True}), + ) -> None: + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False) # pylint: disable=not-callable + self.norm1 = norm_type(planes) # pylint: disable=not-callable + self.act1 = get_act_layer(name=act) + self.conv2 = conv_type( # pylint: disable=not-callable + planes, planes, kernel_size=3, stride=stride, padding=1, bias=False + ) + self.norm2 = norm_type(planes) # pylint: disable=not-callable + self.act2 = get_act_layer(name=act) + self.conv3 = conv_type( # pylint: disable=not-callable + planes, planes * self.expansion, kernel_size=1, bias=False + ) + self.norm3 = norm_type(planes * self.expansion) # pylint: disable=not-callable + self.downsample = downsample + self.act3 = get_act_layer(name=act) + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out: torch.Tensor = self.conv1(x) + out = self.norm1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.act2(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.act3(out) + + return out diff --git a/clinicadl/monai_networks/nn/layers/senet.py b/clinicadl/monai_networks/nn/layers/senet.py new file mode 100644 index 000000000..8847ef577 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/senet.py @@ -0,0 +1,142 @@ +from typing import Callable, Optional + +import torch +import torch.nn as nn +from monai.networks.blocks.squeeze_and_excitation import ChannelSELayer +from monai.networks.layers.factories import Conv, Norm +from 
monai.networks.layers.utils import get_act_layer + +from .utils import ActivationParameters + + +class SEResNetBlock(nn.Module): + """ + ResNet basic block. Adapted from MONAI's ResNetBlock: + https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ + monai/networks/nets/resnet.py#L71 + """ + + expansion = 1 + reduction = 16 + + def __init__( + self, + in_planes: int, + planes: int, + spatial_dims: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + act: ActivationParameters = ("relu", {"inplace": True}), + ) -> None: + super().__init__() + + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + self.conv1 = conv_type( # pylint: disable=not-callable + in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False + ) + self.norm1 = norm_type(planes) # pylint: disable=not-callable + self.act1 = get_act_layer(name=act) + self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False) # pylint: disable=not-callable + self.norm2 = norm_type(planes) # pylint: disable=not-callable + self.se_layer = ChannelSELayer( + spatial_dims=spatial_dims, + in_channels=planes, + r=self.reduction, + acti_type_1=("relu", {"inplace": True}), + acti_type_2="sigmoid", + ) + self.downsample = downsample + self.act2 = get_act_layer(name=act) + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out = self.se_layer(out) + out += residual + out = self.act2(out) + + return out + + +class SEResNetBottleneck(nn.Module): + """ + ResNet bottleneck block. 
Adapted from MONAI's ResNetBottleneck:
+ https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/
+ monai/networks/nets/resnet.py#L124
+ """
+
+ expansion = 4
+ reduction = 16
+
+ def __init__(
+ self,
+ in_planes: int,
+ planes: int,
+ spatial_dims: int,
+ stride: int = 1,
+ downsample: Optional[nn.Module] = None,
+ act: ActivationParameters = ("relu", {"inplace": True}),
+ ) -> None:
+ super().__init__()
+
+ conv_type: Callable = Conv[Conv.CONV, spatial_dims]
+ norm_type: Callable = Norm[Norm.BATCH, spatial_dims]
+
+ self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False) # pylint: disable=not-callable
+ self.norm1 = norm_type(planes) # pylint: disable=not-callable
+ self.act1 = get_act_layer(name=act)
+ self.conv2 = conv_type( # pylint: disable=not-callable
+ planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
+ )
+ self.norm2 = norm_type(planes) # pylint: disable=not-callable
+ self.act2 = get_act_layer(name=act)
+ self.conv3 = conv_type( # pylint: disable=not-callable
+ planes, planes * self.expansion, kernel_size=1, bias=False
+ )
+ self.norm3 = norm_type(planes * self.expansion) # pylint: disable=not-callable
+ self.se_layer = ChannelSELayer(
+ spatial_dims=spatial_dims,
+ in_channels=planes * self.expansion,
+ r=self.reduction,
+ acti_type_1=("relu", {"inplace": True}),
+ acti_type_2="sigmoid",
+ )
+ self.downsample = downsample
+ self.act3 = get_act_layer(name=act)
+ self.stride = stride
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ residual = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.act1(out)
+
+ out = self.conv2(out)
+ out = self.norm2(out)
+ out = self.act2(out)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out = self.se_layer(out)
+ out += residual
+ out = self.act3(out)
+
+ return out
diff --git a/clinicadl/monai_networks/nn/layers/unet.py b/clinicadl/monai_networks/nn/layers/unet.py
new file mode 100644
index 000000000..2186425be
--- /dev/null
+++ b/clinicadl/monai_networks/nn/layers/unet.py
@@ -0,0 +1,102 @@
+from typing import Optional
+
+import torch.nn as nn
+from monai.networks.blocks.convolutions import Convolution
+from monai.networks.layers.utils import get_pool_layer
+
+from .utils import ActFunction, ActivationParameters, NormLayer
+
+
+class ConvBlock(nn.Sequential):
+ """UNet double convolution block."""
+
+ def __init__(
+ self,
+ spatial_dims: int,
+ in_channels: int,
+ out_channels: int,
+ act: ActivationParameters = ActFunction.RELU,
+ dropout: Optional[float] = None,
+ ):
+ super().__init__()
+ self.add_module(
+ "0",
+ Convolution(
+ spatial_dims=spatial_dims,
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ strides=1,
+ padding=None,
+ adn_ordering="NDA",
+ act=act,
+ norm=NormLayer.BATCH,
+ dropout=dropout,
+ ),
+ )
+ self.add_module(
+ "1",
+ Convolution(
+ spatial_dims=spatial_dims,
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ strides=1,
+ padding=None,
+ adn_ordering="NDA",
+ act=act,
+ norm=NormLayer.BATCH,
+ dropout=dropout,
+ ),
+ )
+
+
+class UpSample(nn.Sequential):
+ """UNet up-conv block with first upsampling and then a convolution."""
+
+ def __init__(
+ self,
+ spatial_dims: int,
+ in_channels: int,
+ out_channels: int,
+ act: ActivationParameters = ActFunction.RELU,
+ dropout: Optional[float] = None,
+ ):
+ super().__init__()
+ self.add_module("0", nn.Upsample(scale_factor=2))
+ self.add_module(
+ "1",
+ Convolution(
+                spatial_dims,
+                in_channels,
+                out_channels,
+                strides=1,
+                kernel_size=3,
+                act=act,
+                adn_ordering="NDA",
+                norm=NormLayer.BATCH,
+                dropout=dropout,
+            ),
+        )
+
+
+class DownBlock(nn.Sequential):
+    """UNet down block with first max pooling and then two convolutions."""
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        out_channels: int,
+        act: ActivationParameters = ActFunction.RELU,
+        dropout: Optional[float] = None,
+    ):
+        super().__init__()
+        self.pool = get_pool_layer(("max", {"kernel_size": 2}), spatial_dims)
+        self.doubleconv = ConvBlock(
+            spatial_dims=spatial_dims,
+            in_channels=in_channels,
+            out_channels=out_channels,
+            act=act,
+            dropout=dropout,
+        )
diff --git a/clinicadl/monai_networks/nn/layers/unpool.py b/clinicadl/monai_networks/nn/layers/unpool.py
new file mode 100644
index 000000000..1c90fde90
--- /dev/null
+++ b/clinicadl/monai_networks/nn/layers/unpool.py
@@ -0,0 +1,87 @@
+from typing import Any, Dict, Optional, Tuple, Type, Union
+
+import torch.nn as nn
+from monai.networks.layers.factories import LayerFactory, split_args
+from monai.utils import has_option
+
+from .utils import UnpoolingLayer
+
+Unpool = LayerFactory(
+    name="Unpooling layers", description="Factory for creating unpooling layers."
+)
+
+
+@Unpool.factory_function("upsample")
+def upsample_factory(dim: int) -> Type[nn.Upsample]:
+    """
+    Upsample layer.
+    """
+    return nn.Upsample
+
+
+@Unpool.factory_function("convtranspose")
+def convtranspose_factory(
+    dim: int,
+) -> Type[Union[nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]]:
+    """
+    Transposed convolutional layers in 1, 2 or 3 dimensions.
+
+    Parameters
+    ----------
+    dim : int
+        desired dimension of the transposed convolutional layer.
+
+    Returns
+    -------
+    type[Union[nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]]
+        ConvTranspose[dim]d
+    """
+    types = (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d)
+    return types[dim - 1]
+
+
+def get_unpool_layer(
+    name: Union[UnpoolingLayer, Tuple[UnpoolingLayer, Dict[str, Any]]],
+    spatial_dims: int,
+    in_channels: Optional[int] = None,
+    out_channels: Optional[int] = None,
+) -> nn.Module:
+    """
+    Creates an unpooling layer instance.
+
+    Parameters
+    ----------
+    name : Union[UnpoolingLayer, Tuple[UnpoolingLayer, Dict[str, Any]]]
+        the unpooling type, potentially with arguments in a dict.
+    spatial_dims : int
+        number of spatial dimensions of the input.
+    in_channels : Optional[int] (optional, default=None)
+        number of input channels if the unpool layer requires this parameter.
+    out_channels : Optional[int] (optional, default=None)
+        number of output channels if the unpool layer requires this parameter.
+
+    Returns
+    -------
+    nn.Module
+        the parametrized unpooling layer.
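+
+    Examples
+    --------
+    An illustrative call with hypothetical values (the expected output assumes PyTorch's
+    default repr):
+
+    >>> get_unpool_layer(
+    ...     ("convtranspose", {"kernel_size": 2, "stride": 2}),
+    ...     spatial_dims=2,
+    ...     in_channels=4,
+    ...     out_channels=2,
+    ... )
+    ConvTranspose2d(4, 2, kernel_size=(2, 2), stride=(2, 2))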
+ """ + unpool_name, unpool_args = split_args(name) + unpool_name = UnpoolingLayer(unpool_name) + unpool_type = Unpool[unpool_name, spatial_dims] + kw_args = dict(unpool_args) + if has_option(unpool_type, "in_channels") and "in_channels" not in kw_args: + kw_args["in_channels"] = in_channels + if has_option(unpool_type, "out_channels") and "out_channels" not in kw_args: + kw_args["out_channels"] = out_channels + + return unpool_type(**kw_args) # pylint: disable=not-callable diff --git a/clinicadl/monai_networks/nn/layers/utils/__init__.py b/clinicadl/monai_networks/nn/layers/utils/__init__.py new file mode 100644 index 000000000..5c080fffd --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/utils/__init__.py @@ -0,0 +1,19 @@ +from .enum import ( + ActFunction, + ConvNormLayer, + NormLayer, + PoolingLayer, + UnpoolingLayer, + UnpoolingMode, +) +from .types import ( + ActivationParameters, + ConvNormalizationParameters, + ConvParameters, + NormalizationParameters, + PoolingParameters, + SingleLayerConvParameter, + SingleLayerPoolingParameters, + SingleLayerUnpoolingParameters, + UnpoolingParameters, +) diff --git a/clinicadl/monai_networks/nn/layers/utils/enum.py b/clinicadl/monai_networks/nn/layers/utils/enum.py new file mode 100644 index 000000000..695776551 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/utils/enum.py @@ -0,0 +1,65 @@ +from clinicadl.utils.enum import CaseInsensitiveEnum + + +class UnpoolingLayer(CaseInsensitiveEnum): + """Supported unpooling layers in ClinicaDL.""" + + CONV_TRANS = "convtranspose" + UPSAMPLE = "upsample" + + +class ActFunction(CaseInsensitiveEnum): + """Supported activation functions in ClinicaDL.""" + + ELU = "elu" + RELU = "relu" + LEAKY_RELU = "leakyrelu" + PRELU = "prelu" + RELU6 = "relu6" + SELU = "selu" + CELU = "celu" + GELU = "gelu" + SIGMOID = "sigmoid" + TANH = "tanh" + SOFTMAX = "softmax" + LOGSOFTMAX = "logsoftmax" + MISH = "mish" + + +class PoolingLayer(CaseInsensitiveEnum): + """Supported pooling layers in ClinicaDL.""" + + MAX = "max" + AVG = "avg" + ADAPT_AVG = "adaptiveavg" + ADAPT_MAX = "adaptivemax" + + +class NormLayer(CaseInsensitiveEnum): + """Supported normalization layers in ClinicaDL.""" + + GROUP = "group" + LAYER = "layer" + SYNCBATCH = "syncbatch" + BATCH = "batch" + INSTANCE = "instance" + + +class ConvNormLayer(CaseInsensitiveEnum): + """Supported normalization layers with convolutions in ClinicaDL.""" + + GROUP = "group" + SYNCBATCH = "syncbatch" + BATCH = "batch" + INSTANCE = "instance" + + +class UnpoolingMode(CaseInsensitiveEnum): + """Supported unpooling mode for AutoEncoders in ClinicaDL.""" + + NEAREST = "nearest" + LINEAR = "linear" + BILINEAR = "bilinear" + BICUBIC = "bicubic" + TRILINEAR = "trilinear" + CONV_TRANS = "convtranspose" diff --git a/clinicadl/monai_networks/nn/layers/utils/types.py b/clinicadl/monai_networks/nn/layers/utils/types.py new file mode 100644 index 000000000..f5ef18847 --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/utils/types.py @@ -0,0 +1,37 @@ +from typing import Any, Dict, List, Tuple, Union + +from .enum import ( + ActFunction, + ConvNormLayer, + NormLayer, + PoolingLayer, + UnpoolingLayer, +) + +SingleLayerConvParameter = Union[int, Tuple[int, ...]] +ConvParameters = Union[SingleLayerConvParameter, List[SingleLayerConvParameter]] + +PoolingType = Union[str, PoolingLayer] +SingleLayerPoolingParameters = Tuple[PoolingType, Dict[str, Any]] +PoolingParameters = Union[ + SingleLayerPoolingParameters, List[SingleLayerPoolingParameters] +] + +UnpoolingType = Union[str, 
UnpoolingLayer] +SingleLayerUnpoolingParameters = Tuple[UnpoolingType, Dict[str, Any]] +UnpoolingParameters = Union[ + SingleLayerUnpoolingParameters, List[SingleLayerUnpoolingParameters] +] + +NormalizationType = Union[str, NormLayer] +NormalizationParameters = Union[ + NormalizationType, Tuple[NormalizationType, Dict[str, Any]] +] + +ConvNormalizationType = Union[str, ConvNormLayer] +ConvNormalizationParameters = Union[ + ConvNormalizationType, Tuple[ConvNormalizationType, Dict[str, Any]] +] + +ActivationType = Union[str, ActFunction] +ActivationParameters = Union[ActivationType, Tuple[ActivationType, Dict[str, Any]]] diff --git a/clinicadl/monai_networks/nn/layers/vit.py b/clinicadl/monai_networks/nn/layers/vit.py new file mode 100644 index 000000000..e485d6c6b --- /dev/null +++ b/clinicadl/monai_networks/nn/layers/vit.py @@ -0,0 +1,94 @@ +from functools import partial +from typing import Callable, Optional + +import torch +import torch.nn as nn +from torchvision.models.vision_transformer import MLPBlock + + +class EncoderBlock(nn.Module): + """Transformer encoder block.""" + + def __init__( + self, + num_heads: int, + hidden_dim: int, + mlp_dim: int, + dropout: float, + attention_dropout: float, + norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6), + ) -> None: + super().__init__() + self.num_heads = num_heads + + # Attention block + self.norm1 = norm_layer(hidden_dim) + self.self_attention = nn.MultiheadAttention( + hidden_dim, num_heads, dropout=attention_dropout, batch_first=True + ) + self.dropout = nn.Dropout(dropout) + + # MLP block + self.norm2 = norm_layer(hidden_dim) + self.mlp = MLPBlock(hidden_dim, mlp_dim, dropout) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + + x = self.norm1(x) + x, _ = self.self_attention(x, x, x, need_weights=False) + x = self.dropout(x) + x += residual + + y = self.norm2(x) + y = self.mlp(y) + return x + y + + +class Encoder(nn.Module): + """Encoder with multiple transformer blocks.""" + + def __init__( + self, + seq_length: int, + num_layers: int, + num_heads: int, + hidden_dim: int, + mlp_dim: int, + dropout: float, + attention_dropout: float, + pos_embedding: Optional[nn.Parameter] = None, + norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6), + ) -> None: + super().__init__() + + if pos_embedding is not None: + self.pos_embedding = pos_embedding + else: + self.pos_embedding = nn.Parameter( + torch.empty(1, seq_length, hidden_dim).normal_(std=0.02) + ) # from BERT + self.dropout = nn.Dropout(dropout) + self.layers = nn.ModuleList( + [ + EncoderBlock( + num_heads, + hidden_dim, + mlp_dim, + dropout, + attention_dropout, + norm_layer, + ) + for _ in range(num_layers) + ] + ) + self.norm = norm_layer(hidden_dim) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x + self.pos_embedding + + x = self.dropout(x) + for layer in self.layers: + x = layer(x) + + return self.norm(x) diff --git a/clinicadl/monai_networks/nn/mlp.py b/clinicadl/monai_networks/nn/mlp.py new file mode 100644 index 000000000..a27b2ad4e --- /dev/null +++ b/clinicadl/monai_networks/nn/mlp.py @@ -0,0 +1,146 @@ +from collections import OrderedDict +from typing import Optional, Sequence + +import torch.nn as nn +from monai.networks.blocks import ADN +from monai.networks.layers.utils import get_act_layer +from monai.networks.nets import FullyConnectedNet as BaseMLP + +from .layers.utils import ( + ActFunction, + ActivationParameters, + NormalizationParameters, + NormLayer, +) +from .utils import 
check_adn_ordering, check_norm_layer
+
+
+class MLP(BaseMLP):
+    """Simple fully connected neural network (or Multi-Layer Perceptron) with linear, normalization, activation
+    and dropout layers.
+
+    Parameters
+    ----------
+    in_channels : int
+        number of input channels (i.e. number of features).
+    out_channels : int
+        number of output channels.
+    hidden_channels : Sequence[int]
+        number of output channels for each hidden layer. Thus, this parameter also controls the number of hidden layers.
+    act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU)
+        the activation function used after a linear layer, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network. Should be passed in the same way as `act`.
+        If None, no last activation will be applied.
+    norm : Optional[NormalizationParameters] (optional, default=NormLayer.BATCH)
+        the normalization type used after a linear layer, and optionally the arguments of the normalization
+        layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be
+        performed.\n
+        `norm_type` can be any value in {`batch`, `group`, `instance`, `layer`, `syncbatch`}. Please refer to PyTorch's
+        [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and
+        optional arguments for each of them.\n
+        Please note that arguments `num_channels`, `num_features` and `normalized_shape` of the normalization layer
+        should not be passed, as they are automatically inferred from the output of the previous layer in the network.
+    dropout : Optional[float] (optional, default=None)
+        dropout ratio. If None, no dropout.
+    bias : bool (optional, default=True)
+        whether to have a bias term in linear layers.
+    adn_ordering : str (optional, default="NDA")
+        order of operations `Activation`, `Dropout` and `Normalization` after a linear layer (except the last
+        one).
+        For example, if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n
+        Note: ADN will not be applied after the last linear layer.
+ + Examples + -------- + >>> MLP(in_channels=12, out_channels=2, hidden_channels=[8, 4], dropout=0.1, act=("elu", {"alpha": 0.5}), + norm=("group", {"num_groups": 2}), bias=True, adn_ordering="ADN", output_act="softmax") + MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=12, out_features=8, bias=True) + (adn): ADN( + (A): ELU(alpha=0.5) + (D): Dropout(p=0.1, inplace=False) + (N): GroupNorm(2, 8, eps=1e-05, affine=True) + ) + ) + (hidden1): Sequential( + (linear): Linear(in_features=8, out_features=4, bias=True) + (adn): ADN( + (A): ELU(alpha=0.5) + (D): Dropout(p=0.1, inplace=False) + (N): GroupNorm(2, 4, eps=1e-05, affine=True) + ) + ) + (output): Sequential( + (linear): Linear(in_features=4, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + hidden_channels: Sequence[int], + act: Optional[ActivationParameters] = ActFunction.PRELU, + output_act: Optional[ActivationParameters] = None, + norm: Optional[NormalizationParameters] = NormLayer.BATCH, + dropout: Optional[float] = None, + bias: bool = True, + adn_ordering: str = "NDA", + ) -> None: + self.norm = check_norm_layer(norm) + super().__init__( + in_channels, + out_channels, + hidden_channels, + dropout, + act, + bias, + check_adn_ordering(adn_ordering), + ) + self.output = nn.Sequential(OrderedDict([("linear", self.output)])) + self.output.output_act = get_act_layer(output_act) if output_act else None + # renaming + self._modules = OrderedDict( + [ + (key.replace("hidden_", "hidden"), sub_m) + for key, sub_m in self._modules.items() + ] + ) + + def _get_layer(self, in_channels: int, out_channels: int, bias: bool) -> nn.Module: + """ + Gets the parametrized Linear layer + ADN block. 
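+        For layer normalization, `normalized_shape` is automatically set to `out_channels`;
+        any other normalization setting is passed through unchanged.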
+ """ + if self.norm == NormLayer.LAYER: + norm = ("layer", {"normalized_shape": out_channels}) + else: + norm = self.norm + seq = nn.Sequential( + OrderedDict( + [ + ("linear", nn.Linear(in_channels, out_channels, bias)), + ( + "adn", + ADN( + ordering=self.adn_ordering, + act=self.act, + norm=norm, + dropout=self.dropout, + dropout_dim=1, + in_channels=out_channels, + ), + ), + ] + ) + ) + return seq diff --git a/clinicadl/monai_networks/nn/resnet.py b/clinicadl/monai_networks/nn/resnet.py new file mode 100644 index 000000000..1ba90b30c --- /dev/null +++ b/clinicadl/monai_networks/nn/resnet.py @@ -0,0 +1,566 @@ +import re +from collections import OrderedDict +from copy import deepcopy +from enum import Enum +from typing import Any, Callable, Mapping, Optional, Sequence, Type, Union + +import torch +import torch.nn as nn +from monai.networks.layers.factories import Conv, Norm, Pool +from monai.networks.layers.utils import get_act_layer +from monai.utils import ensure_tuple_rep +from torch.hub import load_state_dict_from_url +from torchvision.models.resnet import ( + ResNet18_Weights, + ResNet34_Weights, + ResNet50_Weights, + ResNet101_Weights, + ResNet152_Weights, +) + +from .layers.resnet import ResNetBlock, ResNetBottleneck +from .layers.senet import SEResNetBlock, SEResNetBottleneck +from .layers.utils import ActivationParameters + + +class ResNetBlockType(str, Enum): + """Supported ResNet blocks.""" + + BASIC = "basic" + BOTTLENECK = "bottleneck" + + +class GeneralResNet(nn.Module): + """Common base class for ResNet and SEResNet.""" + + def __init__( + self, + spatial_dims: int, + in_channels: int, + num_outputs: Optional[int], + block_type: Union[str, ResNetBlockType], + n_res_blocks: Sequence[int], + n_features: Sequence[int], + init_conv_size: Union[Sequence[int], int], + init_conv_stride: Union[Sequence[int], int], + bottleneck_reduction: int, + se_reduction: Optional[int], + act: ActivationParameters, + output_act: ActivationParameters, + ) -> None: + super().__init__() + + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.num_outputs = num_outputs + self.block_type = block_type + self._check_args_consistency(n_res_blocks, n_features) + self.n_res_blocks = n_res_blocks + self.n_features = n_features + self.bottleneck_reduction = bottleneck_reduction + self.se_reduction = se_reduction + self.act = act + self.squeeze_excitation = True if se_reduction else False + + self.init_conv_size = ensure_tuple_rep(init_conv_size, spatial_dims) + self.init_conv_stride = ensure_tuple_rep(init_conv_stride, spatial_dims) + + block, in_planes = self._get_block(block_type) + + conv_type, norm_type, pool_type, avgp_type = self._get_layers() + + block_avgpool = [0, 1, (1, 1), (1, 1, 1)] + + self.in_planes = in_planes[0] + self.n_layers = len(in_planes) + self.bias_downsample = False + + self.conv0 = conv_type( # pylint: disable=not-callable + in_channels, + self.in_planes, + kernel_size=self.init_conv_size, + stride=self.init_conv_stride, + padding=tuple(k // 2 for k in self.init_conv_size), + bias=False, + ) + self.norm0 = norm_type(self.in_planes) # pylint: disable=not-callable + self.act0 = get_act_layer(name=act) + self.pool0 = pool_type(kernel_size=3, stride=2, padding=1) # pylint: disable=not-callable + self.layer1 = self._make_resnet_layer( + block, in_planes[0], n_res_blocks[0], spatial_dims, act + ) + for i, (n_blocks, n_feats) in enumerate( + zip(n_res_blocks[1:], in_planes[1:]), start=2 + ): + self.add_module( + f"layer{i}", + self._make_resnet_layer( + block, + 
planes=n_feats, + blocks=n_blocks, + spatial_dims=spatial_dims, + stride=2, + act=act, + ), + ) + self.fc = ( + nn.Sequential( + OrderedDict( + [ + ("pool", avgp_type(block_avgpool[spatial_dims])), # pylint: disable=not-callable + ("flatten", nn.Flatten(1)), + ("out", nn.Linear(n_features[-1], num_outputs)), + ] + ) + ) + if num_outputs + else None + ) + if self.fc: + self.fc.output_act = get_act_layer(output_act) if output_act else None + + self._init_module(conv_type, norm_type) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv0(x) + x = self.norm0(x) + x = self.act0(x) + x = self.pool0(x) + + for i in range(1, self.n_layers + 1): + x = self.get_submodule(f"layer{i}")(x) + + if self.fc is not None: + x = self.fc(x) + + return x + + def _get_block(self, block_type: Union[str, ResNetBlockType]) -> nn.Module: + """ + Gets the residual block, depending on the block choice made by the user and depending + on whether squeeze-excitation mode or not. + """ + block_type = ResNetBlockType(block_type) + if block_type == ResNetBlockType.BASIC: + in_planes = self.n_features + if self.squeeze_excitation: + block = SEResNetBlock + block.reduction = self.se_reduction + else: + block = ResNetBlock + elif block_type == ResNetBlockType.BOTTLENECK: + in_planes = self._bottleneck_reduce( + self.n_features, self.bottleneck_reduction + ) + if self.squeeze_excitation: + block = SEResNetBottleneck + block.reduction = self.se_reduction + else: + block = ResNetBottleneck + block.expansion = self.bottleneck_reduction + + return block, in_planes + + def _get_layers(self): + """ + Gets convolution, normalization, pooling and adaptative average pooling layers. + """ + conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[ + Conv.CONV, self.spatial_dims + ] + norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[ + Norm.BATCH, self.spatial_dims + ] + pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[ + Pool.MAX, self.spatial_dims + ] + avgp_type: Type[ + Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d] + ] = Pool[Pool.ADAPTIVEAVG, self.spatial_dims] + + return conv_type, norm_type, pool_type, avgp_type + + def _make_resnet_layer( + self, + block: Type[Union[ResNetBlock, ResNetBottleneck]], + planes: int, + blocks: int, + spatial_dims: int, + act: ActivationParameters, + stride: int = 1, + ) -> nn.Sequential: + """ + Builds a ResNet layer. + """ + conv_type: Callable = Conv[Conv.CONV, spatial_dims] + norm_type: Callable = Norm[Norm.BATCH, spatial_dims] + + downsample = None + if stride != 1 or self.in_planes != planes * block.expansion: + downsample = nn.Sequential( + conv_type( # pylint: disable=not-callable + self.in_planes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=self.bias_downsample, + ), + norm_type(planes * block.expansion), # pylint: disable=not-callable + ) + + layers = [ + block( + in_planes=self.in_planes, + planes=planes, + spatial_dims=spatial_dims, + stride=stride, + downsample=downsample, + act=act, + ) + ] + + self.in_planes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block(self.in_planes, planes, spatial_dims=spatial_dims, act=act) + ) + + return nn.Sequential(*layers) + + def _init_module( + self, conv_type: Type[nn.Module], norm_type: Type[nn.Module] + ) -> None: + """ + Initializes the parameters. 
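+        Convolutions use Kaiming normal initialization (mode "fan_out", ReLU nonlinearity);
+        normalization layers are initialized with weight 1 and bias 0, and linear biases with 0.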
+ """ + for m in self.modules(): + if isinstance(m, conv_type): + nn.init.kaiming_normal_( + torch.as_tensor(m.weight), mode="fan_out", nonlinearity="relu" + ) + elif isinstance(m, norm_type): + nn.init.constant_(torch.as_tensor(m.weight), 1) + nn.init.constant_(torch.as_tensor(m.bias), 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(torch.as_tensor(m.bias), 0) + + @classmethod + def _bottleneck_reduce( + cls, n_features: Sequence[int], bottleneck_reduction: int + ) -> Sequence[int]: + """ + Finds number of feature maps for the bottleneck layers. + """ + reduced_features = [] + for n in n_features: + if n % bottleneck_reduction != 0: + raise ValueError( + "All elements of n_features must be divisible by bottleneck_reduction. " + f"Got {n} in n_features and bottleneck_reduction={bottleneck_reduction}" + ) + reduced_features.append(n // bottleneck_reduction) + + return reduced_features + + @classmethod + def _check_args_consistency( + cls, n_res_blocks: Sequence[int], n_features: Sequence[int] + ) -> None: + """ + Checks consistency between `n_res_blocks` and `n_features`. + """ + if not isinstance(n_res_blocks, Sequence): + raise ValueError(f"n_res_blocks must be a sequence, got {n_res_blocks}") + if not isinstance(n_features, Sequence): + raise ValueError(f"n_features must be a sequence, got {n_features}") + if len(n_features) != len(n_res_blocks): + raise ValueError( + f"n_features and n_res_blocks must have the same length, got n_features={n_features} " + f"and n_res_blocks={n_res_blocks}" + ) + + +class ResNet(GeneralResNet): + """ + ResNet based on the [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385) paper. + Adapted from [MONAI's implementation](https://docs.monai.io/en/stable/networks.html#resnet). + + The user can customize the number of residual blocks, the number of downsampling blocks, the number of channels + in each block, as well as other parameters like the type of residual block used. + + ResNet is a fully convolutional network that can work with input of any size, provided that is it large + enough not to be reduced to a 1-pixel image (before the adaptative average pooling). + + Parameters + ---------- + spatial_dims : int + number of spatial dimensions of the input image. + in_channels : int + number of channels in the input image. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer (including average pooling) will be returned. + block_type : Union[str, ResNetBlockType] (optional, default=ResNetBlockType.BASIC) + type of residual block. Either `basic` or `bottleneck`. Default to `basic`, as in `ResNet-18`. + n_res_blocks : Sequence[int] (optional, default=(2, 2, 2, 2)) + number of residual block in each ResNet layer. A ResNet layer refers here to the set of residual blocks + between two downsamplings. The length of `n_res_blocks` thus determines the number of ResNet layers. + Default to `(2, 2, 2, 2)`, as in `ResNet-18`. + n_features : Sequence[int] (optional, default=(64, 128, 256, 512)) + number of output feature maps for each ResNet layer. The length of `n_features` must be equal to the length + of `n_res_blocks`. Default to `(64, 128, 256, 512)`, as in `ResNet-18`. + init_conv_size : Union[Sequence[int], int] (optional, default=7) + kernel_size for the first convolution. + If tuple, it will be understood as the values for each dimension. + Default to 7, as in the original paper. 
+ init_conv_stride : Union[Sequence[int], int] (optional, default=2) + stride for the first convolution. + If tuple, it will be understood as the values for each dimension. + Default to 2, as in the original paper. + bottleneck_reduction : int (optional, default=4) + if `block_type='bottleneck'`, `bottleneck_reduction` determines the reduction factor for the number + of feature maps in bottleneck layers (1x1 convolutions). Default to 4, as in the original paper. + act : ActivationParameters (optional, default=("relu", {"inplace": True})) + the activation function used in the convolutional part, and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them.\n + Default is "relu", as in the original paper. + output_act : Optional[ActivationParameters] (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network. + Should be pass in the same way as `act`. + If None, no last activation will be applied. + + Examples + -------- + >>> ResNet( + spatial_dims=2, + in_channels=1, + num_outputs=2, + block_type="bottleneck", + bottleneck_reduction=4, + n_features=(8, 16), + n_res_blocks=(2, 2), + output_act="softmax", + init_conv_size=5, + ) + ResNet( + (conv0): Conv2d(1, 2, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), bias=False) + (norm0): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act0): ReLU(inplace=True) + (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + (layer1): Sequential( + (0): ResNetBottleneck( + (conv1): Conv2d(2, 2, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv3): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm3): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (downsample): Sequential( + (0): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) + (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + ) + (act3): ReLU(inplace=True) + ) + (1): ResNetBottleneck( + (conv1): Conv2d(8, 2, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act2): ReLU(inplace=True) + (conv3): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm3): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act3): ReLU(inplace=True) + ) + ) + (layer2): Sequential( + (0): ResNetBottleneck( + (conv1): Conv2d(8, 4, kernel_size=(1, 1), stride=(1, 1), bias=False) + (norm1): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)
+            (act1): ReLU(inplace=True)
+            (conv2): Conv2d(4, 4, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
+            (norm2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+            (act2): ReLU(inplace=True)
+            (conv3): Conv2d(4, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
+            (norm3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+            (downsample): Sequential(
+                (0): Conv2d(8, 16, kernel_size=(1, 1), stride=(2, 2), bias=False)
+                (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+            )
+            (act3): ReLU(inplace=True)
+        )
+        (1): ResNetBottleneck(
+            (conv1): Conv2d(16, 4, kernel_size=(1, 1), stride=(1, 1), bias=False)
+            (norm1): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+            (act1): ReLU(inplace=True)
+            (conv2): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
+            (norm2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+            (act2): ReLU(inplace=True)
+            (conv3): Conv2d(4, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
+            (norm3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
+            (act3): ReLU(inplace=True)
+        )
+    )
+    (fc): Sequential(
+        (pool): AdaptiveAvgPool2d(output_size=(1, 1))
+        (flatten): Flatten(start_dim=1, end_dim=-1)
+        (out): Linear(in_features=16, out_features=2, bias=True)
+        (output_act): Softmax(dim=None)
+    )
+    )
+    """
+
+    def __init__(
+        self,
+        spatial_dims: int,
+        in_channels: int,
+        num_outputs: Optional[int],
+        block_type: Union[str, ResNetBlockType] = ResNetBlockType.BASIC,
+        n_res_blocks: Sequence[int] = (2, 2, 2, 2),
+        n_features: Sequence[int] = (64, 128, 256, 512),
+        init_conv_size: Union[Sequence[int], int] = 7,
+        init_conv_stride: Union[Sequence[int], int] = 2,
+        bottleneck_reduction: int = 4,
+        act: ActivationParameters = ("relu", {"inplace": True}),
+        output_act: Optional[ActivationParameters] = None,
+    ) -> None:
+        super().__init__(
+            spatial_dims=spatial_dims,
+            in_channels=in_channels,
+            num_outputs=num_outputs,
+            block_type=block_type,
+            n_res_blocks=n_res_blocks,
+            n_features=n_features,
+            init_conv_size=init_conv_size,
+            init_conv_stride=init_conv_stride,
+            bottleneck_reduction=bottleneck_reduction,
+            se_reduction=None,
+            act=act,
+            output_act=output_act,
+        )
+
+
+class SOTAResNet(str, Enum):
+    """Supported ResNet networks."""
+
+    RESNET_18 = "ResNet-18"
+    RESNET_34 = "ResNet-34"
+    RESNET_50 = "ResNet-50"
+    RESNET_101 = "ResNet-101"
+    RESNET_152 = "ResNet-152"
+
+
+def get_resnet(
+    name: Union[str, SOTAResNet],
+    num_outputs: Optional[int],
+    output_act: ActivationParameters = None,
+    pretrained: bool = False,
+) -> ResNet:
+    """
+    To get a ResNet implemented in the [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385)
+    paper.
+
+    Only the last fully connected layer will be changed to match `num_outputs`.
+
+    The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not
+    use pretrained weights, as it is task specific.
+
+    .. warning:: `ResNet-18`, `ResNet-34`, `ResNet-50`, `ResNet-101` and `ResNet-152` only work with 2D images with 3
+        channels.
+
+    Parameters
+    ----------
+    name : Union[str, SOTAResNet]
+        the name of the ResNet. Available networks are `ResNet-18`, `ResNet-34`, `ResNet-50`, `ResNet-101` and `ResNet-152`.
+ num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + output_act : ActivationParameters (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + pretrained : bool (optional, default=False) + whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// + pytorch.org/vision/main/models/resnet.html). + + Returns + ------- + ResNet + The network, with potentially pretrained weights. + """ + name = SOTAResNet(name) + if name == SOTAResNet.RESNET_18: + block_type = ResNetBlockType.BASIC + n_res_blocks = (2, 2, 2, 2) + n_features = (64, 128, 256, 512) + model_url = ResNet18_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_34: + block_type = ResNetBlockType.BASIC + n_res_blocks = (3, 4, 6, 3) + n_features = (64, 128, 256, 512) + model_url = ResNet34_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_50: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 6, 3) + n_features = (256, 512, 1024, 2048) + model_url = ResNet50_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_101: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 23, 3) + n_features = (256, 512, 1024, 2048) + model_url = ResNet101_Weights.DEFAULT.url + elif name == SOTAResNet.RESNET_152: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 8, 36, 3) + n_features = (256, 512, 1024, 2048) + model_url = ResNet152_Weights.DEFAULT.url + + # pylint: disable=possibly-used-before-assignment + resnet = ResNet( + spatial_dims=2, + in_channels=3, + num_outputs=num_outputs, + n_res_blocks=n_res_blocks, + block_type=block_type, + n_features=n_features, + output_act=output_act, + ) + if pretrained: + fc_layers = deepcopy(resnet.fc) + resnet.fc = None + pretrained_dict = load_state_dict_from_url(model_url, progress=True) + resnet.load_state_dict(_state_dict_adapter(pretrained_dict)) + resnet.fc = fc_layers + + return resnet + + +def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: + """ + A mapping between torchvision's layer names and ours. 
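+
+    For instance, torchvision's stem layers `conv1`/`bn1` presumably correspond to our
+    `conv0`/`norm0` (hypothetical illustration; the exact renamings are given by the regex
+    mappings below). Fully connected (`fc`) weights are dropped beforehand, as the last
+    layer is task-specific.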
+ """ + state_dict = {k: v for k, v in state_dict.items() if "fc" not in k} + + mappings = [ + (r"(?>> SEResNet( + spatial_dims=2, + in_channels=1, + num_outputs=2, + block_type="basic", + se_reduction=2, + n_features=(8,), + n_res_blocks=(2,), + output_act="softmax", + init_conv_size=5, + ) + SEResNet( + (conv0): Conv2d(1, 8, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), bias=False) + (norm0): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act0): ReLU(inplace=True) + (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) + (layer1): Sequential( + (0): SEResNetBlock( + (conv1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (se_layer): ChannelSELayer( + (avg_pool): AdaptiveAvgPool2d(output_size=1) + (fc): Sequential( + (0): Linear(in_features=8, out_features=4, bias=True) + (1): ReLU(inplace=True) + (2): Linear(in_features=4, out_features=8, bias=True) + (3): Sigmoid() + ) + ) + (act2): ReLU(inplace=True) + ) + (1): SEResNetBlock( + (conv1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (act1): ReLU(inplace=True) + (conv2): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) + (norm2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (se_layer): ChannelSELayer( + (avg_pool): AdaptiveAvgPool2d(output_size=1) + (fc): Sequential( + (0): Linear(in_features=8, out_features=4, bias=True) + (1): ReLU(inplace=True) + (2): Linear(in_features=4, out_features=8, bias=True) + (3): Sigmoid() + ) + ) + (act2): ReLU(inplace=True) + ) + ) + (fc): Sequential( + (pool): AdaptiveAvgPool2d(output_size=(1, 1)) + (flatten): Flatten(start_dim=1, end_dim=-1) + (out): Linear(in_features=8, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + """ + + def __init__( + self, + spatial_dims: int, + in_channels: int, + num_outputs: Optional[int], + se_reduction: int = 16, + **kwargs: Any, + ) -> None: + # get defaults from resnet + _, default_resnet_args = get_args_and_defaults(ResNet.__init__) + for arg, value in default_resnet_args.items(): + if arg not in kwargs: + kwargs[arg] = value + + self._check_se_channels(kwargs["n_features"], se_reduction) + + super().__init__( + spatial_dims=spatial_dims, + in_channels=in_channels, + num_outputs=num_outputs, + se_reduction=se_reduction, + **kwargs, + ) + + @classmethod + def _check_se_channels(cls, n_features: Sequence[int], se_reduction: int) -> None: + """ + Checks that the output of residual blocks always have a number of channels greater + than squeeze-excitation bottleneck reduction factor. + """ + if not isinstance(n_features, Sequence): + raise ValueError(f"n_features must be a sequence. Got {n_features}") + for n in n_features: + if n < se_reduction: + raise ValueError( + f"elements of n_features must be greater or equal to se_reduction. 
Got {n} in n_features " + f"and se_reduction={se_reduction}" + ) + + +class SOTAResNet(str, Enum): + """Supported SEResNet networks.""" + + SE_RESNET_50 = "SEResNet-50" + SE_RESNET_101 = "SEResNet-101" + SE_RESNET_152 = "SEResNet-152" + + +def get_seresnet( + name: Union[str, SOTAResNet], + num_outputs: Optional[int], + output_act: ActivationParameters = None, + pretrained: bool = False, +) -> SEResNet: + """ + To get a Squeeze-and-Excitation ResNet implemented in the [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/ + 1709.01507) paper. + + Only the last fully connected layer will be changed to match `num_outputs`. + + .. warning:: `SEResNet-50`, `SEResNet-101` and `SEResNet-152` only works with 2D images with 3 channels. + + Note: pretrained weights are not yet available for these networks. + + Parameters + ---------- + model : Union[str, SOTAResNet] + the name of the SEResNet. Available networks are `SEResNet-50`, `SEResNet-101` and `SEResNet-152`. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + output_act : ActivationParameters (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + pretrained : bool (optional, default=False) + pretrained networks are not yet available for SE-ResNets. Leave this argument to False. + + Returns + ------- + SEResNet + the network. + """ + if pretrained is not False: + raise ValueError( + "Pretrained networks are not yet available for SE-ResNets. Please leave " + "'pretrained' to False." 
+ ) + + name = SOTAResNet(name) + if name == SOTAResNet.SE_RESNET_50: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 6, 3) + n_features = (256, 512, 1024, 2048) + elif name == SOTAResNet.SE_RESNET_101: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 4, 23, 3) + n_features = (256, 512, 1024, 2048) + elif name == SOTAResNet.SE_RESNET_152: + block_type = ResNetBlockType.BOTTLENECK + n_res_blocks = (3, 8, 36, 3) + n_features = (256, 512, 1024, 2048) + + # pylint: disable=possibly-used-before-assignment + resnet = SEResNet( + spatial_dims=2, + in_channels=3, + num_outputs=num_outputs, + n_res_blocks=n_res_blocks, + block_type=block_type, + n_features=n_features, + output_act=output_act, + ) + + return resnet diff --git a/clinicadl/monai_networks/nn/unet.py b/clinicadl/monai_networks/nn/unet.py new file mode 100644 index 000000000..dd1e59141 --- /dev/null +++ b/clinicadl/monai_networks/nn/unet.py @@ -0,0 +1,250 @@ +from abc import ABC, abstractmethod +from typing import Optional, Sequence + +import torch +import torch.nn as nn +from monai.networks.blocks.convolutions import Convolution +from monai.networks.layers.utils import get_act_layer + +from .layers.unet import ConvBlock, DownBlock, UpSample +from .layers.utils import ActFunction, ActivationParameters + + +class BaseUNet(nn.Module, ABC): + """Base class for UNet and AttentionUNet.""" + + def __init__( + self, + spatial_dims: int, + in_channels: int, + out_channels: int, + channels: Sequence[int] = (64, 128, 256, 512, 1024), + act: ActivationParameters = ActFunction.RELU, + output_act: Optional[ActivationParameters] = None, + dropout: Optional[float] = None, + ): + super().__init__() + if not isinstance(channels, Sequence) or len(channels) < 2: + raise ValueError( + f"channels should be a sequence, whose length is no less than 2. Got {channels}" + ) + self.spatial_dims = spatial_dims + self.in_channels = in_channels + self.out_channels = out_channels + self.channels = channels + self.act = act + self.dropout = dropout + + self.doubleconv = ConvBlock( + spatial_dims=spatial_dims, + in_channels=in_channels, + out_channels=channels[0], + act=act, + dropout=dropout, + ) + self._build_encoder() + self._build_decoder() + self.reduce_channels = Convolution( + spatial_dims=spatial_dims, + in_channels=channels[0], + out_channels=out_channels, + kernel_size=1, + strides=1, + padding=0, + conv_only=True, + ) + self.output_act = get_act_layer(output_act) if output_act else None + + @abstractmethod + def forward(self, x: torch.Tensor) -> torch.Tensor: + pass + + def _build_encoder(self) -> None: + for i in range(1, len(self.channels)): + self.add_module( + f"down{i}", + DownBlock( + spatial_dims=self.spatial_dims, + in_channels=self.channels[i - 1], + out_channels=self.channels[i], + act=self.act, + dropout=self.dropout, + ), + ) + + @abstractmethod + def _build_decoder(self) -> None: + pass + + +class UNet(BaseUNet): + """ + UNet based on [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/pdf/1505.04597). + + The user can customize the number of encoding blocks, the number of channels in each block, as well as other parameters + like the activation function. + + .. warning:: UNet works only with images whose dimensions are high enough powers of 2. More precisely, if n is the number + of max pooling operation in your UNet (which is equal to `len(channels)-1`), the image must have :math:`2^{k}` + pixels in each dimension, with :math:`k \\geq n` (e.g. 
shape (:math:`2^{n}`, :math:`2^{n+3}`) for a 2D image).
+
+    Note: the implementation proposed here is not exactly the one described in the original paper. Padding is added to
+    convolutions so that the feature maps keep a constant size (except when they are passed to `max pool` or `up-sample`
+    layers), batch normalization is used, and `up-conv` layers are here made with an [Upsample](https://pytorch.org/docs/
+    stable/generated/torch.nn.Upsample.html) layer followed by a 3x3 convolution.
+
+    Parameters
+    ----------
+    spatial_dims : int
+        number of spatial dimensions of the input image.
+    in_channels : int
+        number of channels in the input image.
+    out_channels : int
+        number of output channels.
+    channels : Sequence[int] (optional, default=(64, 128, 256, 512, 1024))
+        sequence of integers stating the number of channels in each UNet block. Thus, this parameter also controls
+        the number of UNet blocks. The length of `channels` should be no less than 2.\n
+        Default to `(64, 128, 256, 512, 1024)`, as in the original paper.
+    act : ActivationParameters (optional, default=ActFunction.RELU)
+        the activation function used in the convolutional part, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`.
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.\n
+        Default is "relu", as in the original paper.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network. Should be passed in the same way as `act`.
+        If None, no last activation will be applied.
+    dropout : Optional[float] (optional, default=None)
+        dropout ratio. If None, no dropout.
+ + Examples + -------- + >>> UNet( + spatial_dims=2, + in_channels=1, + out_channels=2, + channels=(4, 8), + act="elu", + output_act=("softmax", {"dim": 1}), + dropout=0.1, + ) + UNet( + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(1, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (down1): DownBlock( + (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) + (doubleconv): ConvBlock( + (0): Convolution( + (conv): Conv2d(4, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + ) + (upsample1): UpSample( + (0): Upsample(scale_factor=2.0, mode='nearest') + (1): Convolution( + (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (doubleconv1): ConvBlock( + (0): Convolution( + (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + (1): Convolution( + (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) + (adn): ADN( + (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (D): Dropout(p=0.1, inplace=False) + (A): ELU(alpha=1.0) + ) + ) + ) + (reduce_channels): Convolution( + (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) + ) + (output_act): Softmax(dim=1) + ) + """ + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_history = [self.doubleconv(x)] + + for i in range(1, len(self.channels)): + x = self.get_submodule(f"down{i}")(x_history[-1]) + x_history.append(x) + + x_history.pop() # the output of bottelneck is not used as a residual + for i in range(len(self.channels) - 1, 0, -1): + up = self.get_submodule(f"upsample{i}")(x) + merged = torch.cat((x_history.pop(), up), dim=1) + x = self.get_submodule(f"doubleconv{i}")(merged) + + out = self.reduce_channels(x) + + if self.output_act is not None: + out = self.output_act(out) + + return out + + def _build_decoder(self): + for i in range(len(self.channels) - 1, 0, -1): + self.add_module( + f"upsample{i}", + UpSample( + spatial_dims=self.spatial_dims, + in_channels=self.channels[i], + out_channels=self.channels[i - 1], + act=self.act, + dropout=self.dropout, + ), + ) + self.add_module( + f"doubleconv{i}", + ConvBlock( + spatial_dims=self.spatial_dims, + in_channels=self.channels[i - 1] * 2, + out_channels=self.channels[i - 1], + act=self.act, + dropout=self.dropout, + ), + ) diff --git 
a/clinicadl/monai_networks/nn/utils/__init__.py b/clinicadl/monai_networks/nn/utils/__init__.py
new file mode 100644
index 000000000..ce603f205
--- /dev/null
+++ b/clinicadl/monai_networks/nn/utils/__init__.py
@@ -0,0 +1,14 @@
+from .checks import (
+    check_adn_ordering,
+    check_conv_args,
+    check_mlp_args,
+    check_norm_layer,
+    check_pool_indices,
+    ensure_list_of_tuples,
+)
+from .shapes import (
+    calculate_conv_out_shape,
+    calculate_convtranspose_out_shape,
+    calculate_pool_out_shape,
+    calculate_unpool_out_shape,
+)
diff --git a/clinicadl/monai_networks/nn/utils/checks.py b/clinicadl/monai_networks/nn/utils/checks.py
new file mode 100644
index 000000000..1917a2894
--- /dev/null
+++ b/clinicadl/monai_networks/nn/utils/checks.py
@@ -0,0 +1,167 @@
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
+
+from ..layers.utils import (
+    ConvParameters,
+    NormalizationParameters,
+    NormLayer,
+)
+
+__all__ = [
+    "ensure_list_of_tuples",
+    "check_norm_layer",
+    "check_conv_args",
+    "check_mlp_args",
+    "check_pool_indices",
+]
+
+
+def ensure_list_of_tuples(
+    parameter: ConvParameters, dim: int, n_layers: int, name: str
+) -> List[Tuple[int, ...]]:
+    """
+    Checks spatial parameters (e.g. kernel_size) and returns a list of tuples.
+    Each element of the list corresponds to the parameters of one layer, and
+    each element of the tuple corresponds to the parameters for one dimension.
+    """
+    parameter = _check_conv_parameter(parameter, dim, n_layers, name)
+    if isinstance(parameter, tuple):
+        return [parameter] * n_layers
+    else:
+        return parameter
+
+
+def check_norm_layer(
+    norm: Optional[NormalizationParameters],
+) -> Optional[NormalizationParameters]:
+    """
+    Checks that the argument for normalization layers has the right format (i.e.
+    `norm_type` or (`norm_type`, `norm_layer_parameters`)) and checks potential
+    mandatory arguments in `norm_layer_parameters`.
+    """
+    if norm is None:
+        return norm
+
+    if not isinstance(norm, str) and not isinstance(norm, NormLayer):
+        if (
+            not isinstance(norm, tuple)
+            or len(norm) != 2
+            or not isinstance(norm[1], dict)
+        ):
+            raise ValueError(
+                "norm must be either the name of the normalization layer or a tuple with first the name and then the "
+                f"arguments of the layer in a dict. Got {norm}"
+            )
+        norm_mode = NormLayer(norm[0])
+        args = norm[1]
+    else:
+        norm_mode = NormLayer(norm)
+        args = {}
+    if norm_mode == NormLayer.GROUP and "num_groups" not in args:
+        raise ValueError(
+            f"num_groups is a mandatory argument for GroupNorm and must be passed in `norm`. Got `norm`={norm}"
+        )
+
+    return norm
+
+
+def check_adn_ordering(adn: str) -> str:
+    """
+    Checks the ADN sequence.
+    """
+    if not isinstance(adn, str):
+        raise ValueError(f"adn_ordering must be a string. Got {adn}")
+
+    for letter in adn:
+        if letter not in {
+            "A",
+            "D",
+            "N",
+        }:
+            raise ValueError(
+                f"adn_ordering must be composed of 'A', 'D' and/or 'N'. Got {letter}"
+            )
+    if len(adn) != len(set(adn)):
+        raise ValueError(f"adn_ordering cannot contain duplicated letters. Got {adn}")
+
+    return adn
+
+
+def check_conv_args(conv_args: Dict[str, Any]) -> None:
+    """
+    Checks that `conv_args` is a dict with at least the mandatory argument `channels`.
+    """
+    if not isinstance(conv_args, dict):
+        raise ValueError(
+            f"conv_args must be a dict with the arguments for the convolutional part. Got: {conv_args}"
+        )
+    if "channels" not in conv_args:
+        raise ValueError(
+            "channels is a mandatory argument for the convolutional part and must therefore be "
+            f"passed in conv_args. Got conv_args={conv_args}"
+        )
+
+
+def check_mlp_args(mlp_args: Optional[Dict[str, Any]]) -> None:
+    """
+    Checks that `mlp_args` is a dict with at least the mandatory argument `hidden_channels`.
+    """
+    if mlp_args is not None:
+        if not isinstance(mlp_args, dict):
+            raise ValueError(
+                f"mlp_args must be a dict with the arguments for the MLP part. Got: {mlp_args}"
+            )
+        if "hidden_channels" not in mlp_args:
+            raise ValueError(
+                "hidden_channels is a mandatory argument for the MLP part and must therefore be "
+                f"passed in mlp_args. Got mlp_args={mlp_args}"
+            )
+
+
+def check_pool_indices(
+    pooling_indices: Optional[Sequence[int]], n_layers: int
+) -> Sequence[int]:
+    """
+    Checks that the (un)pooling indices are consistent with the number of layers.
+    """
+    if pooling_indices is not None:
+        for idx in pooling_indices:
+            if idx > n_layers - 1:
+                raise ValueError(
+                    f"indices in (un)pooling_indices must be smaller than len(channels)-1, got (un)pooling_indices={pooling_indices} and len(channels)={n_layers}"
+                )
+            elif idx < -1:
+                raise ValueError(
+                    f"indices in (un)pooling_indices must be greater than or equal to -1, got (un)pooling_indices={pooling_indices}"
+                )
+        return sorted(pooling_indices)
+    else:
+        return []
+
+
+def _check_conv_parameter(
+    parameter: ConvParameters, dim: int, n_layers: int, name: str
+) -> Union[Tuple[int, ...], List[Tuple[int, ...]]]:
+    """
+    Checks spatial parameters (e.g. kernel_size).
+    """
+    if isinstance(parameter, int):
+        return (parameter,) * dim
+    elif isinstance(parameter, tuple):
+        if len(parameter) != dim:
+            raise ValueError(
+                f"If a tuple is passed for {name}, its dimension must be {dim}. Got {parameter}"
+            )
+        return parameter
+    elif isinstance(parameter, list):
+        if len(parameter) != n_layers:
+            raise ValueError(
+                f"If a list is passed, {name} must contain as many elements as there are layers. "
+                f"There are {n_layers} layers, but got {parameter}"
+            )
+        checked_params = []
+        for param in parameter:
+            checked_params.append(_check_conv_parameter(param, dim, n_layers, name))
+        return checked_params
+    else:
+        raise ValueError(f"{name} must be an int, a tuple or a list. Got {parameter}")
diff --git a/clinicadl/monai_networks/nn/utils/shapes.py b/clinicadl/monai_networks/nn/utils/shapes.py
new file mode 100644
index 000000000..a649af076
--- /dev/null
+++ b/clinicadl/monai_networks/nn/utils/shapes.py
@@ -0,0 +1,203 @@
+from math import ceil
+from typing import Optional, Sequence, Tuple, Union
+
+import numpy as np
+
+from ..layers.utils import PoolingLayer, UnpoolingLayer
+
+__all__ = [
+    "calculate_conv_out_shape",
+    "calculate_convtranspose_out_shape",
+    "calculate_pool_out_shape",
+    "calculate_unpool_out_shape",
+]
+
+
+def calculate_conv_out_shape(
+    in_shape: Union[Sequence[int], int],
+    kernel_size: Union[Sequence[int], int],
+    stride: Union[Sequence[int], int] = 1,
+    padding: Union[Sequence[int], int] = 0,
+    dilation: Union[Sequence[int], int] = 1,
+    **kwargs,  # for uniformization
+) -> Tuple[int, ...]:
+    """
+    Calculates the output shape of a convolution layer. All arguments can be scalars or multiple
+    values. Always returns a tuple.
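+
+    For each dimension: `out = floor((in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1`.
+    An illustrative call (values chosen arbitrarily):
+
+    >>> calculate_conv_out_shape(in_shape=(64, 64), kernel_size=3, stride=2, padding=1)
+    (32, 32)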
+ """ + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + dilation_np = np.atleast_1d(dilation) + + out_shape_np = ( + (in_shape_np + 2 * padding_np - dilation_np * (kernel_size_np - 1) - 1) + / stride_np + ) + 1 + + return tuple(int(s) for s in out_shape_np) + + +def calculate_convtranspose_out_shape( + in_shape: Union[Sequence[int], int], + kernel_size: Union[Sequence[int], int], + stride: Union[Sequence[int], int] = 1, + padding: Union[Sequence[int], int] = 0, + output_padding: Union[Sequence[int], int] = 0, + dilation: Union[Sequence[int], int] = 1, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of a transposed convolution layer. All arguments can be scalars or + multiple values. Always return a tuple. + """ + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + dilation_np = np.atleast_1d(dilation) + output_padding_np = np.atleast_1d(output_padding) + + out_shape_np = ( + (in_shape_np - 1) * stride_np + - 2 * padding_np + + dilation_np * (kernel_size_np - 1) + + output_padding_np + + 1 + ) + + return tuple(int(s) for s in out_shape_np) + + +def calculate_pool_out_shape( + pool_mode: Union[str, PoolingLayer], + in_shape: Union[Sequence[int], int], + **kwargs, +) -> Tuple[int, ...]: + """ + Calculates the output shape of a pooling layer. The first argument is the type of pooling + performed (`max` or `avg`). All other arguments can be scalars or multiple values, except + `ceil_mode`. + Always return a tuple. + """ + pool_mode = PoolingLayer(pool_mode) + if pool_mode == PoolingLayer.MAX: + return _calculate_maxpool_out_shape(in_shape, **kwargs) + elif pool_mode == PoolingLayer.AVG: + return _calculate_avgpool_out_shape(in_shape, **kwargs) + elif pool_mode == PoolingLayer.ADAPT_MAX or pool_mode == PoolingLayer.ADAPT_AVG: + return _calculate_adaptivepool_out_shape(in_shape, **kwargs) + + +def calculate_unpool_out_shape( + unpool_mode: Union[str, UnpoolingLayer], + in_shape: Union[Sequence[int], int], + **kwargs, +) -> Tuple[int, ...]: + """ + Calculates the output shape of an unpooling layer. The first argument is the type of unpooling + performed (`upsample` or `convtranspose`). + Always return a tuple. + """ + unpool_mode = UnpoolingLayer(unpool_mode) + if unpool_mode == UnpoolingLayer.UPSAMPLE: + return _calculate_upsample_out_shape(in_shape, **kwargs) + elif unpool_mode == UnpoolingLayer.CONV_TRANS: + return calculate_convtranspose_out_shape(in_shape, **kwargs) + + +def _calculate_maxpool_out_shape( + in_shape: Union[Sequence[int], int], + kernel_size: Union[Sequence[int], int], + stride: Optional[Union[Sequence[int], int]] = None, + padding: Union[Sequence[int], int] = 0, + dilation: Union[Sequence[int], int] = 1, + ceil_mode: bool = False, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of a MaxPool layer. 
+ """ + if stride is None: + stride = kernel_size + + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + dilation_np = np.atleast_1d(dilation) + + out_shape_np = ( + (in_shape_np + 2 * padding_np - dilation_np * (kernel_size_np - 1) - 1) + / stride_np + ) + 1 + if ceil_mode: + out_shape = tuple(ceil(s) for s in out_shape_np) + else: + out_shape = tuple(int(s) for s in out_shape_np) + + return out_shape + + +def _calculate_avgpool_out_shape( + in_shape: Union[Sequence[int], int], + kernel_size: Union[Sequence[int], int], + stride: Optional[Union[Sequence[int], int]] = None, + padding: Union[Sequence[int], int] = 0, + ceil_mode: bool = False, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of an AvgPool layer. + """ + if stride is None: + stride = kernel_size + + in_shape_np = np.atleast_1d(in_shape) + kernel_size_np = np.atleast_1d(kernel_size) + stride_np = np.atleast_1d(stride) + padding_np = np.atleast_1d(padding) + + out_shape_np = ((in_shape_np + 2 * padding_np - kernel_size_np) / stride_np) + 1 + if ceil_mode: + out_shape_np = np.ceil(out_shape_np) + out_shape_np[(out_shape_np - 1) * stride_np >= in_shape_np + padding_np] -= 1 + + return tuple(int(s) for s in out_shape_np) + + +def _calculate_adaptivepool_out_shape( + in_shape: Union[Sequence[int], int], + output_size: Union[Sequence[int], int], + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of an AdaptiveMaxPool or AdaptiveAvgPool layer. + """ + in_shape_np = np.atleast_1d(in_shape) + out_shape_np = np.ones_like(in_shape_np) * np.atleast_1d(output_size) + + return tuple(int(s) for s in out_shape_np) + + +def _calculate_upsample_out_shape( + in_shape: Union[Sequence[int], int], + scale_factor: Optional[Union[Sequence[int], int]] = None, + size: Optional[Union[Sequence[int], int]] = None, + **kwargs, # for uniformization +) -> Tuple[int, ...]: + """ + Calculates the output shape of an Upsample layer. + """ + in_shape_np = np.atleast_1d(in_shape) + if size and scale_factor: + raise ValueError("Pass either size or scale_factor, not both.") + elif size: + out_shape_np = np.ones_like(in_shape_np) * np.atleast_1d(size) + elif scale_factor: + out_shape_np = in_shape_np * scale_factor + else: + raise ValueError("Pass one of size or scale_factor.") + + return tuple(int(s) for s in out_shape_np) diff --git a/clinicadl/monai_networks/nn/vae.py b/clinicadl/monai_networks/nn/vae.py new file mode 100644 index 000000000..9dac6b43b --- /dev/null +++ b/clinicadl/monai_networks/nn/vae.py @@ -0,0 +1,200 @@ +from copy import deepcopy +from typing import Any, Dict, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn + +from .autoencoder import AutoEncoder +from .layers.utils import ActivationParameters, UnpoolingMode + + +class VAE(nn.Module): + """ + A Variational AutoEncoder with convolutional and fully connected layers. + + The user must pass the arguments to build an encoder, from its convolutional and + fully connected parts, and the decoder will be automatically built by taking the + symmetrical network. + + More precisely, to build the decoder, the order of the encoding layers is reverted, convolutions are + replaced by transposed convolutions and pooling layers are replaced by either upsampling or transposed + convolution layers. 
+    Please note that the order of `Activation`, `Dropout` and `Normalization`, defined with the
+    argument `adn_ordering` in `conv_args`, is the same for the encoder and the decoder.
+
+    Note that a `VAE` is an aggregation of a `CNN` (:py:class:`clinicadl.monai_networks.nn.
+    cnn.CNN`), whose last linear layer is duplicated to infer both the mean and the log variance,
+    and a `Generator` (:py:class:`clinicadl.monai_networks.nn.generator.Generator`).
+
+    Parameters
+    ----------
+    in_shape : Sequence[int]
+        sequence of integers stating the dimension of the input tensor (minus batch dimension).
+    latent_size : int
+        size of the latent vector.
+    conv_args : Dict[str, Any]
+        the arguments for the convolutional part of the encoder. The arguments are those accepted
+        by :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape`, which
+        is specified here. So, the only mandatory argument is `channels`.
+    mlp_args : Optional[Dict[str, Any]] (optional, default=None)
+        the arguments for the MLP part of the encoder. The arguments are those accepted by
+        :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels`, which is inferred
+        from the output of the convolutional part, and `out_channels`, which is set to `latent_size`.
+        So, the only mandatory argument is `hidden_channels`.\n
+        If None, the MLP part will be reduced to a single linear layer.\n
+        The last linear layer will be duplicated to infer both the mean and the log variance.
+    out_channels : Optional[int] (optional, default=None)
+        number of output channels. If None, the output will have the same number of channels as the
+        input.
+    output_act : Optional[ActivationParameters] (optional, default=None)
+        a potential activation layer applied to the output of the network, and optionally its arguments.
+        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
+        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
+        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
+        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
+        arguments for each of them.
+    unpooling_mode : Union[str, UnpoolingMode] (optional, default=UnpoolingMode.NEAREST)
+        type of unpooling. Can be either `"nearest"`, `"linear"`, `"bilinear"`, `"bicubic"`, `"trilinear"` or
+        `"convtranspose"`.\n
+        - `nearest`: unpooling is performed by upsampling with the :italic:`nearest` algorithm (see [PyTorch's Upsample layer]
+        (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html)).
+        - `linear`: unpooling is performed by upsampling with the :italic:`linear` algorithm. Only works with 1D images (excluding the
+        channel dimension).
+        - `bilinear`: unpooling is performed by upsampling with the :italic:`bilinear` algorithm. Only works with 2D images.
+        - `bicubic`: unpooling is performed by upsampling with the :italic:`bicubic` algorithm. Only works with 2D images.
+        - `trilinear`: unpooling is performed by upsampling with the :italic:`trilinear` algorithm. Only works with 3D images.
+        - `convtranspose`: unpooling is performed with a transposed convolution, whose parameters (kernel size, stride, etc.) are
+        computed to reverse the pooling operation.
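Given the constructor arguments documented above, a minimal training-step sketch follows. The dummy batch and the summed MSE + KL objective are illustrative assumptions, not part of this diff, and it assumes the default settings reconstruct to the input shape:

import torch
import torch.nn.functional as F

from clinicadl.monai_networks.nn.vae import VAE

vae = VAE(in_shape=(1, 16, 16), latent_size=4, conv_args={"channels": [2]})
x = torch.randn(8, 1, 16, 16)  # dummy batch

recon, mu, log_var = vae(x)  # forward returns (reconstruction, mean, log-variance)

# standard VAE objective: reconstruction term + KL divergence to N(0, I)
recon_loss = F.mse_loss(recon, x, reduction="sum")
kl_loss = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
(recon_loss + kl_loss).backward()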
+ + Examples + -------- + >>> VAE( + in_shape=(1, 16, 16), + latent_size=4, + conv_args={"channels": [2]}, + mlp_args={"hidden_channels": [16], "output_act": "relu"}, + out_channels=2, + output_act="sigmoid", + unpooling_mode="bilinear", + ) + VAE( + (encoder): CNN( + (convolutions): ConvEncoder( + (layer0): Convolution( + (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + ) + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=392, out_features=16, bias=True) + (adn): ADN( + (N): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (A): PReLU(num_parameters=1) + ) + ) + (output): Identity() + ) + ) + (mu): Sequential( + (linear): Linear(in_features=16, out_features=4, bias=True) + (output_act): ReLU() + ) + (log_var): Sequential( + (linear): Linear(in_features=16, out_features=4, bias=True) + (output_act): ReLU() + ) + (decoder): Generator( + (mlp): MLP( + (flatten): Flatten(start_dim=1, end_dim=-1) + (hidden0): Sequential( + (linear): Linear(in_features=4, out_features=16, bias=True) + (adn): ADN( + (N): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) + (A): PReLU(num_parameters=1) + ) + ) + (output): Sequential( + (linear): Linear(in_features=16, out_features=392, bias=True) + (output_act): ReLU() + ) + ) + (reshape): Reshape() + (convolutions): ConvDecoder( + (layer0): Convolution( + (conv): ConvTranspose2d(2, 2, kernel_size=(3, 3), stride=(1, 1)) + ) + (output_act): Sigmoid() + ) + ) + ) + """ + + def __init__( + self, + in_shape: Sequence[int], + latent_size: int, + conv_args: Dict[str, Any], + mlp_args: Optional[Dict[str, Any]] = None, + out_channels: Optional[int] = None, + output_act: Optional[ActivationParameters] = None, + unpooling_mode: Union[str, UnpoolingMode] = UnpoolingMode.NEAREST, + ) -> None: + super().__init__() + ae = AutoEncoder( + in_shape, + latent_size, + conv_args, + mlp_args, + out_channels, + output_act, + unpooling_mode, + ) + + # replace last mlp layer by two parallel layers + mu_layers = deepcopy(ae.encoder.mlp.output) + log_var_layers = deepcopy(ae.encoder.mlp.output) + self._reset_weights( + log_var_layers + ) # to have different initialization for the two layers + ae.encoder.mlp.output = nn.Identity() + + self.encoder = ae.encoder + self.mu = mu_layers + self.log_var = log_var_layers + self.decoder = ae.decoder + + def forward( + self, x: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Encoding, sampling and decoding. + """ + feature = self.encoder(x) + mu = self.mu(feature) + log_var = self.log_var(feature) + z = self.reparameterize(mu, log_var) + + return self.decoder(z), mu, log_var + + def reparameterize(self, mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor: + """ + Samples a random vector from a gaussian distribution, given the mean and log-variance + of this distribution. + """ + std = torch.exp(0.5 * log_var) + + if self.training: # multiply random noise with std only during training + std = torch.randn_like(std).mul(std) + + return std.add_(mu) + + @classmethod + def _reset_weights(cls, layer: Union[nn.Sequential, nn.Linear]) -> None: + """ + Resets the output layer(s) of an MLP. 
+ """ + if isinstance(layer, nn.Linear): + layer.reset_parameters() + else: + layer.linear.reset_parameters() diff --git a/clinicadl/monai_networks/nn/vit.py b/clinicadl/monai_networks/nn/vit.py new file mode 100644 index 000000000..372e1728a --- /dev/null +++ b/clinicadl/monai_networks/nn/vit.py @@ -0,0 +1,420 @@ +import math +import re +from collections import OrderedDict +from copy import deepcopy +from enum import Enum +from typing import Any, Mapping, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from monai.networks.blocks.pos_embed_utils import build_sincos_position_embedding +from monai.networks.layers import Conv +from monai.networks.layers.utils import get_act_layer +from monai.utils import ensure_tuple_rep +from torch.hub import load_state_dict_from_url +from torchvision.models.vision_transformer import ( + ViT_B_16_Weights, + ViT_B_32_Weights, + ViT_L_16_Weights, + ViT_L_32_Weights, +) + +from .layers.utils import ActFunction, ActivationParameters +from .layers.vit import Encoder + + +class PosEmbedType(str, Enum): + """Available position embedding types for ViT.""" + + LEARN = "learnable" + SINCOS = "sincos" + + +class ViT(nn.Module): + """ + Vision Transformer based on the [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale] + (https://arxiv.org/pdf/2010.11929) paper. + Adapted from [torchvision's implementation](https://pytorch.org/vision/main/models/vision_transformer.html). + + The user can customize the patch size, the embedding dimension, the number of transformer blocks, the number of + attention heads, as well as other parameters like the type of position embedding. + + Parameters + ---------- + in_shape : Sequence[int] + sequence of integers stating the dimension of the input tensor (minus batch dimension). + patch_size : Union[Sequence[int], int] + sequence of integers stating the patch size (minus batch and channel dimensions). If int, the same + patch size will be used for all dimensions. + Patch size must divide image size in all dimensions. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the patch embeddings after the last transformer block will be returned. + embedding_dim : int (optional, default=768) + size of the embedding vectors. Must be divisible by `num_heads` as each head will be responsible for + a part of the embedding vectors. Default to 768, as for 'ViT-Base' in the original paper. + num_layers : int (optional, default=12) + number of consecutive transformer blocks. Default to 12, as for 'ViT-Base' in the original paper. + num_heads : int (optional, default=12) + number of heads in the self-attention block. Must divide `embedding_size`. + Default to 12, as for 'ViT-Base' in the original paper. + mlp_dim : int (optional, default=3072) + size of the hidden layer in the MLP part of the transformer block. Default to 3072, as for 'ViT-Base' + in the original paper. + pos_embed_type : Optional[Union[str, PosEmbedType]] (optional, default="learnable") + type of position embedding. Can be either `"learnable"`, `"sincos"` or `None`.\n + - `learnable`: the position embeddings are parameters that will be learned during the training + process. + - `sincos`: the position embeddings are fixed and determined with sinus and cosinus formulas (based on Dosovitskiy et al., + 'Attention Is All You Need, https://arxiv.org/pdf/1706.03762). Only implemented for 2D and 3D images. 
With `sincos` + position embedding, `embedding_dim` must be divisible by 4 for 2D images and by 6 for 3D images. + - `None`: no position embeddings are used.\n + Default to `"learnable"`, as in the original paper. + output_act : Optional[ActivationParameters] (optional, default=ActFunction.TANH) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them.\n + Default to `"tanh"`, as in the original paper. + dropout : Optional[float] (optional, default=None) + dropout ratio. If None, no dropout. + + Examples + -------- + >>> ViT( + in_shape=(3, 60, 64), + patch_size=4, + num_outputs=2, + embedding_dim=32, + num_layers=2, + num_heads=4, + mlp_dim=128, + output_act="softmax", + ) + ViT( + (conv_proj): Conv2d(3, 32, kernel_size=(4, 4), stride=(4, 4)) + (encoder): Encoder( + (dropout): Dropout(p=0.0, inplace=False) + (layers): ModuleList( + (0-1): 2 x EncoderBlock( + (norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True) + (self_attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=32, out_features=32, bias=True) + ) + (dropout): Dropout(p=0.0, inplace=False) + (norm2): LayerNorm((32,), eps=1e-06, elementwise_affine=True) + (mlp): MLPBlock( + (0): Linear(in_features=32, out_features=128, bias=True) + (1): GELU(approximate='none') + (2): Dropout(p=0.0, inplace=False) + (3): Linear(in_features=128, out_features=32, bias=True) + (4): Dropout(p=0.0, inplace=False) + ) + ) + ) + (norm): LayerNorm((32,), eps=1e-06, elementwise_affine=True) + ) + (fc): Sequential( + (out): Linear(in_features=32, out_features=2, bias=True) + (output_act): Softmax(dim=None) + ) + ) + """ + + def __init__( + self, + in_shape: Sequence[int], + patch_size: Union[Sequence[int], int], + num_outputs: Optional[int], + embedding_dim: int = 768, + num_layers: int = 12, + num_heads: int = 12, + mlp_dim: int = 3072, + pos_embed_type: Optional[Union[str, PosEmbedType]] = PosEmbedType.LEARN, + output_act: Optional[ActivationParameters] = ActFunction.TANH, + dropout: Optional[float] = None, + ) -> None: + super().__init__() + + self.in_channels, *self.img_size = in_shape + self.spatial_dims = len(self.img_size) + self.patch_size = ensure_tuple_rep(patch_size, self.spatial_dims) + + self._check_embedding_dim(embedding_dim, num_heads) + self._check_patch_size(self.img_size, self.patch_size) + self.embedding_dim = embedding_dim + self.classification = True if num_outputs else False + dropout = dropout if dropout else 0.0 + + self.conv_proj = Conv[Conv.CONV, self.spatial_dims]( # pylint: disable=not-callable + in_channels=self.in_channels, + out_channels=self.embedding_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + ) + self.seq_length = int( + np.prod(np.array(self.img_size) // np.array(self.patch_size)) + ) + + # Add a class token + if self.classification: + self.class_token = nn.Parameter(torch.zeros(1, 1, self.embedding_dim)) + self.seq_length += 1 + + pos_embedding = self._get_pos_embedding(pos_embed_type) + self.encoder = Encoder( + 
self.seq_length, + num_layers, + num_heads, + self.embedding_dim, + mlp_dim, + dropout=dropout, + attention_dropout=dropout, + pos_embedding=pos_embedding, + ) + + if self.classification: + self.class_token = nn.Parameter(torch.zeros(1, 1, embedding_dim)) + self.fc = nn.Sequential( + OrderedDict([("out", nn.Linear(embedding_dim, num_outputs))]) + ) + self.fc.output_act = get_act_layer(output_act) if output_act else None + else: + self.fc = None + + self._init_layers() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.conv_proj(x) + # (n, hidden_dim, n_h, n_w) -> (n, (h * w * d), hidden_dim) + x = x.flatten(2).transpose(-1, -2) + n = x.shape[0] + + # Expand the class token to the full batch + if self.fc: + batch_class_token = self.class_token.expand(n, -1, -1) + x = torch.cat([batch_class_token, x], dim=1) + + x = self.encoder(x) + + # Classifier "token" as used by standard language architectures + if self.fc: + x = x[:, 0] + x = self.fc(x) + + return x + + def _get_pos_embedding( + self, pos_embed_type: Optional[Union[str, PosEmbedType]] + ) -> Optional[nn.Parameter]: + """ + Gets position embeddings. If `pos_embed_type` is "learnable", will return None as it will be handled + by the encoder module. + """ + if pos_embed_type is None: + pos_embed = nn.Parameter( + torch.zeros(1, self.seq_length, self.embedding_dim) + ) + pos_embed.requires_grad = False + return pos_embed + + pos_embed_type = PosEmbedType(pos_embed_type) + + if pos_embed_type == PosEmbedType.LEARN: + return None # will be initialized inside the Encoder + + elif pos_embed_type == PosEmbedType.SINCOS: + if self.spatial_dims != 2 and self.spatial_dims != 3: + raise ValueError( + f"{self.spatial_dims}D sincos position embedding not implemented" + ) + elif self.spatial_dims == 2 and self.embedding_dim % 4: + raise ValueError( + f"embedding_dim must be divisible by 4 for 2D sincos position embedding. Got embedding_dim={self.embedding_dim}" + ) + elif self.spatial_dims == 3 and self.embedding_dim % 6: + raise ValueError( + f"embedding_dim must be divisible by 6 for 3D sincos position embedding. Got embedding_dim={self.embedding_dim}" + ) + grid_size = [] + for in_size, pa_size in zip(self.img_size, self.patch_size): + grid_size.append(in_size // pa_size) + pos_embed = build_sincos_position_embedding( + grid_size, self.embedding_dim, self.spatial_dims + ) + if self.classification: + pos_embed = torch.nn.Parameter( + torch.cat([torch.zeros(1, 1, self.embedding_dim), pos_embed], dim=1) + ) # add 0 for class token pos embedding + pos_embed.requires_grad = False + return pos_embed + + def _init_layers(self): + """ + Initializes some layers, based on torchvision's implementation: https://pytorch.org/vision/main/ + _modules/torchvision/models/vision_transformer.html + """ + fan_in = self.conv_proj.in_channels * np.prod(self.conv_proj.kernel_size) + nn.init.trunc_normal_(self.conv_proj.weight, std=math.sqrt(1 / fan_in)) + nn.init.zeros_(self.conv_proj.bias) + + @classmethod + def _check_embedding_dim(cls, embedding_dim: int, num_heads: int) -> None: + """ + Checks consistency between embedding dimension and number of heads. + """ + if embedding_dim % num_heads != 0: + raise ValueError( + f"embedding_dim should be divisible by num_heads. Got embedding_dim={embedding_dim} " + f" and num_heads={num_heads}" + ) + + @classmethod + def _check_patch_size( + cls, img_size: Tuple[int, ...], patch_size: Tuple[int, ...] + ) -> None: + """ + Checks consistency between image size and patch size. 
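To make the patch arithmetic above concrete, here is a small sketch, with sizes taken from the docstring example (the assertions are illustrative): the patch projection divides each spatial dimension by the patch size, and one class token is prepended when `num_outputs` is set.

import torch

from clinicadl.monai_networks.nn.vit import ViT

vit = ViT(
    in_shape=(3, 60, 64),
    patch_size=4,
    num_outputs=2,
    embedding_dim=32,
    num_layers=2,
    num_heads=4,
    mlp_dim=128,
)

# (60 // 4) * (64 // 4) = 240 patches, + 1 class token
assert vit.seq_length == 241

out = vit(torch.randn(2, 3, 60, 64))
assert out.shape == (2, 2)  # (batch, num_outputs)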
+ """ + for i, p in zip(img_size, patch_size): + if i % p != 0: + raise ValueError( + f"img_size should be divisible by patch_size. Got img_size={img_size} " + f" and patch_size={patch_size}" + ) + + +class SOTAViT(str, Enum): + """Supported ViT networks.""" + + B_16 = "ViT-B/16" + B_32 = "ViT-B/32" + L_16 = "ViT-L/16" + L_32 = "ViT-L/32" + + +def get_vit( + name: Union[str, SOTAViT], + num_outputs: Optional[int], + output_act: ActivationParameters = None, + pretrained: bool = False, +) -> ViT: + """ + To get a Vision Transformer implemented in the [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale] + (https://arxiv.org/pdf/2010.11929) paper. + + Only the last fully connected layer will be changed to match `num_outputs`. + + The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not + used pretrained weights, as it is task specific. + + .. warning:: `ViT-B/16`, `ViT-B/32`, `ViT-L/16` and `ViT-L/32` work with 2D images of size (224, 224), with 3 channels. + + Parameters + ---------- + model : Union[str, SOTAViT] + The name of the Vision Transformer. Available networks are `ViT-B/16`, `ViT-B/32`, `ViT-L/16` and `ViT-L/32`. + num_outputs : Optional[int] + number of output variables after the last linear layer.\n + If None, the features before the last fully connected layer will be returned. + output_act : ActivationParameters (optional, default=None) + if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, + and optionally its arguments. + Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n + `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, + `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] + (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional + arguments for each of them. + pretrained : bool (optional, default=False) + whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// + pytorch.org/vision/main/models/vision_transformer.html). + + Returns + ------- + ViT + The network, with potentially pretrained weights. 
+ """ + name = SOTAViT(name) + if name == SOTAViT.B_16: + in_shape = (3, 224, 224) + patch_size = 16 + embedding_dim = 768 + mlp_dim = 3072 + num_layers = 12 + num_heads = 12 + model_url = ViT_B_16_Weights.DEFAULT.url + elif name == SOTAViT.B_32: + in_shape = (3, 224, 224) + patch_size = 32 + embedding_dim = 768 + mlp_dim = 3072 + num_layers = 12 + num_heads = 12 + model_url = ViT_B_32_Weights.DEFAULT.url + elif name == SOTAViT.L_16: + in_shape = (3, 224, 224) + patch_size = 16 + embedding_dim = 1024 + mlp_dim = 4096 + num_layers = 24 + num_heads = 16 + model_url = ViT_L_16_Weights.DEFAULT.url + elif name == SOTAViT.L_32: + in_shape = (3, 224, 224) + patch_size = 32 + embedding_dim = 1024 + mlp_dim = 4096 + num_layers = 24 + num_heads = 16 + model_url = ViT_L_32_Weights.DEFAULT.url + + # pylint: disable=possibly-used-before-assignment + vit = ViT( + in_shape=in_shape, + patch_size=patch_size, + num_outputs=num_outputs, + embedding_dim=embedding_dim, + mlp_dim=mlp_dim, + num_heads=num_heads, + num_layers=num_layers, + output_act=output_act, + ) + + if pretrained: + pretrained_dict = load_state_dict_from_url(model_url, progress=True) + if num_outputs is None: + del pretrained_dict["class_token"] + pretrained_dict["encoder.pos_embedding"] = pretrained_dict[ + "encoder.pos_embedding" + ][:, 1:] # remove class token position embedding + fc_layers = deepcopy(vit.fc) + vit.fc = None + vit.load_state_dict(_state_dict_adapter(pretrained_dict)) + vit.fc = fc_layers + + return vit + + +def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: + """ + A mapping between torchvision's layer names and ours. + """ + state_dict = {k: v for k, v in state_dict.items() if "heads" not in k} + + mappings = [ + ("ln_", "norm"), + ("ln", "norm"), + (r"encoder_layer_(\d+)", r"\1"), + ] + + for key in list(state_dict.keys()): + new_key = key + for transform in mappings: + new_key = re.sub(transform[0], transform[1], new_key) + state_dict[new_key] = state_dict.pop(key) + + return state_dict From c0b79da6b50f40aa5b44447b353c51a7241f3361 Mon Sep 17 00:00:00 2001 From: camillebrianceau <57992134+camillebrianceau@users.noreply.github.com> Date: Fri, 18 Oct 2024 13:26:19 +0200 Subject: [PATCH 15/16] Cb extract validator (#666) --- clinicadl/predictor/old_predictor.py | 18 + clinicadl/predictor/predictor.py | 1174 ++++++++++++++++++++++++ clinicadl/utils/iotools/train_utils.py | 8 + 3 files changed, 1200 insertions(+) diff --git a/clinicadl/predictor/old_predictor.py b/clinicadl/predictor/old_predictor.py index 8314ce9d9..360917e36 100644 --- a/clinicadl/predictor/old_predictor.py +++ b/clinicadl/predictor/old_predictor.py @@ -17,12 +17,22 @@ ) from clinicadl.experiment_manager.maps_manager import MapsManager from clinicadl.interpret.config import InterpretConfig +<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py from clinicadl.metrics.old_metrics.metric_module import MetricModule from clinicadl.metrics.old_metrics.utils import ( check_selection_metric, find_selection_metrics, ) from clinicadl.networks.old_network.network import Network +======== +from clinicadl.maps_manager.maps_manager import MapsManager +from clinicadl.metrics.metric_module import MetricModule +from clinicadl.metrics.utils import ( + check_selection_metric, + find_selection_metrics, +) +from clinicadl.network.network import Network +>>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py from clinicadl.predictor.config import PredictConfig from clinicadl.trainer.tasks_utils import ( columns, @@ -49,7 +59,11 
@@ def __init__(self, _config: Union[PredictConfig, InterpretConfig]) -> None: self._config = _config from clinicadl.splitter.config import SplitterConfig +<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py from clinicadl.splitter.old_splitter import Splitter +======== + from clinicadl.splitter.splitter import Splitter +>>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py self.maps_manager = MapsManager(_config.maps_manager.maps_dir) self._config.adapt_with_maps_manager_info(self.maps_manager) @@ -1060,7 +1074,11 @@ def _compute_output_tensors( Compute the output tensors and saves them in the MAPS. Args: +<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py dataset (clinicadl.dataset.caps_dataset.CapsDataset): wrapper of the data set. +======== + dataset (clinicadl.caps_dataset.data.CapsDataset): wrapper of the data set. +>>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py data_group (str): name of the data group used for the task. split (int): split number. selection_metrics (list[str]): metrics used for model selection. diff --git a/clinicadl/predictor/predictor.py b/clinicadl/predictor/predictor.py index f173e3dde..1d6e32e90 100644 --- a/clinicadl/predictor/predictor.py +++ b/clinicadl/predictor/predictor.py @@ -1,3 +1,4 @@ +<<<<<<< HEAD from clinicadl.dataset.caps_dataset import CapsDataset from clinicadl.experiment_manager.experiment_manager import ExperimentManager @@ -10,3 +11,1176 @@ def __init__(self, manager: ExperimentManager): def predict(self, dataset_test: CapsDataset, split: int): """TO COMPLETE""" pass +======= +import json +import shutil +from logging import getLogger +from pathlib import Path +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import pandas as pd +import torch +import torch.distributed as dist +from torch.amp import autocast +from torch.nn.modules.loss import _Loss +from torch.utils.data import DataLoader +from torch.utils.data.distributed import DistributedSampler + +from clinicadl.dataset.caps_dataset import ( + return_dataset, +) +from clinicadl.experiment_manager.maps_manager import MapsManager +from clinicadl.interpret.config import InterpretConfig +<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py +from clinicadl.metrics.old_metrics.metric_module import MetricModule +from clinicadl.metrics.old_metrics.utils import ( + check_selection_metric, + find_selection_metrics, +) +from clinicadl.networks.old_network.network import Network +======== +from clinicadl.maps_manager.maps_manager import MapsManager +from clinicadl.metrics.metric_module import MetricModule +from clinicadl.metrics.utils import ( + check_selection_metric, + find_selection_metrics, +) +from clinicadl.network.network import Network +>>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py +from clinicadl.predictor.config import PredictConfig +from clinicadl.trainer.tasks_utils import ( + columns, + compute_metrics, + generate_label_code, + generate_test_row, + get_criterion, +) +from clinicadl.transforms.config import TransformsConfig +from clinicadl.utils.computational.ddp import DDP, cluster +from clinicadl.utils.enum import Task +from clinicadl.utils.exceptions import ( + ClinicaDLArgumentError, + ClinicaDLDataLeakageError, + MAPSError, +) + +logger = getLogger("clinicadl.predict_manager") +level_list: List[str] = ["warning", "info", "debug"] + + +class Predictor: + def __init__(self, _config: Union[PredictConfig, InterpretConfig]) -> None: + self._config = _config + + from 
clinicadl.splitter.config import SplitterConfig +<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py + from clinicadl.splitter.old_splitter import Splitter +======== + from clinicadl.splitter.splitter import Splitter +>>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py + + self.maps_manager = MapsManager(_config.maps_manager.maps_dir) + self._config.adapt_with_maps_manager_info(self.maps_manager) + tmp = self._config.data.model_dump( + exclude=set(["preprocessing_dict", "mode", "caps_dict"]) + ) + tmp.update(self._config.split.model_dump()) + tmp.update(self._config.validation.model_dump()) + self.splitter = Splitter(SplitterConfig(**tmp)) + + def predict( + self, + label_code: Union[str, dict[str, int]] = "default", + ): + """Performs the prediction task on a subset of caps_directory defined in a TSV file.""" + + group_df = self._config.data.create_groupe_df() + self._check_data_group(group_df) + criterion = get_criterion( + self.maps_manager.network_task, self.maps_manager.loss + ) + + for split in self.splitter.split_iterator(): + logger.info(f"Prediction of split {split}") + group_df, group_parameters = self.get_group_info( + self._config.maps_manager.data_group, split + ) + # Find label code if not given + if self._config.data.is_given_label_code( + self.maps_manager.label, label_code + ): + generate_label_code( + self.maps_manager.network_task, group_df, self._config.data.label + ) + # Erase previous TSV files on master process + if not self._config.validation.selection_metrics: + split_selection_metrics = find_selection_metrics( + self.maps_manager.maps_path, + split, + ) + else: + split_selection_metrics = self._config.validation.selection_metrics + for selection in split_selection_metrics: + tsv_dir = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection}" + / self._config.maps_manager.data_group + ) + tsv_pattern = f"{self._config.maps_manager.data_group}*.tsv" + for tsv_file in tsv_dir.glob(tsv_pattern): + tsv_file.unlink() + + self._config.data.check_label(self.maps_manager.label) + if self.maps_manager.multi_network: + for network in range(self.maps_manager.num_networks): + self._predict_single( + group_parameters, + group_df, + self._config.transforms, + label_code, + criterion, + split, + split_selection_metrics, + network, + ) + else: + self._predict_single( + group_parameters, + group_df, + self._config.transforms, + label_code, + criterion, + split, + split_selection_metrics, + ) + if cluster.master: + self._ensemble_prediction( + self.maps_manager, + self._config.maps_manager.data_group, + split, + self._config.validation.selection_metrics, + self._config.data.use_labels, + self._config.validation.skip_leak_check, + ) + + def _predict_single( + self, + group_parameters, + group_df, + transforms, + label_code, + criterion, + split, + split_selection_metrics, + network: Optional[int] = None, + ): + """_summary_""" + + assert isinstance(self._config, PredictConfig) + # assert self._config.data.label + + data_test = return_dataset( + group_parameters["caps_directory"], + group_df, + self.maps_manager.preprocessing_dict, + transforms_config=self._config.transforms, + multi_cohort=group_parameters["multi_cohort"], + label_presence=self._config.data.use_labels, + label=self._config.data.label, + label_code=( + self.maps_manager.label_code if label_code == "default" else label_code + ), + cnn_index=network, + ) + test_loader = DataLoader( + data_test, + batch_size=( + self._config.dataloader.batch_size + if 
self._config.dataloader.batch_size is not None + else self.maps_manager.batch_size + ), + shuffle=False, + sampler=DistributedSampler( + data_test, + num_replicas=cluster.world_size, + rank=cluster.rank, + shuffle=False, + ), + num_workers=self._config.dataloader.n_proc + if self._config.dataloader.n_proc is not None + else self.maps_manager.n_proc, + ) + self._test_loader( + maps_manager=self.maps_manager, + dataloader=test_loader, + criterion=criterion, + data_group=self._config.maps_manager.data_group, + split=split, + selection_metrics=split_selection_metrics, + use_labels=self._config.data.use_labels, + gpu=self._config.computational.gpu, + amp=self._config.computational.amp, + network=network, + ) + if self._config.maps_manager.save_tensor: + logger.debug("Saving tensors") + self._compute_output_tensors( + maps_manager=self.maps_manager, + dataset=data_test, + data_group=self._config.maps_manager.data_group, + split=split, + selection_metrics=self._config.validation.selection_metrics, + gpu=self._config.computational.gpu, + network=network, + ) + if self._config.maps_manager.save_nifti: + self._compute_output_nifti( + dataset=data_test, + split=split, + network=network, + ) + if self._config.maps_manager.save_latent_tensor: + self._compute_latent_tensors( + dataset=data_test, + split=split, + network=network, + ) + + def _compute_latent_tensors( + self, + dataset, + split: int, + nb_images: Optional[int] = None, + network: Optional[int] = None, + ): + """ + Compute the output tensors and saves them in the MAPS. + Parameters + ---------- + dataset : _type_ + wrapper of the data set. + data_group : _type_ + name of the data group used for the task. + split : _type_ + split number. + selection_metrics : _type_ + metrics used for model selection. + nb_images : _type_ (optional, default=None) + number of full images to write. Default computes the outputs of the whole data set. + gpu : _type_ (optional, default=None) + If given, a new value for the device of the model will be computed. + network : _type_ (optional, default=None) + Index of the network tested (only used in multi-network setting). 
+ """ + for selection_metric in self._config.validation.selection_metrics: + # load the best trained model during the training + model, _ = self.maps_manager._init_model( + transfer_path=self.maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=self._config.computational.gpu, + network=network, + nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, + ) + model = DDP( + model, + fsdp=self.maps_manager.fully_sharded_data_parallel, + amp=self.maps_manager.amp, + ) + model.eval() + tensor_path = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / self._config.maps_manager.data_group + / "latent_tensors" + ) + if cluster.master: + tensor_path.mkdir(parents=True, exist_ok=True) + dist.barrier() + if nb_images is None: # Compute outputs for the whole data set + nb_modes = len(dataset) + else: + nb_modes = nb_images * dataset.elem_per_image + for i in [ + *range(cluster.rank, nb_modes, cluster.world_size), + *range(int(nb_modes % cluster.world_size <= cluster.rank)), + ]: + data = dataset[i] + image = data["image"] + logger.debug(f"Image for latent representation {image}") + with autocast("cuda", enabled=self.maps_manager.std_amp): + _, latent, _ = model.module._forward( + image.unsqueeze(0).to(model.device) + ) + latent = latent.squeeze(0).cpu().float() + participant_id = data["participant_id"] + session_id = data["session_id"] + mode_id = data[f"{self.maps_manager.mode}_id"] + output_filename = f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_latent.pt" + torch.save(latent, tensor_path / output_filename) + + @torch.no_grad() + def _compute_output_nifti( + self, + dataset, + split: int, + network: Optional[int] = None, + ): + """Computes the output nifti images and saves them in the MAPS. + Parameters + ---------- + dataset : _type_ + _description_ + data_group : str + name of the data group used for the task. + split : int + split number. + selection_metrics : list[str] + metrics used for model selection. + gpu : bool (optional, default=None) + If given, a new value for the device of the model will be computed. + network : int (optional, default=None) + Index of the network tested (only used in multi-network setting). 
+ Raises + -------- + ClinicaDLException if not an image + """ + import nibabel as nib + from numpy import eye + + for selection_metric in self._config.validation.selection_metrics: + # load the best trained model during the training + model, _ = self.maps_manager._init_model( + transfer_path=self.maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=self._config.computational.gpu, + network=network, + nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, + ) + model = DDP( + model, + fsdp=self.maps_manager.fully_sharded_data_parallel, + amp=self.maps_manager.amp, + ) + model.eval() + nifti_path = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / self._config.maps_manager.data_group + / "nifti_images" + ) + if cluster.master: + nifti_path.mkdir(parents=True, exist_ok=True) + dist.barrier() + nb_imgs = len(dataset) + for i in [ + *range(cluster.rank, nb_imgs, cluster.world_size), + *range(int(nb_imgs % cluster.world_size <= cluster.rank)), + ]: + data = dataset[i] + image = data["image"] + x = image.unsqueeze(0).to(model.device) + with autocast("cuda", enabled=self.maps_manager.std_amp): + output = model(x) + output = output.squeeze(0).detach().cpu().float() + # Convert tensor to nifti image with appropriate affine + input_nii = nib.nifti1.Nifti1Image( + image[0].detach().cpu().numpy(), eye(4) + ) + output_nii = nib.nifti1.Nifti1Image(output[0].numpy(), eye(4)) + # Create file name according to participant and session id + participant_id = data["participant_id"] + session_id = data["session_id"] + input_filename = f"{participant_id}_{session_id}_image_input.nii.gz" + output_filename = f"{participant_id}_{session_id}_image_output.nii.gz" + nib.loadsave.save(input_nii, nifti_path / input_filename) + nib.loadsave.save(output_nii, nifti_path / output_filename) + + def interpret(self): + """Performs the interpretation task on a subset of caps_directory defined in a TSV file. + The mean interpretation is always saved, to save the individual interpretations set save_individual to True. + """ + assert isinstance(self._config, InterpretConfig) + + self._config.adapt_with_maps_manager_info(self.maps_manager) + + if self.maps_manager.multi_network: + raise NotImplementedError( + "The interpretation of multi-network framework is not implemented." 
+ ) + transforms = TransformsConfig( + normalize=self.maps_manager.normalize, + data_augmentation=self.maps_manager.data_augmentation, + size_reduction=self.maps_manager.size_reduction, + size_reduction_factor=self.maps_manager.size_reduction_factor, + ) + group_df = self._config.data.create_groupe_df() + self._check_data_group(group_df) + + for split in self.splitter.split_iterator(): + logger.info(f"Interpretation of split {split}") + df_group, parameters_group = self.get_group_info( + self._config.maps_manager.data_group, split + ) + data_test = return_dataset( + parameters_group["caps_directory"], + df_group, + self.maps_manager.preprocessing_dict, + transforms_config=transforms, + multi_cohort=parameters_group["multi_cohort"], + label_presence=False, + label_code=self.maps_manager.label_code, + label=self.maps_manager.label, + ) + test_loader = DataLoader( + data_test, + batch_size=self._config.dataloader.batch_size, + shuffle=False, + num_workers=self._config.dataloader.n_proc, + ) + if not self._config.validation.selection_metrics: + self._config.validation.selection_metrics = find_selection_metrics( + self.maps_manager.maps_path, + split, + ) + for selection_metric in self._config.validation.selection_metrics: + logger.info(f"Interpretation of metric {selection_metric}") + results_path = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / self._config.maps_manager.data_group + / f"interpret-{self._config.interpret.name}" + ) + if (results_path).is_dir(): + if self._config.interpret.overwrite_name: + shutil.rmtree(results_path) + else: + raise MAPSError( + f"Interpretation name {self._config.interpret.name} is already written. " + f"Please choose another name or set overwrite_name to True." + ) + results_path.mkdir(parents=True) + model, _ = self.maps_manager._init_model( + transfer_path=self.maps_manager.maps_path, + split=split, + transfer_selection=selection_metric, + gpu=self._config.computational.gpu, + ) + interpreter = self._config.interpret.get_method()(model) + cum_maps = [0] * data_test.elem_per_image + for data in test_loader: + images = data["image"].to(model.device) + map_pt = interpreter.generate_gradients( + images, + self._config.interpret.target_node, + level=self._config.interpret.level, + amp=self._config.computational.amp, + ) + for i in range(len(data["participant_id"])): + mode_id = data[f"{self.maps_manager.mode}_id"][i] + cum_maps[mode_id] += map_pt[i] + if self._config.interpret.save_individual: + single_path = ( + results_path + / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.pt" + ) + torch.save(map_pt[i], single_path) + if self._config.maps_manager.save_nifti: + import nibabel as nib + from numpy import eye + + single_nifti_path = ( + results_path + / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.nii.gz" + ) + output_nii = nib.nifti1.Nifti1Image( + map_pt[i].numpy(), eye(4) + ) + nib.loadsave.save(output_nii, single_nifti_path) + for i, mode_map in enumerate(cum_maps): + mode_map /= len(data_test) + torch.save( + mode_map, + results_path / f"mean_{self.maps_manager.mode}-{i}_map.pt", + ) + if self._config.maps_manager.save_nifti: + import nibabel as nib + from numpy import eye + + output_nii = nib.nifti1.Nifti1Image(mode_map.numpy(), eye(4)) + nib.loadsave.save( + output_nii, + results_path + / f"mean_{self.maps_manager.mode}-{i}_map.nii.gz", + ) + + def 
_check_data_group( + self, + df: Optional[pd.DataFrame] = None, + ): + """Check if a data group is already available if other arguments are None. + Else creates a new data_group. + + Parameters + ---------- + + Raises + ------ + MAPSError + when trying to overwrite train or validation data groups + ClinicaDLArgumentError + when caps_directory or df are given but data group already exists + ClinicaDLArgumentError + when caps_directory or df are not given and data group does not exist + + """ + group_dir = ( + self.maps_manager.maps_path + / "groups" + / self._config.maps_manager.data_group + ) + logger.debug(f"Group path {group_dir}") + if group_dir.is_dir(): # Data group already exists + if self._config.maps_manager.overwrite: + if self._config.maps_manager.data_group in ["train", "validation"]: + raise MAPSError("Cannot overwrite train or validation data group.") + else: + if not self._config.split.split: + self._config.split.split = self.maps_manager.find_splits() + assert self._config.split + for split in self._config.split.split: + selection_metrics = find_selection_metrics( + self.maps_manager.maps_path, + split, + ) + for selection in selection_metrics: + results_path = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection}" + / self._config.maps_manager.data_group + ) + if results_path.is_dir(): + shutil.rmtree(results_path) + elif df is not None or ( + self._config.data.caps_directory is not None + and self._config.data.caps_directory != Path("") + ): + raise ClinicaDLArgumentError( + f"Data group {self._config.maps_manager.data_group} is already defined. " + f"Please do not give any caps_directory, tsv_path or multi_cohort to use it. " + f"To erase {self._config.maps_manager.data_group} please set overwrite to True." + ) + + elif not group_dir.is_dir() and ( + self._config.data.caps_directory is None or df is None + ): # Data group does not exist yet / was overwritten + missing data + raise ClinicaDLArgumentError( + f"The data group {self._config.maps_manager.data_group} does not already exist. " + f"Please specify a caps_directory and a tsv_path to create this data group." + ) + elif ( + not group_dir.is_dir() + ): # Data group does not exist yet / was overwritten + all data is provided + if self._config.validation.skip_leak_check: + logger.info("Skipping data leakage check") + else: + self._check_leakage(self._config.maps_manager.data_group, df) + self._write_data_group( + self._config.maps_manager.data_group, + df, + self._config.data.caps_directory, + self._config.data.multi_cohort, + label=self._config.data.label, + ) + + def get_group_info( + self, data_group: str, split: int = None + ) -> Tuple[pd.DataFrame, Dict[str, Any]]: + """Gets information from corresponding data group + (list of participant_id / session_id + configuration parameters). + split is only needed if data_group is train or validation. + + Parameters + ---------- + data_group : str + _description_ + split : int (optional, default=None) + _description_ + + Returns + ------- + Tuple[pd.DataFrame, Dict[str, Any]] + _description_ + + Raises + ------ + MAPSError + _description_ + MAPSError + _description_ + MAPSError + _description_ + """ + group_path = self.maps_manager.maps_path / "groups" / data_group + if not group_path.is_dir(): + raise MAPSError( + f"Data group {data_group} is not defined. " + f"Please run a prediction to create this data group." 
+ ) + if data_group in ["train", "validation"]: + if split is None: + raise MAPSError( + "Information on train or validation data can only be " + "loaded if a split number is given" + ) + elif not (group_path / f"split-{split}").is_dir(): + raise MAPSError( + f"Split {split} is not available for data group {data_group}." + ) + else: + group_path = group_path / f"split-{split}" + + df = pd.read_csv(group_path / "data.tsv", sep="\t") + json_path = group_path / "maps.json" + from clinicadl.utils.iotools.utils import path_decoder + + with json_path.open(mode="r") as f: + parameters = json.load(f, object_hook=path_decoder) + return df, parameters + + def _check_leakage(self, data_group: str, test_df: pd.DataFrame): + """Checks that no intersection exist between the participants used for training and those used for testing. + + Parameters + ---------- + data_group : str + name of the data group + test_df : pd.DataFrame + Table of participant_id / session_id of the data group + + Raises + ------ + ClinicaDLDataLeakageError + if data_group not in ["train", "validation"] and there is an intersection + between the participant IDs in test_df and the ones used for training. + """ + if data_group not in ["train", "validation"]: + train_path = self.maps_manager.maps_path / "groups" / "train+validation.tsv" + train_df = pd.read_csv(train_path, sep="\t") + participants_train = set(train_df.participant_id.values) + participants_test = set(test_df.participant_id.values) + intersection = participants_test & participants_train + + if len(intersection) > 0: + raise ClinicaDLDataLeakageError( + "Your evaluation set contains participants who were already seen during " + "the training step. The list of common participants is the following: " + f"{intersection}." + ) + + def _write_data_group( + self, + data_group, + df, + caps_directory: Path = None, + multi_cohort: bool = None, + label=None, + ): + """Check that a data_group is not already written and writes the characteristics of the data group + (TSV file with a list of participant / session + JSON file containing the CAPS and the preprocessing). + + Parameters + ---------- + data_group : _type_ + name whose presence is checked. + df : _type_ + DataFrame containing the participant_id and session_id (and label if use_labels is True) + caps_directory : Path (optional, default=None) + caps_directory if different from the training caps_directory, + multi_cohort : bool (optional, default=None) + multi_cohort used if different from the training multi_cohort. + label : _type_ (optional, default=None) + _description_ + """ + group_path = self.maps_manager.maps_path / "groups" / data_group + group_path.mkdir(parents=True) + + columns = ["participant_id", "session_id", "cohort"] + if self._config.data.label in df.columns.values: + columns += [self._config.data.label] + if label is not None and label in df.columns.values: + columns += [label] + + df.to_csv(group_path / "data.tsv", sep="\t", columns=columns, index=False) + self.maps_manager.write_parameters( + group_path, + { + "caps_directory": ( + caps_directory + if caps_directory is not None + else self._config.caps_directory + ), + "multi_cohort": ( + multi_cohort + if multi_cohort is not None + else self._config.multi_cohort + ), + }, + ) + + # this function is never used ??? 
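The leakage check above boils down to a set intersection on participant IDs; a minimal, self-contained sketch of the same logic (the DataFrames are illustrative):

import pandas as pd

train_df = pd.DataFrame({"participant_id": ["sub-01", "sub-02"]})
test_df = pd.DataFrame({"participant_id": ["sub-02", "sub-03"]})

intersection = set(test_df.participant_id) & set(train_df.participant_id)
if intersection:
    raise ValueError(f"Data leakage: {intersection} already seen during training.")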
+ + def get_interpretation( + self, + data_group: str, + name: str, + split: int = 0, + selection_metric: Optional[str] = None, + verbose: bool = True, + participant_id: Optional[str] = None, + session_id: Optional[str] = None, + mode_id: int = 0, + ) -> torch.Tensor: + """ + Get the individual interpretation maps for one session if participant_id and session_id are filled. + Else load the mean interpretation map. + + Args: + data_group (str): Name of the data group used for the interpretation task. + name (str): name of the interpretation task. + split (int): Index of the split used for training. + selection_metric (str): Metric used for best weights selection. + verbose (bool): if True will print associated prediction.log. + participant_id (str): ID of the participant (if not given load mean map). + session_id (str): ID of the session (if not give load the mean map). + mode_id (int): Index of the mode used. + Returns: + (torch.Tensor): Tensor of the interpretability map. + """ + + selection_metric = check_selection_metric( + self.maps_manager.maps_path, + split, + selection_metric, + ) + if verbose: + self.maps_manager._print_description_log( + data_group, split, selection_metric + ) + map_dir = ( + self.maps_manager.maps_path + / f"split-{split}" + / f"best-{selection_metric}" + / data_group + / f"interpret-{name}" + ) + if not map_dir.is_dir(): + raise MAPSError( + f"No prediction corresponding to data group {data_group} and " + f"interpretation {name} was found." + ) + if participant_id is None and session_id is None: + map_pt = torch.load( + map_dir / f"mean_{self.maps_manager.mode}-{mode_id}_map.pt", + weights_only=True, + ) + elif participant_id is None or session_id is None: + raise ValueError( + "To load the mean interpretation map, " + "please do not give any participant_id or session_id.\n " + "Else specify both parameters" + ) + else: + map_pt = torch.load( + map_dir + / f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_map.pt", + weights_only=True, + ) + return map_pt + + def test( + self, + mode: str, + metrics_module: MetricModule, + n_classes: int, + network_task, + model: Network, + dataloader: DataLoader, + criterion: _Loss, + use_labels: bool = True, + amp: bool = False, + report_ci=False, + ) -> Tuple[pd.DataFrame, Dict[str, float]]: + """ + Computes the predictions and evaluation metrics. + + Parameters + ---------- + model: Network + The model trained. + dataloader: DataLoader + Wrapper of a CapsDataset. + criterion: _Loss + Function to calculate the loss. + use_labels: bool + If True the true_label will be written in output DataFrame + and metrics dict will be created. + amp: bool + If True, enables Pytorch's automatic mixed precision. + + Returns + ------- + the results and metrics on the image level. 
+ """ + model.eval() + dataloader.dataset.eval() + + results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) + total_loss = {} + with torch.no_grad(): + for i, data in enumerate(dataloader): + # initialize the loss list to save the loss components + with autocast("cuda", enabled=amp): + outputs, loss_dict = model(data, criterion, use_labels=use_labels) + + if i == 0: + for loss_component in loss_dict.keys(): + total_loss[loss_component] = 0 + for loss_component in total_loss.keys(): + total_loss[loss_component] += loss_dict[loss_component].float() + + # Generate detailed DataFrame + for idx in range(len(data["participant_id"])): + row = generate_test_row( + network_task, + mode, + metrics_module, + n_classes, + idx, + data, + outputs.float(), + ) + row_df = pd.DataFrame( + row, columns=columns(network_task, mode, n_classes) + ) + results_df = pd.concat([results_df, row_df]) + + del outputs, loss_dict + dataframes = [None] * dist.get_world_size() + dist.gather_object( + results_df, dataframes if dist.get_rank() == 0 else None, dst=0 + ) + if dist.get_rank() == 0: + results_df = pd.concat(dataframes) + del dataframes + results_df.reset_index(inplace=True, drop=True) + + if not use_labels: + metrics_dict = None + else: + metrics_dict = compute_metrics( + network_task, results_df, metrics_module, report_ci=report_ci + ) + for loss_component in total_loss.keys(): + dist.reduce(total_loss[loss_component], dst=0) + loss_value = total_loss[loss_component].item() / cluster.world_size + + if report_ci: + metrics_dict["Metric_names"].append(loss_component) + metrics_dict["Metric_values"].append(loss_value) + metrics_dict["Lower_CI"].append("N/A") + metrics_dict["Upper_CI"].append("N/A") + metrics_dict["SE"].append("N/A") + + else: + metrics_dict[loss_component] = loss_value + + torch.cuda.empty_cache() + + return results_df, metrics_dict + + def test_da( + self, + mode: str, + metrics_module: MetricModule, + n_classes: int, + network_task: Union[str, Task], + model: Network, + dataloader: DataLoader, + criterion: _Loss, + alpha: float = 0, + use_labels: bool = True, + target: bool = True, + report_ci=False, + ) -> Tuple[pd.DataFrame, Dict[str, float]]: + """ + Computes the predictions and evaluation metrics. + + Args: + model: the model trained. + dataloader: wrapper of a CapsDataset. + criterion: function to calculate the loss. + use_labels: If True the true_label will be written in output DataFrame + and metrics dict will be created. + Returns: + the results and metrics on the image level. 
+ """ + model.eval() + dataloader.dataset.eval() + results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) + total_loss = 0 + with torch.no_grad(): + for i, data in enumerate(dataloader): + outputs, loss_dict = model.compute_outputs_and_loss_test( + data, criterion, alpha, target + ) + total_loss += loss_dict["loss"].item() + + # Generate detailed DataFrame + for idx in range(len(data["participant_id"])): + row = generate_test_row( + network_task, + mode, + metrics_module, + n_classes, + idx, + data, + outputs, + ) + row_df = pd.DataFrame( + row, columns=columns(network_task, mode, n_classes) + ) + results_df = pd.concat([results_df, row_df]) + + del outputs, loss_dict + results_df.reset_index(inplace=True, drop=True) + + if not use_labels: + metrics_dict = None + else: + metrics_dict = compute_metrics( + network_task, results_df, metrics_module, report_ci=report_ci + ) + if report_ci: + metrics_dict["Metric_names"].append("loss") + metrics_dict["Metric_values"].append(total_loss) + metrics_dict["Lower_CI"].append("N/A") + metrics_dict["Upper_CI"].append("N/A") + metrics_dict["SE"].append("N/A") + + else: + metrics_dict["loss"] = total_loss + + torch.cuda.empty_cache() + + return results_df, metrics_dict + + def _test_loader( + self, + maps_manager: MapsManager, + dataloader, + criterion, + data_group: str, + split: int, + selection_metrics, + use_labels=True, + gpu=None, + amp=False, + network=None, + report_ci=True, + ): + """ + Launches the testing task on a dataset wrapped by a DataLoader and writes prediction TSV files. + + Args: + dataloader (torch.utils.data.DataLoader): DataLoader wrapping the test CapsDataset. + criterion (torch.nn.modules.loss._Loss): optimization criterion used during training. + data_group (str): name of the data group used for the testing task. + split (int): Index of the split used to train the model tested. + selection_metrics (list[str]): List of metrics used to select the best models which are tested. + use_labels (bool): If True, the labels must exist in test meta-data and metrics are computed. + gpu (bool): If given, a new value for the device of the model will be computed. + amp (bool): If enabled, uses Automatic Mixed Precision (requires GPU usage). + network (int): Index of the network tested (only used in multi-network setting). 
+        """
+        for selection_metric in selection_metrics:
+            if cluster.master:
+                log_dir = (
+                    maps_manager.maps_path
+                    / f"split-{split}"
+                    / f"best-{selection_metric}"
+                    / data_group
+                )
+                maps_manager.write_description_log(
+                    log_dir,
+                    data_group,
+                    dataloader.dataset.config.data.caps_dict,
+                    dataloader.dataset.config.data.data_df,
+                )
+
+            # load the best model selected during training
+            model, _ = maps_manager._init_model(
+                transfer_path=maps_manager.maps_path,
+                split=split,
+                transfer_selection=selection_metric,
+                gpu=gpu,
+                network=network,
+            )
+            model = DDP(
+                model,
+                fsdp=maps_manager.fully_sharded_data_parallel,
+                amp=maps_manager.amp,
+            )
+
+            prediction_df, metrics = self.test(
+                mode=maps_manager.mode,
+                metrics_module=maps_manager.metrics_module,
+                n_classes=maps_manager.n_classes,
+                network_task=maps_manager.network_task,
+                model=model,
+                dataloader=dataloader,
+                criterion=criterion,
+                use_labels=use_labels,
+                amp=amp,
+                report_ci=report_ci,
+            )
+            if use_labels:
+                if network is not None:
+                    metrics[f"{maps_manager.mode}_id"] = network
+
+                loss_to_log = (
+                    metrics["Metric_values"][-1] if report_ci else metrics["loss"]
+                )
+
+                logger.info(
+                    f"{maps_manager.mode} level {data_group} loss is {loss_to_log} for model selected on {selection_metric}"
+                )
+
+            if cluster.master:
+                # Replace here
+                maps_manager._mode_level_to_tsv(
+                    prediction_df,
+                    metrics,
+                    split,
+                    selection_metric,
+                    data_group=data_group,
+                )
+
+    @torch.no_grad()
+    def _compute_output_tensors(
+        self,
+        maps_manager: MapsManager,
+        dataset,
+        data_group,
+        split,
+        selection_metrics,
+        nb_images=None,
+        gpu=None,
+        network=None,
+    ):
+        """
+        Computes the output tensors and saves them in the MAPS.
+
+        Args:
+            dataset (clinicadl.dataset.caps_dataset.CapsDataset): wrapper of the data set.
+            data_group (str): name of the data group used for the task.
+            split (int): split number.
+            selection_metrics (list[str]): metrics used for model selection.
+            nb_images (int): number of full images to write. Default computes the outputs of the whole data set.
+            gpu (bool): If given, a new value for the device of the model will be computed.
+            network (int): Index of the network tested (only used in multi-network setting).
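+
+        Note:
+            For each element, the input and output tensors are saved under
+            ``<maps_path>/split-<split>/best-<selection_metric>/<data_group>/tensors``
+            as ``<participant_id>_<session_id>_<mode>-<mode_id>_input.pt`` and
+            ``<participant_id>_<session_id>_<mode>-<mode_id>_output.pt``.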
+        """
+        for selection_metric in selection_metrics:
+            # load the best model selected during training
+            model, _ = maps_manager._init_model(
+                transfer_path=maps_manager.maps_path,
+                split=split,
+                transfer_selection=selection_metric,
+                gpu=gpu,
+                network=network,
+                nb_unfrozen_layer=maps_manager.nb_unfrozen_layer,
+            )
+            model = DDP(
+                model,
+                fsdp=maps_manager.fully_sharded_data_parallel,
+                amp=maps_manager.amp,
+            )
+            model.eval()
+
+            tensor_path = (
+                maps_manager.maps_path
+                / f"split-{split}"
+                / f"best-{selection_metric}"
+                / data_group
+                / "tensors"
+            )
+            if cluster.master:
+                tensor_path.mkdir(parents=True, exist_ok=True)
+            dist.barrier()
+
+            if nb_images is None:  # Compute outputs for the whole data set
+                nb_modes = len(dataset)
+            else:
+                nb_modes = nb_images * dataset.elem_per_image
+
+            for i in [
+                *range(cluster.rank, nb_modes, cluster.world_size),
+                *range(int(nb_modes % cluster.world_size <= cluster.rank)),
+            ]:
+                data = dataset[i]
+                image = data["image"]
+                x = image.unsqueeze(0).to(model.device)
+                with autocast("cuda", enabled=maps_manager.std_amp):
+                    output = model(x)
+                output = output.squeeze(0).cpu().float()
+                participant_id = data["participant_id"]
+                session_id = data["session_id"]
+                mode_id = data[f"{maps_manager.mode}_id"]
+                input_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_input.pt"
+                output_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_output.pt"
+                torch.save(image, tensor_path / input_filename)
+                torch.save(output, tensor_path / output_filename)
+                logger.debug(f"File saved at {[input_filename, output_filename]}")
+
+    def _ensemble_prediction(
+        self,
+        maps_manager: MapsManager,
+        data_group,
+        split,
+        selection_metrics,
+        use_labels=True,
+        skip_leak_check=False,
+    ):
+        """Computes the results on the image-level."""
+
+        if not selection_metrics:
+            selection_metrics = find_selection_metrics(maps_manager.maps_path, split)
+
+        for selection_metric in selection_metrics:
+            #####################
+            # Soft voting
+            if maps_manager.num_networks > 1 and not skip_leak_check:
+                maps_manager._ensemble_to_tsv(
+                    split,
+                    selection=selection_metric,
+                    data_group=data_group,
+                    use_labels=use_labels,
+                )
+            elif maps_manager.mode != "image" and not skip_leak_check:
+                maps_manager._mode_to_image_tsv(
+                    split,
+                    selection=selection_metric,
+                    data_group=data_group,
+                    use_labels=use_labels,
+                )
diff --git a/clinicadl/utils/iotools/train_utils.py b/clinicadl/utils/iotools/train_utils.py
index 21fb160d5..06423ca31 100644
--- a/clinicadl/utils/iotools/train_utils.py
+++ b/clinicadl/utils/iotools/train_utils.py
@@ -220,7 +220,7 @@ def merge_cli_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str, Any]
     Dict[str, Any]
         A dictionary with training options.
     """
-    from clinicadl.dataset.caps_dataset_utils import read_json
+    from clinicadl.caps_dataset.caps_dataset_utils import read_json
 
     options = read_json(maps_json)
     for arg in kwargs:
@@ -253,7 +253,7 @@ def merge_options_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str,
     Dict[str, Any]
         A dictionary with training options.
""" +<<<<<<< HEAD from clinicadl.dataset.caps_dataset_utils import read_json +======= + from clinicadl.caps_dataset.caps_dataset_utils import read_json +>>>>>>> 1ae72275 (Cb extract validator (#666)) options = read_json(maps_json) for arg in kwargs: From 94b26d725946440d3a02ce4fb514030a83330b44 Mon Sep 17 00:00:00 2001 From: camillebrianceau <57992134+camillebrianceau@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:02:15 +0100 Subject: [PATCH 16/16] Base for v2 (#676) * base for clinicadl v2 --- clinicadl/monai_networks/config/cnn.py | 24 - .../monai_networks/config/conv_decoder.py | 65 - .../monai_networks/config/conv_encoder.py | 64 - clinicadl/monai_networks/config/mlp.py | 52 - clinicadl/monai_networks/config/senet.py | 60 - clinicadl/monai_networks/nn/__init__.py | 13 - clinicadl/monai_networks/nn/att_unet.py | 207 --- clinicadl/monai_networks/nn/autoencoder.py | 416 ------ clinicadl/monai_networks/nn/cnn.py | 124 -- clinicadl/monai_networks/nn/conv_decoder.py | 388 ------ clinicadl/monai_networks/nn/conv_encoder.py | 392 ------ clinicadl/monai_networks/nn/densenet.py | 312 ----- clinicadl/monai_networks/nn/generator.py | 131 -- .../monai_networks/nn/layers/__init__.py | 0 clinicadl/monai_networks/nn/layers/resnet.py | 124 -- clinicadl/monai_networks/nn/layers/senet.py | 142 -- clinicadl/monai_networks/nn/layers/unet.py | 102 -- clinicadl/monai_networks/nn/layers/unpool.py | 87 -- .../nn/layers/utils/__init__.py | 19 - .../monai_networks/nn/layers/utils/enum.py | 65 - .../monai_networks/nn/layers/utils/types.py | 37 - clinicadl/monai_networks/nn/layers/vit.py | 94 -- clinicadl/monai_networks/nn/mlp.py | 146 -- clinicadl/monai_networks/nn/resnet.py | 566 -------- clinicadl/monai_networks/nn/senet.py | 214 --- clinicadl/monai_networks/nn/unet.py | 250 ---- clinicadl/monai_networks/nn/utils/__init__.py | 14 - clinicadl/monai_networks/nn/utils/checks.py | 167 --- clinicadl/monai_networks/nn/utils/shapes.py | 203 --- clinicadl/monai_networks/nn/vae.py | 200 --- clinicadl/monai_networks/nn/vit.py | 420 ------ clinicadl/predictor/old_predictor.py | 22 +- clinicadl/predictor/predictor.py | 1175 +---------------- clinicadl/utils/iotools/train_utils.py | 12 +- 34 files changed, 11 insertions(+), 6296 deletions(-) delete mode 100644 clinicadl/monai_networks/config/cnn.py delete mode 100644 clinicadl/monai_networks/config/conv_decoder.py delete mode 100644 clinicadl/monai_networks/config/conv_encoder.py delete mode 100644 clinicadl/monai_networks/config/mlp.py delete mode 100644 clinicadl/monai_networks/config/senet.py delete mode 100644 clinicadl/monai_networks/nn/__init__.py delete mode 100644 clinicadl/monai_networks/nn/att_unet.py delete mode 100644 clinicadl/monai_networks/nn/autoencoder.py delete mode 100644 clinicadl/monai_networks/nn/cnn.py delete mode 100644 clinicadl/monai_networks/nn/conv_decoder.py delete mode 100644 clinicadl/monai_networks/nn/conv_encoder.py delete mode 100644 clinicadl/monai_networks/nn/densenet.py delete mode 100644 clinicadl/monai_networks/nn/generator.py delete mode 100644 clinicadl/monai_networks/nn/layers/__init__.py delete mode 100644 clinicadl/monai_networks/nn/layers/resnet.py delete mode 100644 clinicadl/monai_networks/nn/layers/senet.py delete mode 100644 clinicadl/monai_networks/nn/layers/unet.py delete mode 100644 clinicadl/monai_networks/nn/layers/unpool.py delete mode 100644 clinicadl/monai_networks/nn/layers/utils/__init__.py delete mode 100644 clinicadl/monai_networks/nn/layers/utils/enum.py delete mode 100644 
clinicadl/monai_networks/nn/layers/utils/types.py delete mode 100644 clinicadl/monai_networks/nn/layers/vit.py delete mode 100644 clinicadl/monai_networks/nn/mlp.py delete mode 100644 clinicadl/monai_networks/nn/resnet.py delete mode 100644 clinicadl/monai_networks/nn/senet.py delete mode 100644 clinicadl/monai_networks/nn/unet.py delete mode 100644 clinicadl/monai_networks/nn/utils/__init__.py delete mode 100644 clinicadl/monai_networks/nn/utils/checks.py delete mode 100644 clinicadl/monai_networks/nn/utils/shapes.py delete mode 100644 clinicadl/monai_networks/nn/vae.py delete mode 100644 clinicadl/monai_networks/nn/vit.py diff --git a/clinicadl/monai_networks/config/cnn.py b/clinicadl/monai_networks/config/cnn.py deleted file mode 100644 index a7d2043db..000000000 --- a/clinicadl/monai_networks/config/cnn.py +++ /dev/null @@ -1,24 +0,0 @@ -from typing import Optional, Sequence, Union - -from pydantic import PositiveInt, computed_field - -from clinicadl.utils.factories import DefaultFromLibrary - -from .base import ImplementedNetworks, NetworkConfig -from .conv_encoder import ConvEncoderOptions -from .mlp import MLPOptions - - -class CNNConfig(NetworkConfig): - """Config class for CNN.""" - - in_shape: Sequence[PositiveInt] - num_outputs: PositiveInt - conv_args: ConvEncoderOptions - mlp_args: Union[Optional[MLPOptions], DefaultFromLibrary] = DefaultFromLibrary.YES - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.CNN diff --git a/clinicadl/monai_networks/config/conv_decoder.py b/clinicadl/monai_networks/config/conv_decoder.py deleted file mode 100644 index 5dc78dfec..000000000 --- a/clinicadl/monai_networks/config/conv_decoder.py +++ /dev/null @@ -1,65 +0,0 @@ -from typing import Optional, Sequence, Union - -from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field - -from clinicadl.monai_networks.nn.layers.utils import ( - ActivationParameters, - ConvNormalizationParameters, - ConvParameters, - UnpoolingParameters, -) -from clinicadl.utils.factories import DefaultFromLibrary - -from .base import ImplementedNetworks, NetworkConfig - - -class ConvDecoderOptions(BaseModel): - """ - Config class for ConvDecoder when it is a submodule. 
- See for example: :py:class:`clinicadl.monai_networks.nn.generator.Generator` - """ - - channels: Sequence[PositiveInt] - kernel_size: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - stride: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - output_padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - dilation: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - unpooling: Union[ - Optional[UnpoolingParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - unpooling_indices: Union[ - Optional[Sequence[int]], DefaultFromLibrary - ] = DefaultFromLibrary.YES - act: Union[ - Optional[ActivationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - output_act: Union[ - Optional[ActivationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - norm: Union[ - Optional[ConvNormalizationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES - bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES - - # pydantic config - model_config = ConfigDict( - validate_assignment=True, - use_enum_values=True, - validate_default=True, - ) - - -class ConvDecoderConfig(NetworkConfig, ConvDecoderOptions): - """Config class for ConvDecoder.""" - - spatial_dims: PositiveInt - in_channels: PositiveInt - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.CONV_DECODER diff --git a/clinicadl/monai_networks/config/conv_encoder.py b/clinicadl/monai_networks/config/conv_encoder.py deleted file mode 100644 index 499f69b19..000000000 --- a/clinicadl/monai_networks/config/conv_encoder.py +++ /dev/null @@ -1,64 +0,0 @@ -from typing import Optional, Sequence, Union - -from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field - -from clinicadl.monai_networks.nn.layers.utils import ( - ActivationParameters, - ConvNormalizationParameters, - ConvParameters, - PoolingParameters, -) -from clinicadl.utils.factories import DefaultFromLibrary - -from .base import ImplementedNetworks, NetworkConfig - - -class ConvEncoderOptions(BaseModel): - """ - Config class for ConvEncoder when it is a submodule. 
- See for example: :py:class:`clinicadl.monai_networks.nn.cnn.CNN` - """ - - channels: Sequence[PositiveInt] - kernel_size: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - stride: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - padding: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - dilation: Union[ConvParameters, DefaultFromLibrary] = DefaultFromLibrary.YES - pooling: Union[ - Optional[PoolingParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - pooling_indices: Union[ - Optional[Sequence[int]], DefaultFromLibrary - ] = DefaultFromLibrary.YES - act: Union[ - Optional[ActivationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - output_act: Union[ - Optional[ActivationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - norm: Union[ - Optional[ConvNormalizationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES - bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES - - # pydantic config - model_config = ConfigDict( - validate_assignment=True, - use_enum_values=True, - validate_default=True, - ) - - -class ConvEncoderConfig(NetworkConfig, ConvEncoderOptions): - """Config class for ConvEncoder.""" - - spatial_dims: PositiveInt - in_channels: PositiveInt - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.CONV_ENCODER diff --git a/clinicadl/monai_networks/config/mlp.py b/clinicadl/monai_networks/config/mlp.py deleted file mode 100644 index 5d12f303f..000000000 --- a/clinicadl/monai_networks/config/mlp.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Optional, Sequence, Union - -from pydantic import BaseModel, ConfigDict, PositiveFloat, PositiveInt, computed_field - -from clinicadl.monai_networks.nn.layers.utils import ( - ActivationParameters, - NormalizationParameters, -) -from clinicadl.utils.factories import DefaultFromLibrary - -from .base import ImplementedNetworks, NetworkConfig - - -class MLPOptions(BaseModel): - """ - Config class for MLP when it is a submodule. 
- See for example: :py:class:`clinicadl.monai_networks.nn.cnn.CNN` - """ - - hidden_channels: Sequence[PositiveInt] - act: Union[ - Optional[ActivationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - output_act: Union[ - Optional[ActivationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - norm: Union[ - Optional[NormalizationParameters], DefaultFromLibrary - ] = DefaultFromLibrary.YES - dropout: Union[Optional[PositiveFloat], DefaultFromLibrary] = DefaultFromLibrary.YES - bias: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES - adn_ordering: Union[str, DefaultFromLibrary] = DefaultFromLibrary.YES - - # pydantic config - model_config = ConfigDict( - validate_assignment=True, - use_enum_values=True, - validate_default=True, - ) - - -class MLPConfig(NetworkConfig, MLPOptions): - """Config class for Multi Layer Perceptron.""" - - in_channels: PositiveInt - out_channels: PositiveInt - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.MLP diff --git a/clinicadl/monai_networks/config/senet.py b/clinicadl/monai_networks/config/senet.py deleted file mode 100644 index 79a356726..000000000 --- a/clinicadl/monai_networks/config/senet.py +++ /dev/null @@ -1,60 +0,0 @@ -from typing import Union - -from pydantic import PositiveInt, computed_field - -from clinicadl.utils.factories import DefaultFromLibrary - -from .base import ImplementedNetworks, NetworkType, PreTrainedConfig -from .resnet import ResNetConfig - - -class SEResNetConfig(ResNetConfig): - """Config class for Squeeze-and-Excitation ResNet.""" - - se_reduction: Union[PositiveInt, DefaultFromLibrary] = DefaultFromLibrary.YES - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.SE_RESNET - - -class PreTrainedSEResNetConfig(PreTrainedConfig): - """Base config class for SOTA SE-ResNets.""" - - @computed_field - @property - def _type(self) -> NetworkType: - """To know where to look for the network.""" - return NetworkType.SE_RESNET - - -class SEResNet50Config(PreTrainedSEResNetConfig): - """Config class for SE-ResNet-50.""" - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.SE_RESNET_50 - - -class SEResNet101Config(PreTrainedSEResNetConfig): - """Config class for SE-ResNet-101.""" - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.SE_RESNET_101 - - -class SEResNet152Config(PreTrainedSEResNetConfig): - """Config class for SE-ResNet-152.""" - - @computed_field - @property - def name(self) -> ImplementedNetworks: - """The name of the network.""" - return ImplementedNetworks.SE_RESNET_152 diff --git a/clinicadl/monai_networks/nn/__init__.py b/clinicadl/monai_networks/nn/__init__.py deleted file mode 100644 index 0e1c7054a..000000000 --- a/clinicadl/monai_networks/nn/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .att_unet import AttentionUNet -from .autoencoder import AutoEncoder -from .cnn import CNN -from .conv_decoder import ConvDecoder -from .conv_encoder import ConvEncoder -from .densenet import DenseNet, get_densenet -from .generator import Generator -from .mlp import MLP -from .resnet import ResNet, get_resnet -from .senet import SEResNet, get_seresnet -from .unet import UNet -from .vae import VAE -from .vit import ViT, get_vit diff --git a/clinicadl/monai_networks/nn/att_unet.py 
b/clinicadl/monai_networks/nn/att_unet.py deleted file mode 100644 index 77ef02081..000000000 --- a/clinicadl/monai_networks/nn/att_unet.py +++ /dev/null @@ -1,207 +0,0 @@ -from typing import Any - -import torch -from monai.networks.nets.attentionunet import AttentionBlock - -from .layers.unet import ConvBlock, UpSample -from .unet import BaseUNet - - -class AttentionUNet(BaseUNet): - """ - Attention-UNet based on [Attention U-Net: Learning Where to Look for the Pancreas](https://arxiv.org/pdf/1804.03999). - - The user can customize the number of encoding blocks, the number of channels in each block, as well as other parameters - like the activation function. - - .. warning:: AttentionUNet works only with images whose dimensions are high enough powers of 2. More precisely, if n is the - number of max pooling operation in your AttentionUNet (which is equal to `len(channels)-1`), the image must have :math:`2^{k}` - pixels in each dimension, with :math:`k \\geq n` (e.g. shape (:math:`2^{n}`, :math:`2^{n+3}`) for a 2D image). - - Parameters - ---------- - spatial_dims : int - number of spatial dimensions of the input image. - in_channels : int - number of channels in the input image. - out_channels : int - number of output channels. - kwargs : Any - any optional argument accepted by (:py:class:`clinicadl.monai_networks.nn.unet.UNet`). - - Examples - -------- - >>> AttentionUNet( - spatial_dims=2, - in_channels=1, - out_channels=2, - channels=(4, 8), - act="elu", - output_act=("softmax", {"dim": 1}), - dropout=0.1, - ) - AttentionUNet( - (doubleconv): ConvBlock( - (0): Convolution( - (conv): Conv2d(1, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (1): Convolution( - (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - (down1): DownBlock( - (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) - (doubleconv): ConvBlock( - (0): Convolution( - (conv): Conv2d(4, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (1): Convolution( - (conv): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - ) - (upsample1): UpSample( - (0): Upsample(scale_factor=2.0, mode='nearest') - (1): Convolution( - (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - (attention1): AttentionBlock( - (W_g): Sequential( - (0): Convolution( - (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) - ) - (1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - ) - (W_x): Sequential( - (0): Convolution( - (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) - ) - (1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - ) - (psi): Sequential( - 
(0): Convolution( - (conv): Conv2d(2, 1, kernel_size=(1, 1), stride=(1, 1)) - ) - (1): BatchNorm2d(1, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (2): Sigmoid() - ) - (relu): ReLU() - ) - (doubleconv1): ConvBlock( - (0): Convolution( - (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (1): Convolution( - (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - (reduce_channels): Convolution( - (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) - ) - (output_act): Softmax(dim=1) - ) - """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - out_channels: int, - **kwargs: Any, - ): - super().__init__( - spatial_dims=spatial_dims, - in_channels=in_channels, - out_channels=out_channels, - **kwargs, - ) - - def _build_decoder(self): - for i in range(len(self.channels) - 1, 0, -1): - self.add_module( - f"upsample{i}", - UpSample( - spatial_dims=self.spatial_dims, - in_channels=self.channels[i], - out_channels=self.channels[i - 1], - act=self.act, - dropout=self.dropout, - ), - ) - self.add_module( - f"attention{i}", - AttentionBlock( - spatial_dims=self.spatial_dims, - f_l=self.channels[i - 1], - f_g=self.channels[i - 1], - f_int=self.channels[i - 1] // 2, - dropout=self.dropout, - ), - ) - self.add_module( - f"doubleconv{i}", - ConvBlock( - spatial_dims=self.spatial_dims, - in_channels=self.channels[i - 1] * 2, - out_channels=self.channels[i - 1], - act=self.act, - dropout=self.dropout, - ), - ) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x_history = [self.doubleconv(x)] - - for i in range(1, len(self.channels)): - x = self.get_submodule(f"down{i}")(x_history[-1]) - x_history.append(x) - - x_history.pop() # the output of bottelneck is not used as a gating signal - for i in range(len(self.channels) - 1, 0, -1): - up = self.get_submodule(f"upsample{i}")(x) - att_res = self.get_submodule(f"attention{i}")(g=x_history.pop(), x=up) - merged = torch.cat((att_res, up), dim=1) - x = self.get_submodule(f"doubleconv{i}")(merged) - - out = self.reduce_channels(x) - - if self.output_act is not None: - out = self.output_act(out) - - return out diff --git a/clinicadl/monai_networks/nn/autoencoder.py b/clinicadl/monai_networks/nn/autoencoder.py deleted file mode 100644 index 5cf823eeb..000000000 --- a/clinicadl/monai_networks/nn/autoencoder.py +++ /dev/null @@ -1,416 +0,0 @@ -from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union - -import numpy as np -import torch.nn as nn - -from .cnn import CNN -from .conv_encoder import ConvEncoder -from .generator import Generator -from .layers.utils import ( - ActivationParameters, - PoolingLayer, - SingleLayerPoolingParameters, - SingleLayerUnpoolingParameters, - UnpoolingLayer, - UnpoolingMode, -) -from .mlp import MLP -from .utils import ( - calculate_conv_out_shape, - calculate_convtranspose_out_shape, - calculate_pool_out_shape, -) - - -class AutoEncoder(nn.Sequential): - """ - An autoencoder with convolutional and fully connected layers. 
- - The user must pass the arguments to build an encoder, from its convolutional and - fully connected parts, and the decoder will be automatically built by taking the - symmetrical network. - - More precisely, to build the decoder, the order of the encoding layers is reverted, convolutions are - replaced by transposed convolutions and pooling layers are replaced by either upsampling or transposed - convolution layers. - Please note that the order of `Activation`, `Dropout` and `Normalization`, defined with the - argument `adn_ordering` in `conv_args`, is the same for the encoder and the decoder. - - Note that an `AutoEncoder` is an aggregation of a `CNN` (:py:class:`clinicadl.monai_networks.nn. - cnn.CNN`) and a `Generator` (:py:class:`clinicadl.monai_networks.nn.generator.Generator`). - - Parameters - ---------- - in_shape : Sequence[int] - sequence of integers stating the dimension of the input tensor (minus batch dimension). - latent_size : int - size of the latent vector. - conv_args : Dict[str, Any] - the arguments for the convolutional part of the encoder. The arguments are those accepted - by :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape` that - is specified here. So, the only mandatory argument is `channels`. - mlp_args : Optional[Dict[str, Any]] (optional, default=None) - the arguments for the MLP part of the encoder . The arguments are those accepted by - :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is inferred - from the output of the convolutional part, and `out_channels` that is set to `latent_size`. - So, the only mandatory argument is `hidden_channels`.\n - If None, the MLP part will be reduced to a single linear layer. - out_channels : Optional[int] (optional, default=None) - number of output channels. If None, the output will have the same number of channels as the - input. - output_act : Optional[ActivationParameters] (optional, default=None) - a potential activation layer applied to the output of the network, and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - unpooling_mode : Union[str, UnpoolingMode] (optional, default=UnpoolingMode.NEAREST) - type of unpooling. Can be either `"nearest"`, `"linear"`, `"bilinear"`, `"bicubic"`, `"trilinear"` or - `"convtranspose"`.\n - - `nearest`: unpooling is performed by upsampling with the :italic:`nearest` algorithm (see [PyTorch's Upsample layer] - (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html)). - - `linear`: unpooling is performed by upsampling with the :italic:`linear` algorithm. Only works with 1D images (excluding the - channel dimension). - - `bilinear`: unpooling is performed by upsampling with the :italic:`bilinear` algorithm. Only works with 2D images. - - `bicubic`: unpooling is performed by upsampling with the :italic:`bicubic` algorithm. Only works with 2D images. - - `trilinear`: unpooling is performed by upsampling with the :italic:`trilinear` algorithm. Only works with 3D images. 
- - `convtranspose`: unpooling is performed with a transposed convolution, whose parameters (kernel size, stride, etc.) are - computed to reverse the pooling operation. - - Examples - -------- - >>> AutoEncoder( - in_shape=(1, 16, 16), - latent_size=8, - conv_args={ - "channels": [2, 4], - "pooling_indices": [0], - "pooling": ("avg", {"kernel_size": 2}), - }, - mlp_args={"hidden_channels": [32], "output_act": "relu"}, - out_channels=2, - output_act="sigmoid", - unpooling_mode="bilinear", - ) - AutoEncoder( - (encoder): CNN( - (convolutions): ConvEncoder( - (layer0): Convolution( - (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) - (adn): ADN( - (N): InstanceNorm2d(2, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False) - (A): PReLU(num_parameters=1) - ) - ) - (pool0): AvgPool2d(kernel_size=2, stride=2, padding=0) - (layer1): Convolution( - (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) - ) - ) - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (hidden0): Sequential( - (linear): Linear(in_features=100, out_features=32, bias=True) - (adn): ADN( - (N): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (A): PReLU(num_parameters=1) - ) - ) - (output): Sequential( - (linear): Linear(in_features=32, out_features=8, bias=True) - (output_act): ReLU() - ) - ) - ) - (decoder): Generator( - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (hidden0): Sequential( - (linear): Linear(in_features=8, out_features=32, bias=True) - (adn): ADN( - (N): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (A): PReLU(num_parameters=1) - ) - ) - (output): Sequential( - (linear): Linear(in_features=32, out_features=100, bias=True) - (output_act): ReLU() - ) - ) - (reshape): Reshape() - (convolutions): ConvDecoder( - (layer0): Convolution( - (conv): ConvTranspose2d(4, 4, kernel_size=(3, 3), stride=(1, 1)) - (adn): ADN( - (N): InstanceNorm2d(4, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False) - (A): PReLU(num_parameters=1) - ) - ) - (unpool0): Upsample(size=(14, 14), mode=) - (layer1): Convolution( - (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) - ) - (output_act): Sigmoid() - ) - ) - ) - - """ - - def __init__( - self, - in_shape: Sequence[int], - latent_size: int, - conv_args: Dict[str, Any], - mlp_args: Optional[Dict[str, Any]] = None, - out_channels: Optional[int] = None, - output_act: Optional[ActivationParameters] = None, - unpooling_mode: Union[str, UnpoolingMode] = UnpoolingMode.NEAREST, - ) -> None: - super().__init__() - self.in_shape = in_shape - self.latent_size = latent_size - self.out_channels = out_channels if out_channels else self.in_shape[0] - self._output_act = output_act - self.unpooling_mode = self._check_unpooling_mode(unpooling_mode) - self.spatial_dims = len(in_shape[1:]) - - self.encoder = CNN( - in_shape=self.in_shape, - num_outputs=latent_size, - conv_args=conv_args, - mlp_args=mlp_args, - ) - inter_channels = ( - conv_args["channels"][-1] if len(conv_args["channels"]) > 0 else in_shape[0] - ) - inter_shape = (inter_channels, *self.encoder.convolutions.final_size) - self.decoder = Generator( - latent_size=latent_size, - start_shape=inter_shape, - conv_args=self._invert_conv_args(conv_args, self.encoder.convolutions), - mlp_args=self._invert_mlp_args(mlp_args, self.encoder.mlp), - ) - - @classmethod - def _invert_mlp_args(cls, args: Dict[str, Any], mlp: MLP) -> Dict[str, Any]: - """ - Inverts arguments passed for the MLP part of the encoder, to 
get the MLP part of - the decoder. - """ - if args is None: - args = {} - args["hidden_channels"] = cls._invert_list_arg(mlp.hidden_channels) - - return args - - def _invert_conv_args( - self, args: Dict[str, Any], conv: ConvEncoder - ) -> Dict[str, Any]: - """ - Inverts arguments passed for the convolutional part of the encoder, to get the convolutional - part of the decoder. - """ - if len(args["channels"]) == 0: - args["channels"] = [] - else: - args["channels"] = self._invert_list_arg(conv.channels[:-1]) + [ - self.out_channels - ] - args["kernel_size"] = self._invert_list_arg(conv.kernel_size) - args["stride"] = self._invert_list_arg(conv.stride) - args["dilation"] = self._invert_list_arg(conv.dilation) - args["padding"], args["output_padding"] = self._get_paddings_list(conv) - - args["unpooling_indices"] = ( - conv.n_layers - np.array(conv.pooling_indices) - 2 - ).astype(int) - args["unpooling"] = [] - sizes_before_pooling = [ - size - for size, (layer_name, _) in zip(conv.size_details, conv.named_children()) - if "pool" in layer_name - ] - for size, pooling in zip(sizes_before_pooling[::-1], conv.pooling[::-1]): - args["unpooling"].append(self._invert_pooling_layer(size, pooling)) - - if "pooling" in args: - del args["pooling"] - if "pooling_indices" in args: - del args["pooling_indices"] - - args["output_act"] = self._output_act if self._output_act else None - - return args - - @classmethod - def _invert_list_arg(cls, arg: Union[Any, List[Any]]) -> Union[Any, List[Any]]: - """ - Reverses lists. - """ - return list(arg[::-1]) if isinstance(arg, Sequence) else arg - - def _invert_pooling_layer( - self, - size_before_pool: Sequence[int], - pooling: SingleLayerPoolingParameters, - ) -> SingleLayerUnpoolingParameters: - """ - Gets the unpooling layer. - """ - if self.unpooling_mode == UnpoolingMode.CONV_TRANS: - return ( - UnpoolingLayer.CONV_TRANS, - self._invert_pooling_with_convtranspose(size_before_pool, pooling), - ) - else: - return ( - UnpoolingLayer.UPSAMPLE, - {"size": size_before_pool, "mode": self.unpooling_mode}, - ) - - @classmethod - def _invert_pooling_with_convtranspose( - cls, - size_before_pool: Sequence[int], - pooling: SingleLayerPoolingParameters, - ) -> Dict[str, Any]: - """ - Computes the arguments of the transposed convolution, based on the pooling layer. 
- """ - pooling_mode, pooling_args = pooling - if ( - pooling_mode == PoolingLayer.ADAPT_AVG - or pooling_mode == PoolingLayer.ADAPT_MAX - ): - input_size_np = np.array(size_before_pool) - output_size_np = np.array(pooling_args["output_size"]) - stride_np = input_size_np // output_size_np # adaptive pooling formulas - kernel_size_np = ( - input_size_np - (output_size_np - 1) * stride_np - ) # adaptive pooling formulas - args = { - "kernel_size": tuple(int(k) for k in kernel_size_np), - "stride": tuple(int(s) for s in stride_np), - } - padding, output_padding = cls._find_convtranspose_paddings( - pooling_mode, - size_before_pool, - output_size=pooling_args["output_size"], - **args, - ) - - elif pooling_mode == PoolingLayer.MAX or pooling_mode == PoolingLayer.AVG: - if "stride" not in pooling_args: - pooling_args["stride"] = pooling_args["kernel_size"] - args = { - arg: value - for arg, value in pooling_args.items() - if arg in ["kernel_size", "stride", "padding", "dilation"] - } - padding, output_padding = cls._find_convtranspose_paddings( - pooling_mode, - size_before_pool, - **pooling_args, - ) - - args["padding"] = padding # pylint: disable=possibly-used-before-assignment - args["output_padding"] = output_padding # pylint: disable=possibly-used-before-assignment - - return args - - @classmethod - def _get_paddings_list(cls, conv: ConvEncoder) -> List[Tuple[int, ...]]: - """ - Finds output padding list. - """ - padding = [] - output_padding = [] - size_before_convs = [ - size - for size, (layer_name, _) in zip(conv.size_details, conv.named_children()) - if "layer" in layer_name - ] - for size, k, s, p, d in zip( - size_before_convs, - conv.kernel_size, - conv.stride, - conv.padding, - conv.dilation, - ): - p, out_p = cls._find_convtranspose_paddings( - "conv", size, kernel_size=k, stride=s, padding=p, dilation=d - ) - padding.append(p) - output_padding.append(out_p) - - return cls._invert_list_arg(padding), cls._invert_list_arg(output_padding) - - @classmethod - def _find_convtranspose_paddings( - cls, - layer_type: Union[Literal["conv"], PoolingLayer], - in_shape: Union[Sequence[int], int], - padding: Union[Sequence[int], int] = 0, - **kwargs, - ) -> Tuple[Tuple[int, ...], Tuple[int, ...]]: - """ - Finds padding and output padding necessary to recover the right image size after - a transposed convolution. - """ - if layer_type == "conv": - layer_out_shape = calculate_conv_out_shape(in_shape, **kwargs) - elif layer_type in list(PoolingLayer): - layer_out_shape = calculate_pool_out_shape(layer_type, in_shape, **kwargs) - - convt_out_shape = calculate_convtranspose_out_shape(layer_out_shape, **kwargs) # pylint: disable=possibly-used-before-assignment - output_padding = np.atleast_1d(in_shape) - np.atleast_1d(convt_out_shape) - - if ( - output_padding < 0 - ).any(): # can happen with ceil_mode=True for maxpool. 
Then, add some padding - padding = np.atleast_1d(padding) * np.ones_like( - output_padding - ) # to have the same shape as output_padding - padding[output_padding < 0] += np.maximum(np.abs(output_padding) // 2, 1)[ - output_padding < 0 - ] # //2 because 2*padding pixels are removed - - convt_out_shape = calculate_convtranspose_out_shape( - layer_out_shape, padding=padding, **kwargs - ) - output_padding = np.atleast_1d(in_shape) - np.atleast_1d(convt_out_shape) - padding = tuple(int(s) for s in padding) - - return padding, tuple(int(s) for s in output_padding) - - def _check_unpooling_mode( - self, unpooling_mode: Union[str, UnpoolingMode] - ) -> UnpoolingMode: - """ - Checks consistency between data shape and unpooling mode. - """ - unpooling_mode = UnpoolingMode(unpooling_mode) - if unpooling_mode == UnpoolingMode.LINEAR and len(self.in_shape) != 2: - raise ValueError( - f"unpooling mode `linear` only works with 2D data (counting the channel dimension). " - f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." - ) - elif unpooling_mode == UnpoolingMode.BILINEAR and len(self.in_shape) != 3: - raise ValueError( - f"unpooling mode `bilinear` only works with 3D data (counting the channel dimension). " - f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." - ) - elif unpooling_mode == UnpoolingMode.BICUBIC and len(self.in_shape) != 3: - raise ValueError( - f"unpooling mode `bicubic` only works with 3D data (counting the channel dimension). " - f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." - ) - elif unpooling_mode == UnpoolingMode.TRILINEAR and len(self.in_shape) != 4: - raise ValueError( - f"unpooling mode `trilinear` only works with 4D data (counting the channel dimension). " - f"Got in_shape={self.in_shape}, which is understood as {len(self.in_shape)}D data." - ) - - return unpooling_mode diff --git a/clinicadl/monai_networks/nn/cnn.py b/clinicadl/monai_networks/nn/cnn.py deleted file mode 100644 index 1479ecaea..000000000 --- a/clinicadl/monai_networks/nn/cnn.py +++ /dev/null @@ -1,124 +0,0 @@ -from typing import Any, Dict, Optional, Sequence - -import numpy as np -import torch.nn as nn - -from .conv_encoder import ConvEncoder -from .mlp import MLP -from .utils import check_conv_args, check_mlp_args - - -class CNN(nn.Sequential): - """ - A regressor/classifier with first convolutional layers and then fully connected layers. - - This network is a simple aggregation of a Fully Convolutional Network (:py:class:`clinicadl. - monai_networks.nn.conv_encoder.ConvEncoder`) and a Multi Layer Perceptron (:py:class:`clinicadl. - monai_networks.nn.mlp.MLP`). - - Parameters - ---------- - in_shape : Sequence[int] - sequence of integers stating the dimension of the input tensor (minus batch dimension). - num_outputs : int - number of variables to predict. - conv_args : Dict[str, Any] - the arguments for the convolutional part. The arguments are those accepted by - :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape` - that is specified here. So, the only mandatory argument is `channels`. - mlp_args : Optional[Dict[str, Any]] (optional, default=None) - the arguments for the MLP part. The arguments are those accepted by - :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is inferred - from the output of the convolutional part, and `out_channels` that is set to `num_outputs`. 
- So, the only mandatory argument is `hidden_channels`.\n - If None, the MLP part will be reduced to a single linear layer. - - Examples - -------- - # a classifier - >>> CNN( - in_shape=(1, 10, 10), - num_outputs=2, - conv_args={"channels": [2, 4], "norm": None, "act": None}, - mlp_args={"hidden_channels": [5], "act": "elu", "norm": None, "output_act": "softmax"}, - ) - CNN( - (convolutions): ConvEncoder( - (layer0): Convolution( - (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) - ) - (layer1): Convolution( - (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) - ) - ) - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (hidden0): Sequential( - (linear): Linear(in_features=144, out_features=5, bias=True) - (adn): ADN( - (A): ELU(alpha=1.0) - ) - ) - (output): Sequential( - (linear): Linear(in_features=5, out_features=2, bias=True) - (output_act): Softmax(dim=None) - ) - ) - ) - - # a regressor - >>> CNN( - in_shape=(1, 10, 10), - num_outputs=2, - conv_args={"channels": [2, 4], "norm": None, "act": None}, - ) - CNN( - (convolutions): ConvEncoder( - (layer0): Convolution( - (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) - ) - (layer1): Convolution( - (conv): Conv2d(2, 4, kernel_size=(3, 3), stride=(1, 1)) - ) - ) - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (output): Linear(in_features=144, out_features=2, bias=True) - ) - ) - """ - - def __init__( - self, - in_shape: Sequence[int], - num_outputs: int, - conv_args: Dict[str, Any], - mlp_args: Optional[Dict[str, Any]] = None, - ) -> None: - super().__init__() - check_conv_args(conv_args) - check_mlp_args(mlp_args) - self.in_shape = in_shape - self.num_outputs = num_outputs - - in_channels, *input_size = in_shape - spatial_dims = len(input_size) - - self.convolutions = ConvEncoder( - in_channels=in_channels, - spatial_dims=spatial_dims, - _input_size=tuple(input_size), - **conv_args, - ) - - n_channels = ( - conv_args["channels"][-1] if len(conv_args["channels"]) > 0 else in_shape[0] - ) - flatten_shape = int(np.prod(self.convolutions.final_size) * n_channels) - if mlp_args is None: - mlp_args = {"hidden_channels": []} - self.mlp = MLP( - in_channels=flatten_shape, - out_channels=num_outputs, - **mlp_args, - ) diff --git a/clinicadl/monai_networks/nn/conv_decoder.py b/clinicadl/monai_networks/nn/conv_decoder.py deleted file mode 100644 index 28c9be96f..000000000 --- a/clinicadl/monai_networks/nn/conv_decoder.py +++ /dev/null @@ -1,388 +0,0 @@ -from typing import Callable, Optional, Sequence, Tuple - -import torch.nn as nn -from monai.networks.blocks import Convolution -from monai.networks.layers.utils import get_act_layer -from monai.utils.misc import ensure_tuple - -from .layers.unpool import get_unpool_layer -from .layers.utils import ( - ActFunction, - ActivationParameters, - ConvNormalizationParameters, - ConvNormLayer, - ConvParameters, - NormLayer, - SingleLayerUnpoolingParameters, - UnpoolingLayer, - UnpoolingParameters, -) -from .utils import ( - calculate_convtranspose_out_shape, - calculate_unpool_out_shape, - check_adn_ordering, - check_norm_layer, - check_pool_indices, - ensure_list_of_tuples, -) - - -class ConvDecoder(nn.Sequential): - """ - Fully convolutional decoder network with transposed convolutions, unpooling, normalization, activation - and dropout layers. - - Parameters - ---------- - spatial_dims : int - number of spatial dimensions of the input image. - in_channels : int - number of channels in the input image. 
- channels : Sequence[int] - sequence of integers stating the output channels of each transposed convolution. Thus, this - parameter also controls the number of transposed convolutions. - kernel_size : ConvParameters (optional, default=3) - the kernel size of the transposed convolutions. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the kernel sizes for each layer. - The length of the list must be equal to the number of transposed convolution layers (i.e. - `len(channels)`). - stride : ConvParameters (optional, default=1) - the stride of the transposed convolutions. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the strides for each layer. - The length of the list must be equal to the number of transposed convolution layers (i.e. - `len(channels)`). - padding : ConvParameters (optional, default=0) - the padding of the transposed convolutions. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the paddings for each layer. - The length of the list must be equal to the number of transposed convolution layers (i.e. - `len(channels)`). - output_padding : ConvParameters (optional, default=0) - the output padding of the transposed convolutions. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the output paddings for each layer. - The length of the list must be equal to the number of transposed convolution layers (i.e. - `len(channels)`). - dilation : ConvParameters (optional, default=1) - the dilation factor of the transposed convolutions. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the dilations for each layer. - The length of the list must be equal to the number of transposed convolution layers (i.e. - `len(channels)`). - unpooling : Optional[UnpoolingParameters] (optional, default=(UnpoolingLayer.UPSAMPLE, {"scale_factor": 2})) - the unpooling mode and the arguments of the unpooling layer, passed as `(unpooling_mode, arguments)`. - If None, no unpooling will be performed in the network.\n - `unpooling_mode` can be either `upsample` or `convtranspose`. 
Please refer to PyTorch's [Upsample] - (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html) or [ConvTranspose](https:// - pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html) to know the mandatory and optional - arguments.\n - If a list is passed, it will be understood as `(unpooling_mode, arguments)` for each unpooling layer.\n - Note: no need to pass `in_channels` and `out_channels` for `convtranspose` because the unpooling - layers are not intended to modify the number of channels. - unpooling_indices : Optional[Sequence[int]] (optional, default=None) - indices of the transposed convolution layers after which unpooling should be performed. - If None, no unpooling will be performed. An index equal to -1 will be understood as a pooling layer before - the first transposed convolution. - act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU) - the activation function used after a transposed convolution layer, and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - output_act : Optional[ActivationParameters] (optional, default=None) - a potential activation layer applied to the output of the network. Should be pass in the same way as `act`. - If None, no last activation will be applied. - norm : Optional[ConvNormalizationParameters] (optional, default=NormLayer.INSTANCE) - the normalization type used after a transposed convolution layer, and optionally the arguments of the normalization - layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be - performed.\n - `norm_type` can be any value in {`batch`, `group`, `instance`, `syncbatch`}. Please refer to PyTorch's - [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and - optional arguments for each of them.\n - Please note that arguments `num_channels`, `num_features` of the normalization layer - should not be passed, as they are automatically inferred from the output of the previous layer in the network. - dropout : Optional[float] (optional, default=None) - dropout ratio. If None, no dropout. - bias : bool (optional, default=True) - whether to have a bias term in transposed convolutions. - adn_ordering : str (optional, default="NDA") - order of operations `Activation`, `Dropout` and `Normalization` after a transposed convolutional layer (except the - last one).\n - For example if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n - Note: ADN will not be applied after the last convolution. 
- - Examples - -------- - >>> ConvDecoder( - in_channels=16, - spatial_dims=2, - channels=[8, 4, 1], - kernel_size=(3, 5), - stride=2, - padding=[1, 0, 0], - output_padding=[0, 0, (1, 2)], - dilation=1, - unpooling=[("upsample", {"scale_factor": 2}), ("upsample", {"size": (32, 32)})], - unpooling_indices=[0, 1], - act="elu", - output_act="relu", - norm=("batch", {"eps": 1e-05}), - dropout=0.1, - bias=True, - adn_ordering="NDA", - ) - ConvDecoder( - (layer0): Convolution( - (conv): ConvTranspose2d(16, 8, kernel_size=(3, 5), stride=(2, 2), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (unpool0): Upsample(scale_factor=2.0, mode='nearest') - (layer1): Convolution( - (conv): ConvTranspose2d(8, 4, kernel_size=(3, 5), stride=(2, 2)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (unpool1): Upsample(size=(32, 32), mode='nearest') - (layer2): Convolution( - (conv): ConvTranspose2d(4, 1, kernel_size=(3, 5), stride=(2, 2), output_padding=(1, 2)) - ) - (output_act): ReLU() - ) - - """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - channels: Sequence[int], - kernel_size: ConvParameters = 3, - stride: ConvParameters = 1, - padding: ConvParameters = 0, - output_padding: ConvParameters = 0, - dilation: ConvParameters = 1, - unpooling: Optional[UnpoolingParameters] = ( - UnpoolingLayer.UPSAMPLE, - {"scale_factor": 2}, - ), - unpooling_indices: Optional[Sequence[int]] = None, - act: Optional[ActivationParameters] = ActFunction.PRELU, - output_act: Optional[ActivationParameters] = None, - norm: Optional[ConvNormalizationParameters] = ConvNormLayer.INSTANCE, - dropout: Optional[float] = None, - bias: bool = True, - adn_ordering: str = "NDA", - _input_size: Optional[Sequence[int]] = None, - ) -> None: - super().__init__() - - self._current_size = _input_size if _input_size else None - - self.spatial_dims = spatial_dims - self.in_channels = in_channels - self.channels = ensure_tuple(channels) - self.n_layers = len(self.channels) - - self.kernel_size = ensure_list_of_tuples( - kernel_size, self.spatial_dims, self.n_layers, "kernel_size" - ) - self.stride = ensure_list_of_tuples( - stride, self.spatial_dims, self.n_layers, "stride" - ) - self.padding = ensure_list_of_tuples( - padding, self.spatial_dims, self.n_layers, "padding" - ) - self.output_padding = ensure_list_of_tuples( - output_padding, self.spatial_dims, self.n_layers, "output_padding" - ) - self.dilation = ensure_list_of_tuples( - dilation, self.spatial_dims, self.n_layers, "dilation" - ) - - self.unpooling_indices = check_pool_indices(unpooling_indices, self.n_layers) - self.unpooling = self._check_unpool_layers(unpooling) - self.act = act - self.norm = check_norm_layer(norm) - if self.norm == NormLayer.LAYER: - raise ValueError("Layer normalization not implemented in ConvDecoder.") - self.dropout = dropout - self.bias = bias - self.adn_ordering = check_adn_ordering(adn_ordering) - - n_unpoolings = 0 - if self.unpooling and -1 in self.unpooling_indices: - unpooling_layer = self._get_unpool_layer( - self.unpooling[n_unpoolings], n_channels=self.in_channels - ) - self.add_module("init_unpool", unpooling_layer) - n_unpoolings += 1 - - echannel = self.in_channels - for i, (c, k, s, p, o_p, d) in enumerate( - zip( - self.channels, - self.kernel_size, - self.stride, - 
self.padding, - self.output_padding, - self.dilation, - ) - ): - conv_layer = self._get_convtranspose_layer( - in_channels=echannel, - out_channels=c, - kernel_size=k, - stride=s, - padding=p, - output_padding=o_p, - dilation=d, - is_last=(i == len(channels) - 1), - ) - self.add_module(f"layer{i}", conv_layer) - echannel = c # use the output channel number as the input for the next loop - if self.unpooling and i in self.unpooling_indices: - unpooling_layer = self._get_unpool_layer( - self.unpooling[n_unpoolings], n_channels=c - ) - self.add_module(f"unpool{i}", unpooling_layer) - n_unpoolings += 1 - - self.output_act = get_act_layer(output_act) if output_act else None - - @property - def final_size(self): - """ - To know the size of an image at the end of the network. - """ - return self._current_size - - @final_size.setter - def final_size(self, fct: Callable[[Tuple[int, ...]], Tuple[int, ...]]): - """ - Takes as input the function used to update the current image size. - """ - if self._current_size is not None: - self._current_size = fct(self._current_size) - - def _get_convtranspose_layer( - self, - in_channels: int, - out_channels: int, - kernel_size: Tuple[int, ...], - stride: Tuple[int, ...], - padding: Tuple[int, ...], - output_padding: Tuple[int, ...], - dilation: Tuple[int, ...], - is_last: bool, - ) -> Convolution: - """ - Gets the parametrized TransposedConvolution-ADN block and updates the current output size. - """ - self.final_size = lambda size: calculate_convtranspose_out_shape( - size, kernel_size, stride, padding, output_padding, dilation - ) - - return Convolution( - is_transposed=True, - conv_only=is_last, - spatial_dims=self.spatial_dims, - in_channels=in_channels, - out_channels=out_channels, - strides=stride, - kernel_size=kernel_size, - padding=padding, - output_padding=output_padding, - dilation=dilation, - act=self.act, - norm=self.norm, - dropout=self.dropout, - bias=self.bias, - adn_ordering=self.adn_ordering, - ) - - def _get_unpool_layer( - self, unpooling: SingleLayerUnpoolingParameters, n_channels: int - ) -> nn.Module: - """ - Gets the parametrized unpooling layer and updates the current output size. - """ - unpool_layer = get_unpool_layer( - unpooling, - spatial_dims=self.spatial_dims, - in_channels=n_channels, - out_channels=n_channels, - ) - self.final_size = lambda size: calculate_unpool_out_shape( - unpool_mode=unpooling[0], - in_shape=size, - **unpool_layer.__dict__, - ) - return unpool_layer - - @classmethod - def _check_single_unpool_layer( - cls, unpooling: SingleLayerUnpoolingParameters - ) -> SingleLayerUnpoolingParameters: - """ - Checks unpooling arguments for a single pooling layer. - """ - if not isinstance(unpooling, tuple) or len(unpooling) != 2: - raise ValueError( - "unpooling must be double (or a list of doubles) with first the type of unpooling and then the parameters of " - f"the unpooling layer in a dict. Got {unpooling}" - ) - _ = UnpoolingLayer(unpooling[0]) # check unpooling mode - args = unpooling[1] - if not isinstance(args, dict): - raise ValueError( - f"The arguments of the unpooling layer must be passed in a dict. Got {args}" - ) - - return unpooling - - def _check_unpool_layers( - self, unpooling: UnpoolingParameters - ) -> UnpoolingParameters: - """ - Checks argument unpooling. 
- """ - if unpooling is None: - return unpooling - if isinstance(unpooling, list): - for unpool_layer in unpooling: - self._check_single_unpool_layer(unpool_layer) - if len(unpooling) != len(self.unpooling_indices): - raise ValueError( - "If you pass a list for unpooling, the size of that list must match " - f"the size of unpooling_indices. Got: unpooling={unpooling} and " - f"unpooling_indices={self.unpooling_indices}" - ) - elif isinstance(unpooling, tuple): - self._check_single_unpool_layer(unpooling) - unpooling = (unpooling,) * len(self.unpooling_indices) - else: - raise ValueError( - f"unpooling can be either None, a double (string, dictionary) or a list of such doubles. Got {unpooling}" - ) - - return unpooling diff --git a/clinicadl/monai_networks/nn/conv_encoder.py b/clinicadl/monai_networks/nn/conv_encoder.py deleted file mode 100644 index f3ec66484..000000000 --- a/clinicadl/monai_networks/nn/conv_encoder.py +++ /dev/null @@ -1,392 +0,0 @@ -from typing import Callable, List, Optional, Sequence, Tuple - -import numpy as np -import torch.nn as nn -from monai.networks.blocks import Convolution -from monai.networks.layers.utils import get_act_layer, get_pool_layer -from monai.utils.misc import ensure_tuple - -from .layers.utils import ( - ActFunction, - ActivationParameters, - ConvNormalizationParameters, - ConvNormLayer, - ConvParameters, - NormLayer, - PoolingLayer, - PoolingParameters, - SingleLayerPoolingParameters, -) -from .utils import ( - calculate_conv_out_shape, - calculate_pool_out_shape, - check_adn_ordering, - check_norm_layer, - check_pool_indices, - ensure_list_of_tuples, -) - - -class ConvEncoder(nn.Sequential): - """ - Fully convolutional encoder network with convolutional, pooling, normalization, activation - and dropout layers. - - Parameters - ---------- - spatial_dims : int - number of spatial dimensions of the input image. - in_channels : int - number of channels in the input image. - channels : Sequence[int] - sequence of integers stating the output channels of each convolutional layer. Thus, this - parameter also controls the number of convolutional layers. - kernel_size : ConvParameters (optional, default=3) - the kernel size of the convolutional layers. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the kernel sizes for each layer. - The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). - stride : ConvParameters (optional, default=1) - the stride of the convolutional layers. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the strides for each layer. - The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). - padding : ConvParameters (optional, default=0) - the padding of the convolutional layers. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. 
These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the paddings for each layer. - The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). - dilation : ConvParameters (optional, default=1) - the dilation factor of the convolutional layers. Can be an integer, a tuple or a list.\n - If integer, the value will be used for all layers and all dimensions.\n - If tuple (of integers), it will be interpreted as the values for each dimension. These values - will be used for all the layers.\n - If list (of tuples or integers), it will be interpreted as the dilations for each layer. - The length of the list must be equal to the number of convolutional layers (i.e. `len(channels)`). - pooling : Optional[PoolingParameters] (optional, default=(PoolingLayer.MAX, {"kernel_size": 2})) - the pooling mode and the arguments of the pooling layer, passed as `(pooling_mode, arguments)`. - If None, no pooling will be performed in the network.\n - `pooling_mode` can be either `max`, `avg`, `adaptivemax` or `adaptiveavg`. Please refer to PyTorch's [documentation] - (https://pytorch.org/docs/stable/nn.html#pooling-layers) to know the mandatory and optional arguments.\n - If a list is passed, it will be understood as `(pooling_mode, arguments)` for each pooling layer. - pooling_indices : Optional[Sequence[int]] (optional, default=None) - indices of the convolutional layers after which pooling should be performed. - If None, no pooling will be performed. An index equal to -1 will be understood as an unpooling layer before - the first convolution. - act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU) - the activation function used after a convolutional layer, and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - output_act : Optional[ActivationParameters] (optional, default=None) - a potential activation layer applied to the output of the network. Should be pass in the same way as `act`. - If None, no last activation will be applied. - norm : Optional[ConvNormalizationParameters] (optional, default=NormLayer.INSTANCE) - the normalization type used after a convolutional layer, and optionally the arguments of the normalization - layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be - performed.\n - `norm_type` can be any value in {`batch`, `group`, `instance`, `syncbatch`}. Please refer to PyTorch's - [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and - optional arguments for each of them.\n - Please note that arguments `num_channels`, `num_features` of the normalization layer - should not be passed, as they are automatically inferred from the output of the previous layer in the network. - dropout : Optional[float] (optional, default=None) - dropout ratio. If None, no dropout. - bias : bool (optional, default=True) - whether to have a bias term in convolutions. 
- adn_ordering : str (optional, default="NDA") - order of operations `Activation`, `Dropout` and `Normalization` after a convolutional layer (except the last - one). - For example if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n - Note: ADN will not be applied after the last convolution. - - Examples - -------- - >>> ConvEncoder( - spatial_dims=2, - in_channels=1, - channels=[2, 4, 8], - kernel_size=(3, 5), - stride=1, - padding=[1, (0, 1), 0], - dilation=1, - pooling=[("max", {"kernel_size": 2}), ("avg", {"kernel_size": 2})], - pooling_indices=[0, 1], - act="elu", - output_act="relu", - norm=("batch", {"eps": 1e-05}), - dropout=0.1, - bias=True, - adn_ordering="NDA", - ) - ConvEncoder( - (layer0): Convolution( - (conv): Conv2d(1, 2, kernel_size=(3, 5), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (pool0): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) - (layer1): Convolution( - (conv): Conv2d(2, 4, kernel_size=(3, 5), stride=(1, 1), padding=(0, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (pool1): AvgPool2d(kernel_size=2, stride=2, padding=0) - (layer2): Convolution( - (conv): Conv2d(4, 8, kernel_size=(3, 5), stride=(1, 1)) - ) - (output_act): ReLU() - ) - - """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - channels: Sequence[int], - kernel_size: ConvParameters = 3, - stride: ConvParameters = 1, - padding: ConvParameters = 0, - dilation: ConvParameters = 1, - pooling: Optional[PoolingParameters] = ( - PoolingLayer.MAX, - {"kernel_size": 2}, - ), - pooling_indices: Optional[Sequence[int]] = None, - act: Optional[ActivationParameters] = ActFunction.PRELU, - output_act: Optional[ActivationParameters] = None, - norm: Optional[ConvNormalizationParameters] = ConvNormLayer.INSTANCE, - dropout: Optional[float] = None, - bias: bool = True, - adn_ordering: str = "NDA", - _input_size: Optional[Sequence[int]] = None, - ) -> None: - super().__init__() - - self._current_size = _input_size if _input_size else None - self._size_details = [self._current_size] if _input_size else None - - self.spatial_dims = spatial_dims - self.in_channels = in_channels - self.channels = ensure_tuple(channels) - self.n_layers = len(self.channels) - - self.kernel_size = ensure_list_of_tuples( - kernel_size, self.spatial_dims, self.n_layers, "kernel_size" - ) - self.stride = ensure_list_of_tuples( - stride, self.spatial_dims, self.n_layers, "stride" - ) - self.padding = ensure_list_of_tuples( - padding, self.spatial_dims, self.n_layers, "padding" - ) - self.dilation = ensure_list_of_tuples( - dilation, self.spatial_dims, self.n_layers, "dilation" - ) - - self.pooling_indices = check_pool_indices(pooling_indices, self.n_layers) - self.pooling = self._check_pool_layers(pooling) - self.act = act - self.norm = check_norm_layer(norm) - if self.norm == NormLayer.LAYER: - raise ValueError("Layer normalization not implemented in ConvEncoder.") - self.dropout = dropout - self.bias = bias - self.adn_ordering = check_adn_ordering(adn_ordering) - - n_poolings = 0 - if self.pooling and -1 in self.pooling_indices: - pooling_layer = self._get_pool_layer(self.pooling[n_poolings]) - self.add_module("init_pool", pooling_layer) - n_poolings += 1 - - echannel = 
self.in_channels - for i, (c, k, s, p, d) in enumerate( - zip( - self.channels, - self.kernel_size, - self.stride, - self.padding, - self.dilation, - ) - ): - conv_layer = self._get_conv_layer( - in_channels=echannel, - out_channels=c, - kernel_size=k, - stride=s, - padding=p, - dilation=d, - is_last=(i == len(channels) - 1), - ) - self.add_module(f"layer{i}", conv_layer) - echannel = c # use the output channel number as the input for the next loop - if self.pooling and i in self.pooling_indices: - pooling_layer = self._get_pool_layer(self.pooling[n_poolings]) - self.add_module(f"pool{i}", pooling_layer) - n_poolings += 1 - - self.output_act = get_act_layer(output_act) if output_act else None - - @property - def final_size(self): - """ - To know the size of an image at the end of the network. - """ - return self._current_size - - @property - def size_details(self): - """ - To know the sizes of intermediate images. - """ - return self._size_details - - @final_size.setter - def final_size(self, fct: Callable[[Tuple[int, ...]], Tuple[int, ...]]): - """ - Takes as input the function used to update the current image size. - """ - if self._current_size is not None: - self._current_size = fct(self._current_size) - self._size_details.append(self._current_size) - self._check_size() - - def _get_conv_layer( - self, - in_channels: int, - out_channels: int, - kernel_size: Tuple[int, ...], - stride: Tuple[int, ...], - padding: Tuple[int, ...], - dilation: Tuple[int, ...], - is_last: bool, - ) -> Convolution: - """ - Gets the parametrized Convolution-ADN block and updates the current output size. - """ - self.final_size = lambda size: calculate_conv_out_shape( - size, kernel_size, stride, padding, dilation - ) - - return Convolution( - conv_only=is_last, - spatial_dims=self.spatial_dims, - in_channels=in_channels, - out_channels=out_channels, - strides=stride, - kernel_size=kernel_size, - padding=padding, - dilation=dilation, - act=self.act, - norm=self.norm, - dropout=self.dropout, - bias=self.bias, - adn_ordering=self.adn_ordering, - ) - - def _get_pool_layer(self, pooling: SingleLayerPoolingParameters) -> nn.Module: - """ - Gets the parametrized pooling layer and updates the current output size. - """ - pool_layer = get_pool_layer(pooling, spatial_dims=self.spatial_dims) - old_size = self.final_size - self.final_size = lambda size: calculate_pool_out_shape( - pool_mode=pooling[0], in_shape=size, **pool_layer.__dict__ - ) - - if ( - self.final_size is not None - and (np.array(old_size) < np.array(self.final_size)).any() - ): - raise ValueError( - f"You passed {pooling} as a pooling layer. But before this layer, the size of the image " - f"was {old_size}. So, pooling can't be performed." - ) - - return pool_layer - - def _check_size(self) -> None: - """ - Checks that image size never reaches 0. - """ - if self._current_size is not None and (np.array(self._current_size) <= 0).any(): - raise ValueError( - f"Failed to build the network. An image of size 0 or less has been reached. Stopped at:\n {self}" - ) - - @classmethod - def _check_single_pool_layer( - cls, pooling: SingleLayerPoolingParameters - ) -> SingleLayerPoolingParameters: - """ - Checks pooling arguments for a single pooling layer. - """ - if not isinstance(pooling, tuple) or len(pooling) != 2: - raise ValueError( - "pooling must be a double (or a list of doubles) with first the type of pooling and then the parameters " - f"of the pooling layer in a dict. 
Got {pooling}" - ) - pooling_type = PoolingLayer(pooling[0]) - args = pooling[1] - if not isinstance(args, dict): - raise ValueError( - f"The arguments of the pooling layer must be passed in a dict. Got {args}" - ) - if ( - pooling_type == PoolingLayer.MAX or pooling_type == PoolingLayer.AVG - ) and "kernel_size" not in args: - raise ValueError( - f"For {pooling_type} pooling mode, `kernel_size` argument must be passed. " - f"Got {args}" - ) - elif ( - pooling_type == PoolingLayer.ADAPT_AVG - or pooling_type == PoolingLayer.ADAPT_MAX - ) and "output_size" not in args: - raise ValueError( - f"For {pooling_type} pooling mode, `output_size` argument must be passed. " - f"Got {args}" - ) - - def _check_pool_layers( - self, pooling: PoolingParameters - ) -> List[SingleLayerPoolingParameters]: - """ - Check argument pooling. - """ - if pooling is None: - return pooling - if isinstance(pooling, list): - for pool_layer in pooling: - self._check_single_pool_layer(pool_layer) - if len(pooling) != len(self.pooling_indices): - raise ValueError( - "If you pass a list for pooling, the size of that list must match " - f"the size of pooling_indices. Got: pooling={pooling} and " - f"pooling_indices={self.pooling_indices}" - ) - elif isinstance(pooling, tuple): - self._check_single_pool_layer(pooling) - pooling = [pooling] * len(self.pooling_indices) - else: - raise ValueError( - f"pooling can be either None, a double (string, dictionary) or a list of such doubles. Got {pooling}" - ) - - return pooling diff --git a/clinicadl/monai_networks/nn/densenet.py b/clinicadl/monai_networks/nn/densenet.py deleted file mode 100644 index 45d99cc71..000000000 --- a/clinicadl/monai_networks/nn/densenet.py +++ /dev/null @@ -1,312 +0,0 @@ -import re -from collections import OrderedDict -from enum import Enum -from typing import Any, Mapping, Optional, Sequence, Union - -import torch.nn as nn -from monai.networks.layers.utils import get_act_layer -from monai.networks.nets import DenseNet as BaseDenseNet -from torch.hub import load_state_dict_from_url -from torchvision.models.densenet import ( - DenseNet121_Weights, - DenseNet161_Weights, - DenseNet169_Weights, - DenseNet201_Weights, -) - -from .layers.utils import ActivationParameters - - -class DenseNet(nn.Sequential): - """ - DenseNet based on the [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993) paper. - Adapted from [MONAI's implementation](https://docs.monai.io/en/stable/networks.html#densenet). - - The user can customize the number of dense blocks, the number of dense layers in each block, as well as - other parameters like the growth rate. - - DenseNet is a fully convolutional network that can work with input of any size, provided that is it large - enough not to be reduced to a 1-pixel image (before the adaptative average pooling). - - Parameters - ---------- - spatial_dims : int - number of spatial dimensions of the input image. - in_channels : int - number of channels in the input image. - num_outputs : Optional[int] - number of output variables after the last linear layer.\n - If None, the features before the last fully connected layer will be returned. - n_dense_layers : Sequence[int] (optional, default=(6, 12, 24, 16)) - number of dense layers in each dense block. Thus, this parameter also defines the number of dense blocks. - Default is set to DenseNet-121 parameter. - init_features : int (optional, default=64) - number of feature maps after the initial convolution. Default is set to 64, as in the original paper. 
- growth_rate : int (optional, default=32) - how many feature maps to add at each dense layer. Default is set to 32, as in the original paper. - bottleneck_factor : int (optional, default=4) - multiplicative factor for bottleneck layers (1x1 convolutions). The output of of these bottleneck layers will - have `bottleneck_factor * growth_rate` feature maps. Default is 4, as in the original paper. - act : ActivationParameters (optional, default=("relu", {"inplace": True})) - the activation function used in the convolutional part, and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them.\n - Default is "relu", as in the original paper. - output_act : Optional[ActivationParameters] (optional, default=None) - if `num_outputs` is not None, a potential activation layer applied to the outputs of the network. - Should be pass in the same way as `act`. - If None, no last activation will be applied. - dropout : Optional[float] (optional, default=None) - dropout ratio. If None, no dropout. - - Examples - -------- - >>> DenseNet(spatial_dims=2, in_channels=1, num_outputs=2, output_act="softmax", n_dense_layers=(2, 2)) - DenseNet( - (features): Sequential( - (conv0): Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) - (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act0): ReLU(inplace=True) - (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) - (denseblock1): _DenseBlock( - (denselayer1): _DenseLayer( - (layers): Sequential( - (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - ) - ) - (denselayer2): _DenseLayer( - (layers): Sequential( - (norm1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv1): Conv2d(96, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - ) - ) - ) - (transition1): _Transition( - (norm): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act): ReLU(inplace=True) - (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) - (pool): AvgPool2d(kernel_size=2, stride=2, padding=0) - ) - (denseblock2): _DenseBlock( - (denselayer1): _DenseLayer( - (layers): Sequential( - (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv2): Conv2d(128, 32, kernel_size=(3, 3), 
stride=(1, 1), padding=(1, 1), bias=False) - ) - ) - (denselayer2): _DenseLayer( - (layers): Sequential( - (norm1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv1): Conv2d(96, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - ) - ) - ) - (norm5): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - ) - (fc): Sequential( - (act): ReLU(inplace=True) - (pool): AdaptiveAvgPool2d(output_size=1) - (flatten): Flatten(start_dim=1, end_dim=-1) - (out): Linear(in_features=128, out_features=2, bias=True) - (output_act): Softmax(dim=None) - ) - ) - """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - num_outputs: Optional[int], - n_dense_layers: Sequence[int] = (6, 12, 24, 16), - init_features: int = 64, - growth_rate: int = 32, - bottleneck_factor: int = 4, - act: ActivationParameters = ("relu", {"inplace": True}), - output_act: Optional[ActivationParameters] = None, - dropout: Optional[float] = None, - ) -> None: - super().__init__() - self.spatial_dims = spatial_dims - self.in_channels = in_channels - self.num_outputs = num_outputs - self.n_dense_layers = n_dense_layers - self.init_features = init_features - self.growth_rate = growth_rate - self.bottleneck_factor = bottleneck_factor - self.act = act - self.dropout = dropout - - base_densenet = BaseDenseNet( - spatial_dims=spatial_dims, - in_channels=in_channels, - out_channels=num_outputs if num_outputs else 1, - init_features=init_features, - growth_rate=growth_rate, - block_config=n_dense_layers, - bn_size=bottleneck_factor, - act=act, - dropout_prob=dropout if dropout else 0.0, - ) - self.features = base_densenet.features - self.fc = base_densenet.class_layers if num_outputs else None - if self.fc: - self.fc.output_act = get_act_layer(output_act) if output_act else None - - self._rename_act(self) - - @classmethod - def _rename_act(cls, module: nn.Module) -> None: - """ - Rename activation layers from 'relu' to 'act'. - """ - for name, layer in list(module.named_children()): - if "relu" in name: - module._modules = OrderedDict( # pylint: disable=protected-access - [ - (key.replace("relu", "act"), sub_m) - for key, sub_m in module._modules.items() # pylint: disable=protected-access - ] - ) - else: - cls._rename_act(layer) - - -class SOTADenseNet(str, Enum): - """Supported DenseNet networks.""" - - DENSENET_121 = "DenseNet-121" - DENSENET_161 = "DenseNet-161" - DENSENET_169 = "DenseNet-169" - DENSENET_201 = "DenseNet-201" - - -def get_densenet( - name: Union[str, SOTADenseNet], - num_outputs: Optional[int], - output_act: ActivationParameters = None, - pretrained: bool = False, -) -> DenseNet: - """ - To get a DenseNet implemented in the [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993) - paper. - - Only the last fully connected layer will be changed to match `num_outputs`. - - The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not - used pretrained weights, as it is task specific. - - .. warning:: `DenseNet-121`, `DenseNet-161`, `DenseNet-169` and `DenseNet-201` only works with 2D images with 3 channels. 
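To make the interplay of `init_features`, `growth_rate` and `n_dense_layers` concrete, here is a back-of-the-envelope computation of the channel count reaching the classifier. This is only a sketch of the conventions described above (each dense layer appends `growth_rate` feature maps, each transition layer halves the count):

def densenet_out_features(init_features, growth_rate, n_dense_layers):
    c = init_features
    for i, n in enumerate(n_dense_layers):
        c += n * growth_rate  # each dense layer adds growth_rate feature maps
        if i < len(n_dense_layers) - 1:
            c //= 2  # a transition layer follows every block but the last
    return c

print(densenet_out_features(64, 32, (6, 12, 24, 16)))  # 1024, DenseNet-121
print(densenet_out_features(96, 48, (6, 12, 36, 24)))  # 2208, DenseNet-161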
- - Notes: `torchvision` does not provide an implementation for `DenseNet-264` but provides a `DenseNet-161` that is not - mentioned in the paper. - - Parameters - ---------- - name : Union[str, SOTADenseNet] - The name of the DenseNet. Available networks are `DenseNet-121`, `DenseNet-161`, `DenseNet-169` and `DenseNet-201`. - num_outputs : Optional[int] - number of output variables after the last linear layer.\n - If None, the features before the last fully connected layer will be returned. - output_act : ActivationParameters (optional, default=None) - if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, - and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - pretrained : bool (optional, default=False) - whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// - pytorch.org/vision/main/models/densenet.html). - - Returns - ------- - DenseNet - The network, with potentially pretrained weights. - """ - name = SOTADenseNet(name) - if name == SOTADenseNet.DENSENET_121: - n_dense_layers = (6, 12, 24, 16) - growth_rate = 32 - init_features = 64 - model_url = DenseNet121_Weights.DEFAULT.url - elif name == SOTADenseNet.DENSENET_161: - n_dense_layers = (6, 12, 36, 24) - growth_rate = 48 - init_features = 96 - model_url = DenseNet161_Weights.DEFAULT.url - elif name == SOTADenseNet.DENSENET_169: - n_dense_layers = (6, 12, 32, 32) - growth_rate = 32 - init_features = 64 - model_url = DenseNet169_Weights.DEFAULT.url - elif name == SOTADenseNet.DENSENET_201: - n_dense_layers = (6, 12, 48, 32) - growth_rate = 32 - init_features = 64 - model_url = DenseNet201_Weights.DEFAULT.url - - # pylint: disable=possibly-used-before-assignment - densenet = DenseNet( - spatial_dims=2, - in_channels=3, - num_outputs=num_outputs, - n_dense_layers=n_dense_layers, - growth_rate=growth_rate, - init_features=init_features, - output_act=output_act, - ) - if not pretrained: - return densenet - - pretrained_dict = load_state_dict_from_url(model_url, progress=True) - features_state_dict = { - k.replace("features.", ""): v - for k, v in pretrained_dict.items() - if "classifier" not in k - } - densenet.features.load_state_dict(_state_dict_adapter(features_state_dict)) - - return densenet - - -def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: - """ - To update the old nomenclature in the pretrained state dict. - Adapted from `_load_state_dict` in [torchvision.models.densenet](https://pytorch.org/vision/main - /_modules/torchvision/models/densenet.html). 
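For the record, since this module is removed by the patch, a usage sketch consistent with the `get_densenet` signature documented above (argument values are illustrative):

from clinicadl.monai_networks.nn.densenet import get_densenet  # path as it existed before this deletion

# Pretrained 2D backbone with a fresh 2-class head (the head itself never
# uses pretrained weights, as noted above).
classifier = get_densenet("DenseNet-121", num_outputs=2, output_act="softmax", pretrained=True)

# Feature extractor: with num_outputs=None, the features before the fully
# connected head are returned.
extractor = get_densenet("DenseNet-169", num_outputs=None)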
- """ - pattern = re.compile( - r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$" - ) - - for key in list(state_dict.keys()): - res = pattern.match(key) - if res: - new_key = res.group(1) + res.group(2) - new_key = re.sub(r"^(.*denselayer\d+)\.", r"\1.layers.", new_key) - state_dict[new_key] = state_dict[key] - del state_dict[key] - - return state_dict diff --git a/clinicadl/monai_networks/nn/generator.py b/clinicadl/monai_networks/nn/generator.py deleted file mode 100644 index 5f68a2e58..000000000 --- a/clinicadl/monai_networks/nn/generator.py +++ /dev/null @@ -1,131 +0,0 @@ -from typing import Any, Dict, Optional, Sequence - -import numpy as np -import torch.nn as nn -from monai.networks.layers.simplelayers import Reshape - -from .conv_decoder import ConvDecoder -from .mlp import MLP -from .utils import check_conv_args, check_mlp_args - - -class Generator(nn.Sequential): - """ - A generator with first fully connected layers and then convolutional layers. - - This network is a simple aggregation of a Multi Layer Perceptron (:py:class: - `clinicadl.monai_networks.nn.mlp.MLP`) and a Fully Convolutional Network - (:py:class:`clinicadl.monai_networks.nn.conv_decoder.ConvDecoder`). - - Parameters - ---------- - latent_size : int - size of the latent vector. - start_shape : Sequence[int] - sequence of integers stating the initial shape of the image, i.e. the shape at the - beginning of the convolutional part (minus batch dimension, but including the number - of channels).\n - Thus, `start_shape` determines the dimension of the output of the generator (the exact - shape depends on the convolutional part and can be accessed via the class attribute - `output_shape`). - conv_args : Dict[str, Any] - the arguments for the convolutional part. The arguments are those accepted by - :py:class:`clinicadl.monai_networks.nn.conv_decoder.ConvDecoder`, except `in_shape` that - is specified here via `start_shape`. So, the only mandatory argument is `channels`. - mlp_args : Optional[Dict[str, Any]] (optional, default=None) - the arguments for the MLP part. The arguments are those accepted by - :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is specified - here via `latent_size`, and `out_channels` that is inferred from `start_shape`. - So, the only mandatory argument is `hidden_channels`.\n - If None, the MLP part will be reduced to a single linear layer. 
- - Examples - -------- - >>> Generator( - latent_size=8, - start_shape=(8, 2, 2), - conv_args={"channels": [4, 2], "norm": None, "act": None}, - mlp_args={"hidden_channels": [16], "act": "elu", "norm": None}, - ) - Generator( - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (hidden0): Sequential( - (linear): Linear(in_features=8, out_features=16, bias=True) - (adn): ADN( - (A): ELU(alpha=1.0) - ) - ) - (output): Linear(in_features=16, out_features=32, bias=True) - ) - (reshape): Reshape() - (convolutions): ConvDecoder( - (layer0): Convolution( - (conv): ConvTranspose2d(8, 4, kernel_size=(3, 3), stride=(1, 1)) - ) - (layer1): Convolution( - (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) - ) - ) - ) - - >>> Generator( - latent_size=8, - start_shape=(8, 2, 2), - conv_args={"channels": [4, 2], "norm": None, "act": None, "output_act": "relu"}, - ) - Generator( - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (output): Linear(in_features=8, out_features=32, bias=True) - ) - (reshape): Reshape() - (convolutions): ConvDecoder( - (layer0): Convolution( - (conv): ConvTranspose2d(8, 4, kernel_size=(3, 3), stride=(1, 1)) - ) - (layer1): Convolution( - (conv): ConvTranspose2d(4, 2, kernel_size=(3, 3), stride=(1, 1)) - ) - (output_act): ReLU() - ) - ) - """ - - def __init__( - self, - latent_size: int, - start_shape: Sequence[int], - conv_args: Dict[str, Any], - mlp_args: Optional[Dict[str, Any]] = None, - ) -> None: - super().__init__() - check_conv_args(conv_args) - check_mlp_args(mlp_args) - self.latent_size = latent_size - self.start_shape = start_shape - - flatten_shape = int(np.prod(start_shape)) - if mlp_args is None: - mlp_args = {"hidden_channels": []} - self.mlp = MLP( - in_channels=latent_size, - out_channels=flatten_shape, - **mlp_args, - ) - - self.reshape = Reshape(*start_shape) - inter_channels, *inter_size = start_shape - self.convolutions = ConvDecoder( - in_channels=inter_channels, - spatial_dims=len(inter_size), - _input_size=inter_size, - **conv_args, - ) - - n_channels = ( - conv_args["channels"][-1] - if len(conv_args["channels"]) > 0 - else start_shape[0] - ) - self.output_shape = (n_channels, *self.convolutions.final_size) diff --git a/clinicadl/monai_networks/nn/layers/__init__.py b/clinicadl/monai_networks/nn/layers/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/clinicadl/monai_networks/nn/layers/resnet.py b/clinicadl/monai_networks/nn/layers/resnet.py deleted file mode 100644 index c115da512..000000000 --- a/clinicadl/monai_networks/nn/layers/resnet.py +++ /dev/null @@ -1,124 +0,0 @@ -from collections.abc import Callable -from typing import Optional - -import torch -import torch.nn as nn -from monai.networks.layers.factories import Conv, Norm -from monai.networks.layers.utils import get_act_layer - -from .utils import ActivationParameters - - -class ResNetBlock(nn.Module): - """ - ResNet basic block. 
Adapted from MONAI's implementation: - https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ - monai/networks/nets/resnet.py#L71 - """ - - expansion = 1 - - def __init__( - self, - in_planes: int, - planes: int, - spatial_dims: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - act: ActivationParameters = ("relu", {"inplace": True}), - ) -> None: - super().__init__() - - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - norm_type: Callable = Norm[Norm.BATCH, spatial_dims] - - self.conv1 = conv_type( # pylint: disable=not-callable - in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False - ) - self.norm1 = norm_type(planes) # pylint: disable=not-callable - self.act1 = get_act_layer(name=act) - self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False) # pylint: disable=not-callable - self.norm2 = norm_type(planes) # pylint: disable=not-callable - self.downsample = downsample - self.act2 = get_act_layer(name=act) - self.stride = stride - - def forward(self, x: torch.Tensor) -> torch.Tensor: - residual = x - - out: torch.Tensor = self.conv1(x) - out = self.norm1(out) - out = self.act1(out) - - out = self.conv2(out) - out = self.norm2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.act2(out) - - return out - - -class ResNetBottleneck(nn.Module): - """ - ResNet bottleneck block. Adapted from MONAI's implementation: - https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ - monai/networks/nets/resnet.py#L124 - """ - - expansion = 4 - - def __init__( - self, - in_planes: int, - planes: int, - spatial_dims: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - act: ActivationParameters = ("relu", {"inplace": True}), - ) -> None: - super().__init__() - - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - norm_type: Callable = Norm[Norm.BATCH, spatial_dims] - - self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False) # pylint: disable=not-callable - self.norm1 = norm_type(planes) # pylint: disable=not-callable - self.act1 = get_act_layer(name=act) - self.conv2 = conv_type( # pylint: disable=not-callable - planes, planes, kernel_size=3, stride=stride, padding=1, bias=False - ) - self.norm2 = norm_type(planes) # pylint: disable=not-callable - self.act2 = get_act_layer(name=act) - self.conv3 = conv_type( # pylint: disable=not-callable - planes, planes * self.expansion, kernel_size=1, bias=False - ) - self.norm3 = norm_type(planes * self.expansion) # pylint: disable=not-callable - self.downsample = downsample - self.act3 = get_act_layer(name=act) - self.stride = stride - - def forward(self, x: torch.Tensor) -> torch.Tensor: - residual = x - - out: torch.Tensor = self.conv1(x) - out = self.norm1(out) - out = self.act1(out) - - out = self.conv2(out) - out = self.norm2(out) - out = self.act2(out) - - out = self.conv3(out) - out = self.norm3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.act3(out) - - return out diff --git a/clinicadl/monai_networks/nn/layers/senet.py b/clinicadl/monai_networks/nn/layers/senet.py deleted file mode 100644 index 8847ef577..000000000 --- a/clinicadl/monai_networks/nn/layers/senet.py +++ /dev/null @@ -1,142 +0,0 @@ -from typing import Callable, Optional - -import torch -import torch.nn as nn -from monai.networks.blocks.squeeze_and_excitation import ChannelSELayer -from monai.networks.layers.factories import Conv, Norm 
-from monai.networks.layers.utils import get_act_layer - -from .utils import ActivationParameters - - -class SEResNetBlock(nn.Module): - """ - ResNet basic block. Adapted from MONAI's ResNetBlock: - https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ - monai/networks/nets/resnet.py#L71 - """ - - expansion = 1 - reduction = 16 - - def __init__( - self, - in_planes: int, - planes: int, - spatial_dims: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - act: ActivationParameters = ("relu", {"inplace": True}), - ) -> None: - super().__init__() - - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - norm_type: Callable = Norm[Norm.BATCH, spatial_dims] - - self.conv1 = conv_type( # pylint: disable=not-callable - in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False - ) - self.norm1 = norm_type(planes) # pylint: disable=not-callable - self.act1 = get_act_layer(name=act) - self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False) # pylint: disable=not-callable - self.norm2 = norm_type(planes) # pylint: disable=not-callable - self.se_layer = ChannelSELayer( - spatial_dims=spatial_dims, - in_channels=planes, - r=self.reduction, - acti_type_1=("relu", {"inplace": True}), - acti_type_2="sigmoid", - ) - self.downsample = downsample - self.act2 = get_act_layer(name=act) - self.stride = stride - - def forward(self, x: torch.Tensor) -> torch.Tensor: - residual = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.act1(out) - - out = self.conv2(out) - out = self.norm2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out = self.se_layer(out) - out += residual - out = self.act2(out) - - return out - - -class SEResNetBottleneck(nn.Module): - """ - ResNet bottleneck block. 
Adapted from MONAI's ResNetBottleneck: - https://github.com/Project-MONAI/MONAI/blob/59a7211070538586369afd4a01eca0a7fe2e742e/ - monai/networks/nets/resnet.py#L124 - """ - - expansion = 4 - reduction = 16 - - def __init__( - self, - in_planes: int, - planes: int, - spatial_dims: int, - stride: int = 1, - downsample: Optional[nn.Module] = None, - act: ActivationParameters = ("relu", {"inplace": True}), - ) -> None: - super().__init__() - - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - norm_type: Callable = Norm[Norm.BATCH, spatial_dims] - - self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False) # pylint: disable=not-callable - self.norm1 = norm_type(planes) # pylint: disable=not-callable - self.act1 = get_act_layer(name=act) - self.conv2 = conv_type( # pylint: disable=not-callable - planes, planes, kernel_size=3, stride=stride, padding=1, bias=False - ) - self.norm2 = norm_type(planes) # pylint: disable=not-callable - self.act2 = get_act_layer(name=act) - self.conv3 = conv_type( # pylint: disable=not-callable - planes, planes * self.expansion, kernel_size=1, bias=False - ) - self.norm3 = norm_type(planes * self.expansion) # pylint: disable=not-callable - self.se_layer = ChannelSELayer( - spatial_dims=spatial_dims, - in_channels=planes * self.expansion, - r=self.reduction, - acti_type_1=("relu", {"inplace": True}), - acti_type_2="sigmoid", - ) - self.downsample = downsample - self.act3 = get_act_layer(name=act) - self.stride = stride - - def forward(self, x: torch.Tensor) -> torch.Tensor: - residual = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.act1(out) - - out = self.conv2(out) - out = self.norm2(out) - out = self.act2(out) - - out = self.conv3(out) - out = self.norm3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out = self.se_layer(out) - out += residual - out = self.act3(out) - - return out diff --git a/clinicadl/monai_networks/nn/layers/unet.py b/clinicadl/monai_networks/nn/layers/unet.py deleted file mode 100644 index 2186425be..000000000 --- a/clinicadl/monai_networks/nn/layers/unet.py +++ /dev/null @@ -1,102 +0,0 @@ -from typing import Optional - -import torch.nn as nn -from monai.networks.blocks.convolutions import Convolution -from monai.networks.layers.utils import get_pool_layer - -from .utils import ActFunction, ActivationParameters, NormLayer - - -class ConvBlock(nn.Sequential): - """UNet doouble convolution block.""" - - def __init__( - self, - spatial_dims: int, - in_channels: int, - out_channels: int, - act: ActivationParameters = ActFunction.RELU, - dropout: Optional[float] = None, - ): - super().__init__() - self.add_module( - "0", - Convolution( - spatial_dims=spatial_dims, - in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - strides=1, - padding=None, - adn_ordering="NDA", - act=act, - norm=NormLayer.BATCH, - dropout=dropout, - ), - ) - self.add_module( - "1", - Convolution( - spatial_dims=spatial_dims, - in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - strides=1, - padding=None, - adn_ordering="NDA", - act=act, - norm=NormLayer.BATCH, - dropout=dropout, - ), - ) - - -class UpSample(nn.Sequential): - """UNet up-conv block with first upsampling and then a convolution.""" - - def __init__( - self, - spatial_dims: int, - in_channels: int, - out_channels: int, - act: ActivationParameters = ActFunction.RELU, - dropout: Optional[float] = None, - ): - super().__init__() - self.add_module("0", nn.Upsample(scale_factor=2)) - self.add_module( - "1", - Convolution( 
-                spatial_dims,
-                in_channels,
-                out_channels,
-                strides=1,
-                kernel_size=3,
-                act=act,
-                adn_ordering="NDA",
-                norm=NormLayer.BATCH,
-                dropout=dropout,
-            ),
-        )
-
-
-class DownBlock(nn.Sequential):
-    """UNet down block with first max pooling and then two convolutions."""
-
-    def __init__(
-        self,
-        spatial_dims: int,
-        in_channels: int,
-        out_channels: int,
-        act: ActivationParameters = ActFunction.RELU,
-        dropout: Optional[float] = None,
-    ):
-        super().__init__()
-        self.pool = get_pool_layer(("max", {"kernel_size": 2}), spatial_dims)
-        self.doubleconv = ConvBlock(
-            spatial_dims=spatial_dims,
-            in_channels=in_channels,
-            out_channels=out_channels,
-            act=act,
-            dropout=dropout,
-        )
diff --git a/clinicadl/monai_networks/nn/layers/unpool.py b/clinicadl/monai_networks/nn/layers/unpool.py
deleted file mode 100644
index 1c90fde90..000000000
--- a/clinicadl/monai_networks/nn/layers/unpool.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from typing import Any, Dict, Optional, Tuple, Type, Union
-
-import torch.nn as nn
-from monai.networks.layers.factories import LayerFactory, split_args
-from monai.utils import has_option
-
-from .utils import UnpoolingLayer
-
-Unpool = LayerFactory(
-    name="Unpooling layers", description="Factory for creating unpooling layers."
-)
-
-
-@Unpool.factory_function("upsample")
-def upsample_factory(dim: int) -> Type[nn.Upsample]:
-    """
-    Upsample layer.
-    """
-    return nn.Upsample
-
-
-@Unpool.factory_function("convtranspose")
-def convtranspose_factory(
-    dim: int,
-) -> Type[Union[nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]]:
-    """
-    Transposed convolutional layers in 1,2,3 dimensions.
-
-    Parameters
-    ----------
-    dim : int
-        desired dimension of the transposed convolutional layer.
-
-    Returns
-    -------
-    type[Union[nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]]
-        ConvTranspose[dim]d
-    """
-    types = (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d)
-    return types[dim - 1]
-
-
-def get_unpool_layer(
-    name: Union[UnpoolingLayer, Tuple[UnpoolingLayer, Dict[str, Any]]],
-    spatial_dims: int,
-    in_channels: Optional[int] = None,
-    out_channels: Optional[int] = None,
-) -> nn.Module:
-    """
-    Creates an unpooling layer instance.
-
-    Parameters
-    ----------
-    name : Union[UnpoolingLayer, Tuple[UnpoolingLayer, Dict[str, Any]]]
-        the unpooling type, potentially with arguments in a dict.
-    spatial_dims : int
-        number of spatial dimensions of the input.
-    in_channels : Optional[int] (optional, default=None)
-        number of input channels if the unpool layer requires this parameter.
-    out_channels : Optional[int] (optional, default=None)
-        number of output channels if the unpool layer requires this parameter.
-
-    Returns
-    -------
-    nn.Module
-        the parametrized unpooling layer.
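A usage sketch consistent with the factory above; argument values are invented for illustration:

up = get_unpool_layer(("upsample", {"scale_factor": 2}), spatial_dims=2)
# -> nn.Upsample(scale_factor=2)

convt = get_unpool_layer(
    ("convtranspose", {"kernel_size": 2, "stride": 2}),
    spatial_dims=3,
    in_channels=8,
    out_channels=8,
)
# -> nn.ConvTranspose3d(8, 8, kernel_size=2, stride=2); the channel arguments
#    are injected automatically because ConvTranspose3d accepts those options.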
- """ - unpool_name, unpool_args = split_args(name) - unpool_name = UnpoolingLayer(unpool_name) - unpool_type = Unpool[unpool_name, spatial_dims] - kw_args = dict(unpool_args) - if has_option(unpool_type, "in_channels") and "in_channels" not in kw_args: - kw_args["in_channels"] = in_channels - if has_option(unpool_type, "out_channels") and "out_channels" not in kw_args: - kw_args["out_channels"] = out_channels - - return unpool_type(**kw_args) # pylint: disable=not-callable diff --git a/clinicadl/monai_networks/nn/layers/utils/__init__.py b/clinicadl/monai_networks/nn/layers/utils/__init__.py deleted file mode 100644 index 5c080fffd..000000000 --- a/clinicadl/monai_networks/nn/layers/utils/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from .enum import ( - ActFunction, - ConvNormLayer, - NormLayer, - PoolingLayer, - UnpoolingLayer, - UnpoolingMode, -) -from .types import ( - ActivationParameters, - ConvNormalizationParameters, - ConvParameters, - NormalizationParameters, - PoolingParameters, - SingleLayerConvParameter, - SingleLayerPoolingParameters, - SingleLayerUnpoolingParameters, - UnpoolingParameters, -) diff --git a/clinicadl/monai_networks/nn/layers/utils/enum.py b/clinicadl/monai_networks/nn/layers/utils/enum.py deleted file mode 100644 index 695776551..000000000 --- a/clinicadl/monai_networks/nn/layers/utils/enum.py +++ /dev/null @@ -1,65 +0,0 @@ -from clinicadl.utils.enum import CaseInsensitiveEnum - - -class UnpoolingLayer(CaseInsensitiveEnum): - """Supported unpooling layers in ClinicaDL.""" - - CONV_TRANS = "convtranspose" - UPSAMPLE = "upsample" - - -class ActFunction(CaseInsensitiveEnum): - """Supported activation functions in ClinicaDL.""" - - ELU = "elu" - RELU = "relu" - LEAKY_RELU = "leakyrelu" - PRELU = "prelu" - RELU6 = "relu6" - SELU = "selu" - CELU = "celu" - GELU = "gelu" - SIGMOID = "sigmoid" - TANH = "tanh" - SOFTMAX = "softmax" - LOGSOFTMAX = "logsoftmax" - MISH = "mish" - - -class PoolingLayer(CaseInsensitiveEnum): - """Supported pooling layers in ClinicaDL.""" - - MAX = "max" - AVG = "avg" - ADAPT_AVG = "adaptiveavg" - ADAPT_MAX = "adaptivemax" - - -class NormLayer(CaseInsensitiveEnum): - """Supported normalization layers in ClinicaDL.""" - - GROUP = "group" - LAYER = "layer" - SYNCBATCH = "syncbatch" - BATCH = "batch" - INSTANCE = "instance" - - -class ConvNormLayer(CaseInsensitiveEnum): - """Supported normalization layers with convolutions in ClinicaDL.""" - - GROUP = "group" - SYNCBATCH = "syncbatch" - BATCH = "batch" - INSTANCE = "instance" - - -class UnpoolingMode(CaseInsensitiveEnum): - """Supported unpooling mode for AutoEncoders in ClinicaDL.""" - - NEAREST = "nearest" - LINEAR = "linear" - BILINEAR = "bilinear" - BICUBIC = "bicubic" - TRILINEAR = "trilinear" - CONV_TRANS = "convtranspose" diff --git a/clinicadl/monai_networks/nn/layers/utils/types.py b/clinicadl/monai_networks/nn/layers/utils/types.py deleted file mode 100644 index f5ef18847..000000000 --- a/clinicadl/monai_networks/nn/layers/utils/types.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import Any, Dict, List, Tuple, Union - -from .enum import ( - ActFunction, - ConvNormLayer, - NormLayer, - PoolingLayer, - UnpoolingLayer, -) - -SingleLayerConvParameter = Union[int, Tuple[int, ...]] -ConvParameters = Union[SingleLayerConvParameter, List[SingleLayerConvParameter]] - -PoolingType = Union[str, PoolingLayer] -SingleLayerPoolingParameters = Tuple[PoolingType, Dict[str, Any]] -PoolingParameters = Union[ - SingleLayerPoolingParameters, List[SingleLayerPoolingParameters] -] - -UnpoolingType = 
Union[str, UnpoolingLayer] -SingleLayerUnpoolingParameters = Tuple[UnpoolingType, Dict[str, Any]] -UnpoolingParameters = Union[ - SingleLayerUnpoolingParameters, List[SingleLayerUnpoolingParameters] -] - -NormalizationType = Union[str, NormLayer] -NormalizationParameters = Union[ - NormalizationType, Tuple[NormalizationType, Dict[str, Any]] -] - -ConvNormalizationType = Union[str, ConvNormLayer] -ConvNormalizationParameters = Union[ - ConvNormalizationType, Tuple[ConvNormalizationType, Dict[str, Any]] -] - -ActivationType = Union[str, ActFunction] -ActivationParameters = Union[ActivationType, Tuple[ActivationType, Dict[str, Any]]] diff --git a/clinicadl/monai_networks/nn/layers/vit.py b/clinicadl/monai_networks/nn/layers/vit.py deleted file mode 100644 index e485d6c6b..000000000 --- a/clinicadl/monai_networks/nn/layers/vit.py +++ /dev/null @@ -1,94 +0,0 @@ -from functools import partial -from typing import Callable, Optional - -import torch -import torch.nn as nn -from torchvision.models.vision_transformer import MLPBlock - - -class EncoderBlock(nn.Module): - """Transformer encoder block.""" - - def __init__( - self, - num_heads: int, - hidden_dim: int, - mlp_dim: int, - dropout: float, - attention_dropout: float, - norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6), - ) -> None: - super().__init__() - self.num_heads = num_heads - - # Attention block - self.norm1 = norm_layer(hidden_dim) - self.self_attention = nn.MultiheadAttention( - hidden_dim, num_heads, dropout=attention_dropout, batch_first=True - ) - self.dropout = nn.Dropout(dropout) - - # MLP block - self.norm2 = norm_layer(hidden_dim) - self.mlp = MLPBlock(hidden_dim, mlp_dim, dropout) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - residual = x - - x = self.norm1(x) - x, _ = self.self_attention(x, x, x, need_weights=False) - x = self.dropout(x) - x += residual - - y = self.norm2(x) - y = self.mlp(y) - return x + y - - -class Encoder(nn.Module): - """Encoder with multiple transformer blocks.""" - - def __init__( - self, - seq_length: int, - num_layers: int, - num_heads: int, - hidden_dim: int, - mlp_dim: int, - dropout: float, - attention_dropout: float, - pos_embedding: Optional[nn.Parameter] = None, - norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6), - ) -> None: - super().__init__() - - if pos_embedding is not None: - self.pos_embedding = pos_embedding - else: - self.pos_embedding = nn.Parameter( - torch.empty(1, seq_length, hidden_dim).normal_(std=0.02) - ) # from BERT - self.dropout = nn.Dropout(dropout) - self.layers = nn.ModuleList( - [ - EncoderBlock( - num_heads, - hidden_dim, - mlp_dim, - dropout, - attention_dropout, - norm_layer, - ) - for _ in range(num_layers) - ] - ) - self.norm = norm_layer(hidden_dim) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = x + self.pos_embedding - - x = self.dropout(x) - for layer in self.layers: - x = layer(x) - - return self.norm(x) diff --git a/clinicadl/monai_networks/nn/mlp.py b/clinicadl/monai_networks/nn/mlp.py deleted file mode 100644 index a27b2ad4e..000000000 --- a/clinicadl/monai_networks/nn/mlp.py +++ /dev/null @@ -1,146 +0,0 @@ -from collections import OrderedDict -from typing import Optional, Sequence - -import torch.nn as nn -from monai.networks.blocks import ADN -from monai.networks.layers.utils import get_act_layer -from monai.networks.nets import FullyConnectedNet as BaseMLP - -from .layers.utils import ( - ActFunction, - ActivationParameters, - NormalizationParameters, - NormLayer, -) -from 
.utils import check_adn_ordering, check_norm_layer
-
-
-class MLP(BaseMLP):
-    """Simple fully-connected neural network (or Multi-Layer Perceptron) with linear, normalization, activation
-    and dropout layers.
-
-    Parameters
-    ----------
-    in_channels : int
-        number of input channels (i.e. number of features).
-    out_channels : int
-        number of output channels.
-    hidden_channels : Sequence[int]
-        number of output channels for each hidden layer. Thus, this parameter also controls the number of hidden layers.
-    act : Optional[ActivationParameters] (optional, default=ActFunction.PRELU)
-        the activation function used after a linear layer, and optionally its arguments.
-        Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n
-        `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`,
-        `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions]
-        (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional
-        arguments for each of them.
-    output_act : Optional[ActivationParameters] (optional, default=None)
-        a potential activation layer applied to the output of the network. Should be passed in the same way as `act`.
-        If None, no last activation will be applied.
-    norm : Optional[NormalizationParameters] (optional, default=NormLayer.BATCH)
-        the normalization type used after a linear layer, and optionally the arguments of the normalization
-        layer. Should be passed as `norm_type` or `(norm_type, parameters)`. If None, no normalization will be
-        performed.\n
-        `norm_type` can be any value in {`batch`, `group`, `instance`, `layer`, `syncbatch`}. Please refer to PyTorch's
-        [normalization layers](https://pytorch.org/docs/stable/nn.html#normalization-layers) to know the mandatory and
-        optional arguments for each of them.\n
-        Please note that arguments `num_channels`, `num_features` and `normalized_shape` of the normalization layer
-        should not be passed, as they are automatically inferred from the output of the previous layer in the network.
-    dropout : Optional[float] (optional, default=None)
-        dropout ratio. If None, no dropout.
-    bias : bool (optional, default=True)
-        whether to have a bias term in linear layers.
-    adn_ordering : str (optional, default="NDA")
-        order of operations `Activation`, `Dropout` and `Normalization` after a linear layer (except the last
-        one).
-        For example if "ND" is passed, `Normalization` and then `Dropout` will be performed (without `Activation`).\n
-        Note: ADN will not be applied after the last linear layer.
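To visualize what `adn_ordering="NDA"` produces after each hidden linear layer, here is a minimal standalone sketch mirroring the ADN call made in `_get_layer` below (argument values are illustrative):

from monai.networks.blocks import ADN

adn = ADN(ordering="NDA", act="relu", norm="batch", dropout=0.1,
          dropout_dim=1, in_channels=8)
print(adn)  # N (BatchNorm1d), D (Dropout), A (ReLU), applied in that order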
- - Examples - -------- - >>> MLP(in_channels=12, out_channels=2, hidden_channels=[8, 4], dropout=0.1, act=("elu", {"alpha": 0.5}), - norm=("group", {"num_groups": 2}), bias=True, adn_ordering="ADN", output_act="softmax") - MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (hidden0): Sequential( - (linear): Linear(in_features=12, out_features=8, bias=True) - (adn): ADN( - (A): ELU(alpha=0.5) - (D): Dropout(p=0.1, inplace=False) - (N): GroupNorm(2, 8, eps=1e-05, affine=True) - ) - ) - (hidden1): Sequential( - (linear): Linear(in_features=8, out_features=4, bias=True) - (adn): ADN( - (A): ELU(alpha=0.5) - (D): Dropout(p=0.1, inplace=False) - (N): GroupNorm(2, 4, eps=1e-05, affine=True) - ) - ) - (output): Sequential( - (linear): Linear(in_features=4, out_features=2, bias=True) - (output_act): Softmax(dim=None) - ) - ) - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - hidden_channels: Sequence[int], - act: Optional[ActivationParameters] = ActFunction.PRELU, - output_act: Optional[ActivationParameters] = None, - norm: Optional[NormalizationParameters] = NormLayer.BATCH, - dropout: Optional[float] = None, - bias: bool = True, - adn_ordering: str = "NDA", - ) -> None: - self.norm = check_norm_layer(norm) - super().__init__( - in_channels, - out_channels, - hidden_channels, - dropout, - act, - bias, - check_adn_ordering(adn_ordering), - ) - self.output = nn.Sequential(OrderedDict([("linear", self.output)])) - self.output.output_act = get_act_layer(output_act) if output_act else None - # renaming - self._modules = OrderedDict( - [ - (key.replace("hidden_", "hidden"), sub_m) - for key, sub_m in self._modules.items() - ] - ) - - def _get_layer(self, in_channels: int, out_channels: int, bias: bool) -> nn.Module: - """ - Gets the parametrized Linear layer + ADN block. 
- """ - if self.norm == NormLayer.LAYER: - norm = ("layer", {"normalized_shape": out_channels}) - else: - norm = self.norm - seq = nn.Sequential( - OrderedDict( - [ - ("linear", nn.Linear(in_channels, out_channels, bias)), - ( - "adn", - ADN( - ordering=self.adn_ordering, - act=self.act, - norm=norm, - dropout=self.dropout, - dropout_dim=1, - in_channels=out_channels, - ), - ), - ] - ) - ) - return seq diff --git a/clinicadl/monai_networks/nn/resnet.py b/clinicadl/monai_networks/nn/resnet.py deleted file mode 100644 index 1ba90b30c..000000000 --- a/clinicadl/monai_networks/nn/resnet.py +++ /dev/null @@ -1,566 +0,0 @@ -import re -from collections import OrderedDict -from copy import deepcopy -from enum import Enum -from typing import Any, Callable, Mapping, Optional, Sequence, Type, Union - -import torch -import torch.nn as nn -from monai.networks.layers.factories import Conv, Norm, Pool -from monai.networks.layers.utils import get_act_layer -from monai.utils import ensure_tuple_rep -from torch.hub import load_state_dict_from_url -from torchvision.models.resnet import ( - ResNet18_Weights, - ResNet34_Weights, - ResNet50_Weights, - ResNet101_Weights, - ResNet152_Weights, -) - -from .layers.resnet import ResNetBlock, ResNetBottleneck -from .layers.senet import SEResNetBlock, SEResNetBottleneck -from .layers.utils import ActivationParameters - - -class ResNetBlockType(str, Enum): - """Supported ResNet blocks.""" - - BASIC = "basic" - BOTTLENECK = "bottleneck" - - -class GeneralResNet(nn.Module): - """Common base class for ResNet and SEResNet.""" - - def __init__( - self, - spatial_dims: int, - in_channels: int, - num_outputs: Optional[int], - block_type: Union[str, ResNetBlockType], - n_res_blocks: Sequence[int], - n_features: Sequence[int], - init_conv_size: Union[Sequence[int], int], - init_conv_stride: Union[Sequence[int], int], - bottleneck_reduction: int, - se_reduction: Optional[int], - act: ActivationParameters, - output_act: ActivationParameters, - ) -> None: - super().__init__() - - self.spatial_dims = spatial_dims - self.in_channels = in_channels - self.num_outputs = num_outputs - self.block_type = block_type - self._check_args_consistency(n_res_blocks, n_features) - self.n_res_blocks = n_res_blocks - self.n_features = n_features - self.bottleneck_reduction = bottleneck_reduction - self.se_reduction = se_reduction - self.act = act - self.squeeze_excitation = True if se_reduction else False - - self.init_conv_size = ensure_tuple_rep(init_conv_size, spatial_dims) - self.init_conv_stride = ensure_tuple_rep(init_conv_stride, spatial_dims) - - block, in_planes = self._get_block(block_type) - - conv_type, norm_type, pool_type, avgp_type = self._get_layers() - - block_avgpool = [0, 1, (1, 1), (1, 1, 1)] - - self.in_planes = in_planes[0] - self.n_layers = len(in_planes) - self.bias_downsample = False - - self.conv0 = conv_type( # pylint: disable=not-callable - in_channels, - self.in_planes, - kernel_size=self.init_conv_size, - stride=self.init_conv_stride, - padding=tuple(k // 2 for k in self.init_conv_size), - bias=False, - ) - self.norm0 = norm_type(self.in_planes) # pylint: disable=not-callable - self.act0 = get_act_layer(name=act) - self.pool0 = pool_type(kernel_size=3, stride=2, padding=1) # pylint: disable=not-callable - self.layer1 = self._make_resnet_layer( - block, in_planes[0], n_res_blocks[0], spatial_dims, act - ) - for i, (n_blocks, n_feats) in enumerate( - zip(n_res_blocks[1:], in_planes[1:]), start=2 - ): - self.add_module( - f"layer{i}", - self._make_resnet_layer( - block, - 
planes=n_feats, - blocks=n_blocks, - spatial_dims=spatial_dims, - stride=2, - act=act, - ), - ) - self.fc = ( - nn.Sequential( - OrderedDict( - [ - ("pool", avgp_type(block_avgpool[spatial_dims])), # pylint: disable=not-callable - ("flatten", nn.Flatten(1)), - ("out", nn.Linear(n_features[-1], num_outputs)), - ] - ) - ) - if num_outputs - else None - ) - if self.fc: - self.fc.output_act = get_act_layer(output_act) if output_act else None - - self._init_module(conv_type, norm_type) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv0(x) - x = self.norm0(x) - x = self.act0(x) - x = self.pool0(x) - - for i in range(1, self.n_layers + 1): - x = self.get_submodule(f"layer{i}")(x) - - if self.fc is not None: - x = self.fc(x) - - return x - - def _get_block(self, block_type: Union[str, ResNetBlockType]) -> nn.Module: - """ - Gets the residual block, depending on the block choice made by the user and depending - on whether squeeze-excitation mode or not. - """ - block_type = ResNetBlockType(block_type) - if block_type == ResNetBlockType.BASIC: - in_planes = self.n_features - if self.squeeze_excitation: - block = SEResNetBlock - block.reduction = self.se_reduction - else: - block = ResNetBlock - elif block_type == ResNetBlockType.BOTTLENECK: - in_planes = self._bottleneck_reduce( - self.n_features, self.bottleneck_reduction - ) - if self.squeeze_excitation: - block = SEResNetBottleneck - block.reduction = self.se_reduction - else: - block = ResNetBottleneck - block.expansion = self.bottleneck_reduction - - return block, in_planes - - def _get_layers(self): - """ - Gets convolution, normalization, pooling and adaptative average pooling layers. - """ - conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[ - Conv.CONV, self.spatial_dims - ] - norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[ - Norm.BATCH, self.spatial_dims - ] - pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[ - Pool.MAX, self.spatial_dims - ] - avgp_type: Type[ - Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d] - ] = Pool[Pool.ADAPTIVEAVG, self.spatial_dims] - - return conv_type, norm_type, pool_type, avgp_type - - def _make_resnet_layer( - self, - block: Type[Union[ResNetBlock, ResNetBottleneck]], - planes: int, - blocks: int, - spatial_dims: int, - act: ActivationParameters, - stride: int = 1, - ) -> nn.Sequential: - """ - Builds a ResNet layer. - """ - conv_type: Callable = Conv[Conv.CONV, spatial_dims] - norm_type: Callable = Norm[Norm.BATCH, spatial_dims] - - downsample = None - if stride != 1 or self.in_planes != planes * block.expansion: - downsample = nn.Sequential( - conv_type( # pylint: disable=not-callable - self.in_planes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=self.bias_downsample, - ), - norm_type(planes * block.expansion), # pylint: disable=not-callable - ) - - layers = [ - block( - in_planes=self.in_planes, - planes=planes, - spatial_dims=spatial_dims, - stride=stride, - downsample=downsample, - act=act, - ) - ] - - self.in_planes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(self.in_planes, planes, spatial_dims=spatial_dims, act=act) - ) - - return nn.Sequential(*layers) - - def _init_module( - self, conv_type: Type[nn.Module], norm_type: Type[nn.Module] - ) -> None: - """ - Initializes the parameters. 
- """ - for m in self.modules(): - if isinstance(m, conv_type): - nn.init.kaiming_normal_( - torch.as_tensor(m.weight), mode="fan_out", nonlinearity="relu" - ) - elif isinstance(m, norm_type): - nn.init.constant_(torch.as_tensor(m.weight), 1) - nn.init.constant_(torch.as_tensor(m.bias), 0) - elif isinstance(m, nn.Linear): - nn.init.constant_(torch.as_tensor(m.bias), 0) - - @classmethod - def _bottleneck_reduce( - cls, n_features: Sequence[int], bottleneck_reduction: int - ) -> Sequence[int]: - """ - Finds number of feature maps for the bottleneck layers. - """ - reduced_features = [] - for n in n_features: - if n % bottleneck_reduction != 0: - raise ValueError( - "All elements of n_features must be divisible by bottleneck_reduction. " - f"Got {n} in n_features and bottleneck_reduction={bottleneck_reduction}" - ) - reduced_features.append(n // bottleneck_reduction) - - return reduced_features - - @classmethod - def _check_args_consistency( - cls, n_res_blocks: Sequence[int], n_features: Sequence[int] - ) -> None: - """ - Checks consistency between `n_res_blocks` and `n_features`. - """ - if not isinstance(n_res_blocks, Sequence): - raise ValueError(f"n_res_blocks must be a sequence, got {n_res_blocks}") - if not isinstance(n_features, Sequence): - raise ValueError(f"n_features must be a sequence, got {n_features}") - if len(n_features) != len(n_res_blocks): - raise ValueError( - f"n_features and n_res_blocks must have the same length, got n_features={n_features} " - f"and n_res_blocks={n_res_blocks}" - ) - - -class ResNet(GeneralResNet): - """ - ResNet based on the [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385) paper. - Adapted from [MONAI's implementation](https://docs.monai.io/en/stable/networks.html#resnet). - - The user can customize the number of residual blocks, the number of downsampling blocks, the number of channels - in each block, as well as other parameters like the type of residual block used. - - ResNet is a fully convolutional network that can work with input of any size, provided that is it large - enough not to be reduced to a 1-pixel image (before the adaptative average pooling). - - Parameters - ---------- - spatial_dims : int - number of spatial dimensions of the input image. - in_channels : int - number of channels in the input image. - num_outputs : Optional[int] - number of output variables after the last linear layer.\n - If None, the features before the last fully connected layer (including average pooling) will be returned. - block_type : Union[str, ResNetBlockType] (optional, default=ResNetBlockType.BASIC) - type of residual block. Either `basic` or `bottleneck`. Default to `basic`, as in `ResNet-18`. - n_res_blocks : Sequence[int] (optional, default=(2, 2, 2, 2)) - number of residual block in each ResNet layer. A ResNet layer refers here to the set of residual blocks - between two downsamplings. The length of `n_res_blocks` thus determines the number of ResNet layers. - Default to `(2, 2, 2, 2)`, as in `ResNet-18`. - n_features : Sequence[int] (optional, default=(64, 128, 256, 512)) - number of output feature maps for each ResNet layer. The length of `n_features` must be equal to the length - of `n_res_blocks`. Default to `(64, 128, 256, 512)`, as in `ResNet-18`. - init_conv_size : Union[Sequence[int], int] (optional, default=7) - kernel_size for the first convolution. - If tuple, it will be understood as the values for each dimension. - Default to 7, as in the original paper. 
- init_conv_stride : Union[Sequence[int], int] (optional, default=2) - stride for the first convolution. - If tuple, it will be understood as the values for each dimension. - Default to 2, as in the original paper. - bottleneck_reduction : int (optional, default=4) - if `block_type='bottleneck'`, `bottleneck_reduction` determines the reduction factor for the number - of feature maps in bottleneck layers (1x1 convolutions). Default to 4, as in the original paper. - act : ActivationParameters (optional, default=("relu", {"inplace": True})) - the activation function used in the convolutional part, and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them.\n - Default is "relu", as in the original paper. - output_act : Optional[ActivationParameters] (optional, default=None) - if `num_outputs` is not None, a potential activation layer applied to the outputs of the network. - Should be pass in the same way as `act`. - If None, no last activation will be applied. - - Examples - -------- - >>> ResNet( - spatial_dims=2, - in_channels=1, - num_outputs=2, - block_type="bottleneck", - bottleneck_reduction=4, - n_features=(8, 16), - n_res_blocks=(2, 2), - output_act="softmax", - init_conv_size=5, - ) - ResNet( - (conv0): Conv2d(1, 2, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), bias=False) - (norm0): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act0): ReLU(inplace=True) - (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) - (layer1): Sequential( - (0): ResNetBottleneck( - (conv1): Conv2d(2, 2, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - (norm2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv3): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm3): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (downsample): Sequential( - (0): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) - (1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - ) - (act3): ReLU(inplace=True) - ) - (1): ResNetBottleneck( - (conv1): Conv2d(8, 2, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - (norm2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv3): Conv2d(2, 8, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm3): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act3): ReLU(inplace=True) - ) - ) - (layer2): Sequential( - (0): ResNetBottleneck( - (conv1): Conv2d(8, 4, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm1): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True) - (act1): ReLU(inplace=True) - (conv2): Conv2d(4, 4, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) - (norm2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv3): Conv2d(4, 16, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (downsample): Sequential( - (0): Conv2d(8, 16, kernel_size=(1, 1), stride=(2, 2), bias=False) - (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - ) - (act3): ReLU(inplace=True) - ) - (1): ResNetBottleneck( - (conv1): Conv2d(16, 4, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm1): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv2): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - (norm2): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act2): ReLU(inplace=True) - (conv3): Conv2d(4, 16, kernel_size=(1, 1), stride=(1, 1), bias=False) - (norm3): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act3): ReLU(inplace=True) - ) - ) - (fc): Sequential( - (pool): AdaptiveAvgPool2d(output_size=(1, 1)) - (flatten): Flatten(start_dim=1, end_dim=-1) - (out): Linear(in_features=16, out_features=2, bias=True) - (output_act): Softmax(dim=None) - ) - ) - """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - num_outputs: Optional[int], - block_type: Union[str, ResNetBlockType] = ResNetBlockType.BASIC, - n_res_blocks: Sequence[int] = (2, 2, 2, 2), - n_features: Sequence[int] = (64, 128, 256, 512), - init_conv_size: Union[Sequence[int], int] = 7, - init_conv_stride: Union[Sequence[int], int] = 2, - bottleneck_reduction: int = 4, - act: ActivationParameters = ("relu", {"inplace": True}), - output_act: Optional[ActivationParameters] = None, - ) -> None: - super().__init__( - spatial_dims=spatial_dims, - in_channels=in_channels, - num_outputs=num_outputs, - block_type=block_type, - n_res_blocks=n_res_blocks, - n_features=n_features, - init_conv_size=init_conv_size, - init_conv_stride=init_conv_stride, - bottleneck_reduction=bottleneck_reduction, - se_reduction=None, - act=act, - output_act=output_act, - ) - - -class SOTAResNet(str, Enum): - """Supported ResNet networks.""" - - RESNET_18 = "ResNet-18" - RESNET_34 = "ResNet-34" - RESNET_50 = "ResNet-50" - RESNET_101 = "ResNet-101" - RESNET_152 = "ResNet-152" - - -def get_resnet( - name: Union[str, SOTAResNet], - num_outputs: Optional[int], - output_act: ActivationParameters = None, - pretrained: bool = False, -) -> ResNet: - """ - To get a ResNet implemented in the [Deep Residual Learning for Image Recognition](https://arxiv.org/pdf/1512.03385) - paper. - - Only the last fully connected layer will be changed to match `num_outputs`. - - The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not - used pretrained weights, as it is task specific. - - .. warning:: `ResNet-18`, `ResNet-34`, `ResNet-50`, `ResNet-101` and `ResNet-152` only works with 2D images with 3 - channels. - - Parameters - ---------- - model : Union[str, SOTAResNet] - The name of the ResNet. Available networks are `ResNet-18`, `ResNet-34`, `ResNet-50`, `ResNet-101` and `ResNet-152`. 
- num_outputs : Optional[int] - number of output variables after the last linear layer.\n - If None, the features before the last fully connected layer will be returned. - output_act : ActivationParameters (optional, default=None) - if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, - and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - pretrained : bool (optional, default=False) - whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// - pytorch.org/vision/main/models/resnet.html). - - Returns - ------- - ResNet - The network, with potentially pretrained weights. - """ - name = SOTAResNet(name) - if name == SOTAResNet.RESNET_18: - block_type = ResNetBlockType.BASIC - n_res_blocks = (2, 2, 2, 2) - n_features = (64, 128, 256, 512) - model_url = ResNet18_Weights.DEFAULT.url - elif name == SOTAResNet.RESNET_34: - block_type = ResNetBlockType.BASIC - n_res_blocks = (3, 4, 6, 3) - n_features = (64, 128, 256, 512) - model_url = ResNet34_Weights.DEFAULT.url - elif name == SOTAResNet.RESNET_50: - block_type = ResNetBlockType.BOTTLENECK - n_res_blocks = (3, 4, 6, 3) - n_features = (256, 512, 1024, 2048) - model_url = ResNet50_Weights.DEFAULT.url - elif name == SOTAResNet.RESNET_101: - block_type = ResNetBlockType.BOTTLENECK - n_res_blocks = (3, 4, 23, 3) - n_features = (256, 512, 1024, 2048) - model_url = ResNet101_Weights.DEFAULT.url - elif name == SOTAResNet.RESNET_152: - block_type = ResNetBlockType.BOTTLENECK - n_res_blocks = (3, 8, 36, 3) - n_features = (256, 512, 1024, 2048) - model_url = ResNet152_Weights.DEFAULT.url - - # pylint: disable=possibly-used-before-assignment - resnet = ResNet( - spatial_dims=2, - in_channels=3, - num_outputs=num_outputs, - n_res_blocks=n_res_blocks, - block_type=block_type, - n_features=n_features, - output_act=output_act, - ) - if pretrained: - fc_layers = deepcopy(resnet.fc) - resnet.fc = None - pretrained_dict = load_state_dict_from_url(model_url, progress=True) - resnet.load_state_dict(_state_dict_adapter(pretrained_dict)) - resnet.fc = fc_layers - - return resnet - - -def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: - """ - A mapping between torchvision's layer names and ours. 
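The concrete regex table of this adapter is truncated in this hunk, so as a hedged illustration only: an adapter like this typically rewrites torchvision's state-dict key names (e.g. the stem `conv1`/`bn1`) into this module's naming scheme (`conv0`/`norm0`, seen above). The patterns below are hypothetical placeholders, not the module's actual mappings.

    >>> import re
    >>> re.sub(r"^conv1\.", "conv0.", "conv1.weight")  # hypothetical stem rename
    'conv0.weight'
    >>> re.sub(r"^bn1\.", "norm0.", "bn1.running_mean")  # hypothetical norm rename
    'norm0.running_mean'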
- """ - state_dict = {k: v for k, v in state_dict.items() if "fc" not in k} - - mappings = [ - (r"(?>> SEResNet( - spatial_dims=2, - in_channels=1, - num_outputs=2, - block_type="basic", - se_reduction=2, - n_features=(8,), - n_res_blocks=(2,), - output_act="softmax", - init_conv_size=5, - ) - SEResNet( - (conv0): Conv2d(1, 8, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), bias=False) - (norm0): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act0): ReLU(inplace=True) - (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) - (layer1): Sequential( - (0): SEResNetBlock( - (conv1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - (norm1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv2): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - (norm2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (se_layer): ChannelSELayer( - (avg_pool): AdaptiveAvgPool2d(output_size=1) - (fc): Sequential( - (0): Linear(in_features=8, out_features=4, bias=True) - (1): ReLU(inplace=True) - (2): Linear(in_features=4, out_features=8, bias=True) - (3): Sigmoid() - ) - ) - (act2): ReLU(inplace=True) - ) - (1): SEResNetBlock( - (conv1): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - (norm1): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (act1): ReLU(inplace=True) - (conv2): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) - (norm2): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (se_layer): ChannelSELayer( - (avg_pool): AdaptiveAvgPool2d(output_size=1) - (fc): Sequential( - (0): Linear(in_features=8, out_features=4, bias=True) - (1): ReLU(inplace=True) - (2): Linear(in_features=4, out_features=8, bias=True) - (3): Sigmoid() - ) - ) - (act2): ReLU(inplace=True) - ) - ) - (fc): Sequential( - (pool): AdaptiveAvgPool2d(output_size=(1, 1)) - (flatten): Flatten(start_dim=1, end_dim=-1) - (out): Linear(in_features=8, out_features=2, bias=True) - (output_act): Softmax(dim=None) - ) - ) - """ - - def __init__( - self, - spatial_dims: int, - in_channels: int, - num_outputs: Optional[int], - se_reduction: int = 16, - **kwargs: Any, - ) -> None: - # get defaults from resnet - _, default_resnet_args = get_args_and_defaults(ResNet.__init__) - for arg, value in default_resnet_args.items(): - if arg not in kwargs: - kwargs[arg] = value - - self._check_se_channels(kwargs["n_features"], se_reduction) - - super().__init__( - spatial_dims=spatial_dims, - in_channels=in_channels, - num_outputs=num_outputs, - se_reduction=se_reduction, - **kwargs, - ) - - @classmethod - def _check_se_channels(cls, n_features: Sequence[int], se_reduction: int) -> None: - """ - Checks that the output of residual blocks always have a number of channels greater - than squeeze-excitation bottleneck reduction factor. - """ - if not isinstance(n_features, Sequence): - raise ValueError(f"n_features must be a sequence. Got {n_features}") - for n in n_features: - if n < se_reduction: - raise ValueError( - f"elements of n_features must be greater or equal to se_reduction. 
Got {n} in n_features " - f"and se_reduction={se_reduction}" - ) - - -class SOTAResNet(str, Enum): - """Supported SEResNet networks.""" - - SE_RESNET_50 = "SEResNet-50" - SE_RESNET_101 = "SEResNet-101" - SE_RESNET_152 = "SEResNet-152" - - -def get_seresnet( - name: Union[str, SOTAResNet], - num_outputs: Optional[int], - output_act: ActivationParameters = None, - pretrained: bool = False, -) -> SEResNet: - """ - To get a Squeeze-and-Excitation ResNet implemented in the [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/ - 1709.01507) paper. - - Only the last fully connected layer will be changed to match `num_outputs`. - - .. warning:: `SEResNet-50`, `SEResNet-101` and `SEResNet-152` only work with 2D images with 3 channels. - - Note: pretrained weights are not yet available for these networks. - - Parameters - ---------- - name : Union[str, SOTAResNet] - the name of the SEResNet. Available networks are `SEResNet-50`, `SEResNet-101` and `SEResNet-152`. - num_outputs : Optional[int] - number of output variables after the last linear layer.\n - If None, the features before the last fully connected layer will be returned. - output_act : ActivationParameters (optional, default=None) - if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, - and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - pretrained : bool (optional, default=False) - pretrained networks are not yet available for SE-ResNets. Leave this argument set to False. - - Returns - ------- - SEResNet - the network. - """ - if pretrained is not False: - raise ValueError( - "Pretrained networks are not yet available for SE-ResNets. Please leave " - "'pretrained' to False."
- ) - - name = SOTAResNet(name) - if name == SOTAResNet.SE_RESNET_50: - block_type = ResNetBlockType.BOTTLENECK - n_res_blocks = (3, 4, 6, 3) - n_features = (256, 512, 1024, 2048) - elif name == SOTAResNet.SE_RESNET_101: - block_type = ResNetBlockType.BOTTLENECK - n_res_blocks = (3, 4, 23, 3) - n_features = (256, 512, 1024, 2048) - elif name == SOTAResNet.SE_RESNET_152: - block_type = ResNetBlockType.BOTTLENECK - n_res_blocks = (3, 8, 36, 3) - n_features = (256, 512, 1024, 2048) - - # pylint: disable=possibly-used-before-assignment - resnet = SEResNet( - spatial_dims=2, - in_channels=3, - num_outputs=num_outputs, - n_res_blocks=n_res_blocks, - block_type=block_type, - n_features=n_features, - output_act=output_act, - ) - - return resnet diff --git a/clinicadl/monai_networks/nn/unet.py b/clinicadl/monai_networks/nn/unet.py deleted file mode 100644 index dd1e59141..000000000 --- a/clinicadl/monai_networks/nn/unet.py +++ /dev/null @@ -1,250 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional, Sequence - -import torch -import torch.nn as nn -from monai.networks.blocks.convolutions import Convolution -from monai.networks.layers.utils import get_act_layer - -from .layers.unet import ConvBlock, DownBlock, UpSample -from .layers.utils import ActFunction, ActivationParameters - - -class BaseUNet(nn.Module, ABC): - """Base class for UNet and AttentionUNet.""" - - def __init__( - self, - spatial_dims: int, - in_channels: int, - out_channels: int, - channels: Sequence[int] = (64, 128, 256, 512, 1024), - act: ActivationParameters = ActFunction.RELU, - output_act: Optional[ActivationParameters] = None, - dropout: Optional[float] = None, - ): - super().__init__() - if not isinstance(channels, Sequence) or len(channels) < 2: - raise ValueError( - f"channels should be a sequence, whose length is no less than 2. Got {channels}" - ) - self.spatial_dims = spatial_dims - self.in_channels = in_channels - self.out_channels = out_channels - self.channels = channels - self.act = act - self.dropout = dropout - - self.doubleconv = ConvBlock( - spatial_dims=spatial_dims, - in_channels=in_channels, - out_channels=channels[0], - act=act, - dropout=dropout, - ) - self._build_encoder() - self._build_decoder() - self.reduce_channels = Convolution( - spatial_dims=spatial_dims, - in_channels=channels[0], - out_channels=out_channels, - kernel_size=1, - strides=1, - padding=0, - conv_only=True, - ) - self.output_act = get_act_layer(output_act) if output_act else None - - @abstractmethod - def forward(self, x: torch.Tensor) -> torch.Tensor: - pass - - def _build_encoder(self) -> None: - for i in range(1, len(self.channels)): - self.add_module( - f"down{i}", - DownBlock( - spatial_dims=self.spatial_dims, - in_channels=self.channels[i - 1], - out_channels=self.channels[i], - act=self.act, - dropout=self.dropout, - ), - ) - - @abstractmethod - def _build_decoder(self) -> None: - pass - - -class UNet(BaseUNet): - """ - UNet based on [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/pdf/1505.04597). - - The user can customize the number of encoding blocks, the number of channels in each block, as well as other parameters - like the activation function. - - .. warning:: UNet works only with images whose dimensions are high enough powers of 2. More precisely, if n is the number - of max pooling operation in your UNet (which is equal to `len(channels)-1`), the image must have :math:`2^{k}` - pixels in each dimension, with :math:`k \\geq n` (e.g. 
shape (:math:`2^{n}`, :math:`2^{n+3}`) for a 2D image). - - Note: the implementation proposed here is not exactly the one described in the original paper. Padding is added to - convolutions so that the feature maps keep a constant size (except when they are passed to `max pool` or `up-sample` - layers), batch normalization is used, and `up-conv` layers are here made with an [Upsample](https://pytorch.org/docs/ - stable/generated/torch.nn.Upsample.html) layer followed by a 3x3 convolution. - - Parameters - ---------- - spatial_dims : int - number of spatial dimensions of the input image. - in_channels : int - number of channels in the input image. - out_channels : int - number of output channels. - channels : Sequence[int] (optional, default=(64, 128, 256, 512, 1024)) - sequence of integers stating the number of channels in each UNet block. Thus, this parameter also controls - the number of UNet blocks. The length of `channels` should be no less than 2.\n - Default to `(64, 128, 256, 512, 1024)`, as in the original paper. - act : ActivationParameters (optional, default=ActFunction.RELU) - the activation function used in the convolutional part, and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activation functions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them.\n - Default is "relu", as in the original paper. - output_act : Optional[ActivationParameters] (optional, default=None) - a potential activation layer applied to the output of the network. Should be passed in the same way as `act`. - If None, no last activation will be applied. - dropout : Optional[float] (optional, default=None) - dropout ratio. If None, no dropout.
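To make the size constraint from the warning above concrete (an editor's sketch, not part of the original docstring): with `channels=(4, 8)` there is a single max-pooling, so each spatial dimension only needs to be a multiple of 2, and, as the note above states, the spatial shape is preserved.

    >>> import torch
    >>> net = UNet(spatial_dims=2, in_channels=1, out_channels=2, channels=(4, 8))
    >>> net(torch.randn(1, 1, 64, 32)).shape  # 64 and 32 are both multiples of 2**1
    torch.Size([1, 2, 64, 32])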
- - Examples - -------- - >>> UNet( - spatial_dims=2, - in_channels=1, - out_channels=2, - channels=(4, 8), - act="elu", - output_act=("softmax", {"dim": 1}), - dropout=0.1, - ) - UNet( - (doubleconv): ConvBlock( - (0): Convolution( - (conv): Conv2d(1, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (1): Convolution( - (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - (down1): DownBlock( - (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) - (doubleconv): ConvBlock( - (0): Convolution( - (conv): Conv2d(4, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (1): Convolution( - (conv): Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - ) - (upsample1): UpSample( - (0): Upsample(scale_factor=2.0, mode='nearest') - (1): Convolution( - (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - (doubleconv1): ConvBlock( - (0): Convolution( - (conv): Conv2d(8, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - (1): Convolution( - (conv): Conv2d(4, 4, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) - (adn): ADN( - (N): BatchNorm2d(4, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (D): Dropout(p=0.1, inplace=False) - (A): ELU(alpha=1.0) - ) - ) - ) - (reduce_channels): Convolution( - (conv): Conv2d(4, 2, kernel_size=(1, 1), stride=(1, 1)) - ) - (output_act): Softmax(dim=1) - ) - """ - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x_history = [self.doubleconv(x)] - - for i in range(1, len(self.channels)): - x = self.get_submodule(f"down{i}")(x_history[-1]) - x_history.append(x) - - x_history.pop() # the output of bottelneck is not used as a residual - for i in range(len(self.channels) - 1, 0, -1): - up = self.get_submodule(f"upsample{i}")(x) - merged = torch.cat((x_history.pop(), up), dim=1) - x = self.get_submodule(f"doubleconv{i}")(merged) - - out = self.reduce_channels(x) - - if self.output_act is not None: - out = self.output_act(out) - - return out - - def _build_decoder(self): - for i in range(len(self.channels) - 1, 0, -1): - self.add_module( - f"upsample{i}", - UpSample( - spatial_dims=self.spatial_dims, - in_channels=self.channels[i], - out_channels=self.channels[i - 1], - act=self.act, - dropout=self.dropout, - ), - ) - self.add_module( - f"doubleconv{i}", - ConvBlock( - spatial_dims=self.spatial_dims, - in_channels=self.channels[i - 1] * 2, - out_channels=self.channels[i - 1], - act=self.act, - dropout=self.dropout, - ), - ) diff --git 
a/clinicadl/monai_networks/nn/utils/__init__.py b/clinicadl/monai_networks/nn/utils/__init__.py deleted file mode 100644 index ce603f205..000000000 --- a/clinicadl/monai_networks/nn/utils/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -from .checks import ( - check_adn_ordering, - check_conv_args, - check_mlp_args, - check_norm_layer, - check_pool_indices, - ensure_list_of_tuples, -) -from .shapes import ( - calculate_conv_out_shape, - calculate_convtranspose_out_shape, - calculate_pool_out_shape, - calculate_unpool_out_shape, -) diff --git a/clinicadl/monai_networks/nn/utils/checks.py b/clinicadl/monai_networks/nn/utils/checks.py deleted file mode 100644 index 1917a2894..000000000 --- a/clinicadl/monai_networks/nn/utils/checks.py +++ /dev/null @@ -1,167 +0,0 @@ -from typing import Any, Dict, List, Optional, Sequence, Tuple, Union - -from ..layers.utils import ( - ConvParameters, - NormalizationParameters, - NormLayer, - PoolingLayer, -) - -__all__ = [ - "ensure_list_of_tuples", - "check_norm_layer", - "check_conv_args", - "check_mlp_args", - "check_pool_indices", -] - - -def ensure_list_of_tuples( - parameter: ConvParameters, dim: int, n_layers: int, name: str -) -> List[Tuple[int, ...]]: - """ - Checks spatial parameters (e.g. kernel_size) and returns a list of tuples. - Each element of the list corresponds to the parameters of one layer, and - each element of the tuple corresponds to the parameters for one dimension. - """ - parameter = _check_conv_parameter(parameter, dim, n_layers, name) - if isinstance(parameter, tuple): - return [parameter] * n_layers - else: - return parameter - - -def check_norm_layer( - norm: Optional[NormalizationParameters], -) -> Optional[NormalizationParameters]: - """ - Checks that the argument for normalization layers has the right format (i.e. - `norm_type` or (`norm_type`, `norm_layer_parameters`)) and checks potential - mandatory arguments in `norm_layer_parameters`. - """ - if norm is None: - return norm - - if not isinstance(norm, str) and not isinstance(norm, PoolingLayer): - if ( - not isinstance(norm, tuple) - or len(norm) != 2 - or not isinstance(norm[1], dict) - ): - raise ValueError( - "norm must be either the name of the normalization layer or a double with first the name and then the " - f"arguments of the layer in a dict. Got {norm}" - ) - norm_mode = NormLayer(norm[0]) - args = norm[1] - else: - norm_mode = NormLayer(norm) - args = {} - if norm_mode == NormLayer.GROUP and "num_groups" not in args: - raise ValueError( - f"num_groups is a mandatory argument for GroupNorm and must be passed in `norm`. Got `norm`={norm}" - ) - - return norm - - -def check_adn_ordering(adn: str) -> str: - """ - Checks ADN sequence. - """ - if not isinstance(adn, str): - raise ValueError(f"adn_ordering must be a string. Got {adn}") - - for letter in adn: - if letter not in { - "A", - "D", - "N", - }: - raise ValueError( - f"adn_ordering must be composed by 'A', 'D' or/and 'N'. Got {letter}" - ) - if len(adn) != len(set(adn)): - raise ValueError(f"adn_ordering cannot contain duplicated letter. Got {adn}") - - return adn - - -def check_conv_args(conv_args: Dict[str, Any]) -> None: - """ - Checks that `conv_args` is a dict with at least the mandatory argument `channels`. - """ - if not isinstance(conv_args, dict): - raise ValueError( - f"conv_args must be a dict with the arguments for the convolutional part. 
Got: {conv_args}" - ) - if "channels" not in conv_args: - raise ValueError( - "channels is a mandatory argument for the convolutional part and must therefore be " - f"passed in conv_args. Got conv_args={conv_args}" - ) - - -def check_mlp_args(mlp_args: Optional[Dict[str, Any]]) -> None: - """ - Checks that `mlp_args` is a dict with at least the mandatory argument `hidden_channels`. - """ - if mlp_args is not None: - if not isinstance(mlp_args, dict): - raise ValueError( - f"mlp_args must be a dict with the arguments for the MLP part. Got: {mlp_args}" - ) - if "hidden_channels" not in mlp_args: - raise ValueError( - "hidden_channels is a mandatory argument for the MLP part and must therefore be " - f"passed in mlp_args. Got mlp_args={mlp_args}" - ) - - -def check_pool_indices( - pooling_indices: Optional[Sequence[int]], n_layers: int -) -> Sequence[int]: - """ - Checks that the (un)pooling indices are consistent with the number of layers. - """ - if pooling_indices is not None: - for idx in pooling_indices: - if idx > n_layers - 1: - raise ValueError( - f"indices in (un)pooling_indices must be smaller than len(channels)-1, got (un)pooling_indices={pooling_indices} and len(channels)={n_layers}" - ) - elif idx < -1: - raise ValueError( - f"indices in (un)pooling_indices must be greater or equal to -1, got (un)pooling_indices={pooling_indices}" - ) - return sorted(pooling_indices) - else: - return [] - - -def _check_conv_parameter( - parameter: ConvParameters, dim: int, n_layers: int, name: str -) -> Union[Tuple[int, ...], List[Tuple[int, ...]]]: - """ - Checks spatial parameters (e.g. kernel_size). - """ - if isinstance(parameter, int): - return (parameter,) * dim - elif isinstance(parameter, tuple): - if len(parameter) != dim: - raise ValueError( - f"If a tuple is passed for {name}, its dimension must be {dim}. Got {parameter}" - ) - return parameter - elif isinstance(parameter, list): - if len(parameter) != n_layers: - raise ValueError( - f"If a list is passed, {name} must contain as many elements as there are layers. " - f"There are {n_layers} layers, but got {parameter}" - ) - checked_params = [] - for param in parameter: - checked_params.append(_check_conv_parameter(param, dim, n_layers, name)) - return checked_params - else: - raise ValueError(f"{name} must be an int, a tuple or a list. Got {name}") diff --git a/clinicadl/monai_networks/nn/utils/shapes.py b/clinicadl/monai_networks/nn/utils/shapes.py deleted file mode 100644 index a649af076..000000000 --- a/clinicadl/monai_networks/nn/utils/shapes.py +++ /dev/null @@ -1,203 +0,0 @@ -from math import ceil -from typing import Optional, Sequence, Tuple, Union - -import numpy as np - -from ..layers.utils import PoolingLayer, UnpoolingLayer - -__all__ = [ - "calculate_conv_out_shape", - "calculate_convtranspose_out_shape", - "calculate_pool_out_shape", - "calculate_unpool_out_shape", -] - - -def calculate_conv_out_shape( - in_shape: Union[Sequence[int], int], - kernel_size: Union[Sequence[int], int], - stride: Union[Sequence[int], int] = 1, - padding: Union[Sequence[int], int] = 0, - dilation: Union[Sequence[int], int] = 1, - **kwargs, # for uniformization -) -> Tuple[int, ...]: - """ - Calculates the output shape of a convolution layer. All arguments can be scalars or multiple - values. Always return a tuple. 
- """ - in_shape_np = np.atleast_1d(in_shape) - kernel_size_np = np.atleast_1d(kernel_size) - stride_np = np.atleast_1d(stride) - padding_np = np.atleast_1d(padding) - dilation_np = np.atleast_1d(dilation) - - out_shape_np = ( - (in_shape_np + 2 * padding_np - dilation_np * (kernel_size_np - 1) - 1) - / stride_np - ) + 1 - - return tuple(int(s) for s in out_shape_np) - - -def calculate_convtranspose_out_shape( - in_shape: Union[Sequence[int], int], - kernel_size: Union[Sequence[int], int], - stride: Union[Sequence[int], int] = 1, - padding: Union[Sequence[int], int] = 0, - output_padding: Union[Sequence[int], int] = 0, - dilation: Union[Sequence[int], int] = 1, - **kwargs, # for uniformization -) -> Tuple[int, ...]: - """ - Calculates the output shape of a transposed convolution layer. All arguments can be scalars or - multiple values. Always return a tuple. - """ - in_shape_np = np.atleast_1d(in_shape) - kernel_size_np = np.atleast_1d(kernel_size) - stride_np = np.atleast_1d(stride) - padding_np = np.atleast_1d(padding) - dilation_np = np.atleast_1d(dilation) - output_padding_np = np.atleast_1d(output_padding) - - out_shape_np = ( - (in_shape_np - 1) * stride_np - - 2 * padding_np - + dilation_np * (kernel_size_np - 1) - + output_padding_np - + 1 - ) - - return tuple(int(s) for s in out_shape_np) - - -def calculate_pool_out_shape( - pool_mode: Union[str, PoolingLayer], - in_shape: Union[Sequence[int], int], - **kwargs, -) -> Tuple[int, ...]: - """ - Calculates the output shape of a pooling layer. The first argument is the type of pooling - performed (`max` or `avg`). All other arguments can be scalars or multiple values, except - `ceil_mode`. - Always return a tuple. - """ - pool_mode = PoolingLayer(pool_mode) - if pool_mode == PoolingLayer.MAX: - return _calculate_maxpool_out_shape(in_shape, **kwargs) - elif pool_mode == PoolingLayer.AVG: - return _calculate_avgpool_out_shape(in_shape, **kwargs) - elif pool_mode == PoolingLayer.ADAPT_MAX or pool_mode == PoolingLayer.ADAPT_AVG: - return _calculate_adaptivepool_out_shape(in_shape, **kwargs) - - -def calculate_unpool_out_shape( - unpool_mode: Union[str, UnpoolingLayer], - in_shape: Union[Sequence[int], int], - **kwargs, -) -> Tuple[int, ...]: - """ - Calculates the output shape of an unpooling layer. The first argument is the type of unpooling - performed (`upsample` or `convtranspose`). - Always return a tuple. - """ - unpool_mode = UnpoolingLayer(unpool_mode) - if unpool_mode == UnpoolingLayer.UPSAMPLE: - return _calculate_upsample_out_shape(in_shape, **kwargs) - elif unpool_mode == UnpoolingLayer.CONV_TRANS: - return calculate_convtranspose_out_shape(in_shape, **kwargs) - - -def _calculate_maxpool_out_shape( - in_shape: Union[Sequence[int], int], - kernel_size: Union[Sequence[int], int], - stride: Optional[Union[Sequence[int], int]] = None, - padding: Union[Sequence[int], int] = 0, - dilation: Union[Sequence[int], int] = 1, - ceil_mode: bool = False, - **kwargs, # for uniformization -) -> Tuple[int, ...]: - """ - Calculates the output shape of a MaxPool layer. 
- """ - if stride is None: - stride = kernel_size - - in_shape_np = np.atleast_1d(in_shape) - kernel_size_np = np.atleast_1d(kernel_size) - stride_np = np.atleast_1d(stride) - padding_np = np.atleast_1d(padding) - dilation_np = np.atleast_1d(dilation) - - out_shape_np = ( - (in_shape_np + 2 * padding_np - dilation_np * (kernel_size_np - 1) - 1) - / stride_np - ) + 1 - if ceil_mode: - out_shape = tuple(ceil(s) for s in out_shape_np) - else: - out_shape = tuple(int(s) for s in out_shape_np) - - return out_shape - - -def _calculate_avgpool_out_shape( - in_shape: Union[Sequence[int], int], - kernel_size: Union[Sequence[int], int], - stride: Optional[Union[Sequence[int], int]] = None, - padding: Union[Sequence[int], int] = 0, - ceil_mode: bool = False, - **kwargs, # for uniformization -) -> Tuple[int, ...]: - """ - Calculates the output shape of an AvgPool layer. - """ - if stride is None: - stride = kernel_size - - in_shape_np = np.atleast_1d(in_shape) - kernel_size_np = np.atleast_1d(kernel_size) - stride_np = np.atleast_1d(stride) - padding_np = np.atleast_1d(padding) - - out_shape_np = ((in_shape_np + 2 * padding_np - kernel_size_np) / stride_np) + 1 - if ceil_mode: - out_shape_np = np.ceil(out_shape_np) - out_shape_np[(out_shape_np - 1) * stride_np >= in_shape_np + padding_np] -= 1 - - return tuple(int(s) for s in out_shape_np) - - -def _calculate_adaptivepool_out_shape( - in_shape: Union[Sequence[int], int], - output_size: Union[Sequence[int], int], - **kwargs, # for uniformization -) -> Tuple[int, ...]: - """ - Calculates the output shape of an AdaptiveMaxPool or AdaptiveAvgPool layer. - """ - in_shape_np = np.atleast_1d(in_shape) - out_shape_np = np.ones_like(in_shape_np) * np.atleast_1d(output_size) - - return tuple(int(s) for s in out_shape_np) - - -def _calculate_upsample_out_shape( - in_shape: Union[Sequence[int], int], - scale_factor: Optional[Union[Sequence[int], int]] = None, - size: Optional[Union[Sequence[int], int]] = None, - **kwargs, # for uniformization -) -> Tuple[int, ...]: - """ - Calculates the output shape of an Upsample layer. - """ - in_shape_np = np.atleast_1d(in_shape) - if size and scale_factor: - raise ValueError("Pass either size or scale_factor, not both.") - elif size: - out_shape_np = np.ones_like(in_shape_np) * np.atleast_1d(size) - elif scale_factor: - out_shape_np = in_shape_np * scale_factor - else: - raise ValueError("Pass one of size or scale_factor.") - - return tuple(int(s) for s in out_shape_np) diff --git a/clinicadl/monai_networks/nn/vae.py b/clinicadl/monai_networks/nn/vae.py deleted file mode 100644 index 9dac6b43b..000000000 --- a/clinicadl/monai_networks/nn/vae.py +++ /dev/null @@ -1,200 +0,0 @@ -from copy import deepcopy -from typing import Any, Dict, Optional, Sequence, Tuple, Union - -import torch -import torch.nn as nn - -from .autoencoder import AutoEncoder -from .layers.utils import ActivationParameters, UnpoolingMode - - -class VAE(nn.Module): - """ - A Variational AutoEncoder with convolutional and fully connected layers. - - The user must pass the arguments to build an encoder, from its convolutional and - fully connected parts, and the decoder will be automatically built by taking the - symmetrical network. - - More precisely, to build the decoder, the order of the encoding layers is reverted, convolutions are - replaced by transposed convolutions and pooling layers are replaced by either upsampling or transposed - convolution layers. 
- Please note that the order of `Activation`, `Dropout` and `Normalization`, defined with the - argument `adn_ordering` in `conv_args`, is the same for the encoder and the decoder. - - Note that an `AutoEncoder` is an aggregation of a `CNN` (:py:class:`clinicadl.monai_networks.nn. - cnn.CNN`), whose last linear layer is duplicated to infer both the mean and the log variance, - and a `Generator` (:py:class:`clinicadl.monai_networks.nn.generator.Generator`). - - Parameters - ---------- - in_shape : Sequence[int] - sequence of integers stating the dimension of the input tensor (minus batch dimension). - latent_size : int - size of the latent vector. - conv_args : Dict[str, Any] - the arguments for the convolutional part of the encoder. The arguments are those accepted - by :py:class:`clinicadl.monai_networks.nn.conv_encoder.ConvEncoder`, except `in_shape` that - is specified here. So, the only mandatory argument is `channels`. - mlp_args : Optional[Dict[str, Any]] (optional, default=None) - the arguments for the MLP part of the encoder . The arguments are those accepted by - :py:class:`clinicadl.monai_networks.nn.mlp.MLP`, except `in_channels` that is inferred - from the output of the convolutional part, and `out_channels` that is set to `latent_size`. - So, the only mandatory argument is `hidden_channels`.\n - If None, the MLP part will be reduced to a single linear layer.\n - The last linear layer will be duplicated to infer both the mean and the log variance. - out_channels : Optional[int] (optional, default=None) - number of output channels. If None, the output will have the same number of channels as the - input. - output_act : Optional[ActivationParameters] (optional, default=None) - a potential activation layer applied to the output of the network, and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - unpooling_mode : Union[str, UnpoolingMode] (optional, default=UnpoolingMode.NEAREST) - type of unpooling. Can be either `"nearest"`, `"linear"`, `"bilinear"`, `"bicubic"`, `"trilinear"` or - `"convtranspose"`.\n - - `nearest`: unpooling is performed by upsampling with the :italic:`nearest` algorithm (see [PyTorch's Upsample layer] - (https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html)). - - `linear`: unpooling is performed by upsampling with the :italic:`linear` algorithm. Only works with 1D images (excluding the - channel dimension). - - `bilinear`: unpooling is performed by upsampling with the :italic:`bilinear` algorithm. Only works with 2D images. - - `bicubic`: unpooling is performed by upsampling with the :italic:`bicubic` algorithm. Only works with 2D images. - - `trilinear`: unpooling is performed by upsampling with the :italic:`trilinear` algorithm. Only works with 3D images. - - `convtranspose`: unpooling is performed with a transposed convolution, whose parameters (kernel size, stride, etc.) are - computed to reverse the pooling operation. 
- - Examples - -------- - >>> VAE( - in_shape=(1, 16, 16), - latent_size=4, - conv_args={"channels": [2]}, - mlp_args={"hidden_channels": [16], "output_act": "relu"}, - out_channels=2, - output_act="sigmoid", - unpooling_mode="bilinear", - ) - VAE( - (encoder): CNN( - (convolutions): ConvEncoder( - (layer0): Convolution( - (conv): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1)) - ) - ) - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (hidden0): Sequential( - (linear): Linear(in_features=392, out_features=16, bias=True) - (adn): ADN( - (N): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (A): PReLU(num_parameters=1) - ) - ) - (output): Identity() - ) - ) - (mu): Sequential( - (linear): Linear(in_features=16, out_features=4, bias=True) - (output_act): ReLU() - ) - (log_var): Sequential( - (linear): Linear(in_features=16, out_features=4, bias=True) - (output_act): ReLU() - ) - (decoder): Generator( - (mlp): MLP( - (flatten): Flatten(start_dim=1, end_dim=-1) - (hidden0): Sequential( - (linear): Linear(in_features=4, out_features=16, bias=True) - (adn): ADN( - (N): BatchNorm1d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) - (A): PReLU(num_parameters=1) - ) - ) - (output): Sequential( - (linear): Linear(in_features=16, out_features=392, bias=True) - (output_act): ReLU() - ) - ) - (reshape): Reshape() - (convolutions): ConvDecoder( - (layer0): Convolution( - (conv): ConvTranspose2d(2, 2, kernel_size=(3, 3), stride=(1, 1)) - ) - (output_act): Sigmoid() - ) - ) - ) - """ - - def __init__( - self, - in_shape: Sequence[int], - latent_size: int, - conv_args: Dict[str, Any], - mlp_args: Optional[Dict[str, Any]] = None, - out_channels: Optional[int] = None, - output_act: Optional[ActivationParameters] = None, - unpooling_mode: Union[str, UnpoolingMode] = UnpoolingMode.NEAREST, - ) -> None: - super().__init__() - ae = AutoEncoder( - in_shape, - latent_size, - conv_args, - mlp_args, - out_channels, - output_act, - unpooling_mode, - ) - - # replace last mlp layer by two parallel layers - mu_layers = deepcopy(ae.encoder.mlp.output) - log_var_layers = deepcopy(ae.encoder.mlp.output) - self._reset_weights( - log_var_layers - ) # to have different initialization for the two layers - ae.encoder.mlp.output = nn.Identity() - - self.encoder = ae.encoder - self.mu = mu_layers - self.log_var = log_var_layers - self.decoder = ae.decoder - - def forward( - self, x: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Encoding, sampling and decoding. - """ - feature = self.encoder(x) - mu = self.mu(feature) - log_var = self.log_var(feature) - z = self.reparameterize(mu, log_var) - - return self.decoder(z), mu, log_var - - def reparameterize(self, mu: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor: - """ - Samples a random vector from a gaussian distribution, given the mean and log-variance - of this distribution. - """ - std = torch.exp(0.5 * log_var) - - if self.training: # multiply random noise with std only during training - std = torch.randn_like(std).mul(std) - - return std.add_(mu) - - @classmethod - def _reset_weights(cls, layer: Union[nn.Sequential, nn.Linear]) -> None: - """ - Resets the output layer(s) of an MLP. 
- """ - if isinstance(layer, nn.Linear): - layer.reset_parameters() - else: - layer.linear.reset_parameters() diff --git a/clinicadl/monai_networks/nn/vit.py b/clinicadl/monai_networks/nn/vit.py deleted file mode 100644 index 372e1728a..000000000 --- a/clinicadl/monai_networks/nn/vit.py +++ /dev/null @@ -1,420 +0,0 @@ -import math -import re -from collections import OrderedDict -from copy import deepcopy -from enum import Enum -from typing import Any, Mapping, Optional, Sequence, Tuple, Union - -import numpy as np -import torch -import torch.nn as nn -from monai.networks.blocks.pos_embed_utils import build_sincos_position_embedding -from monai.networks.layers import Conv -from monai.networks.layers.utils import get_act_layer -from monai.utils import ensure_tuple_rep -from torch.hub import load_state_dict_from_url -from torchvision.models.vision_transformer import ( - ViT_B_16_Weights, - ViT_B_32_Weights, - ViT_L_16_Weights, - ViT_L_32_Weights, -) - -from .layers.utils import ActFunction, ActivationParameters -from .layers.vit import Encoder - - -class PosEmbedType(str, Enum): - """Available position embedding types for ViT.""" - - LEARN = "learnable" - SINCOS = "sincos" - - -class ViT(nn.Module): - """ - Vision Transformer based on the [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale] - (https://arxiv.org/pdf/2010.11929) paper. - Adapted from [torchvision's implementation](https://pytorch.org/vision/main/models/vision_transformer.html). - - The user can customize the patch size, the embedding dimension, the number of transformer blocks, the number of - attention heads, as well as other parameters like the type of position embedding. - - Parameters - ---------- - in_shape : Sequence[int] - sequence of integers stating the dimension of the input tensor (minus batch dimension). - patch_size : Union[Sequence[int], int] - sequence of integers stating the patch size (minus batch and channel dimensions). If int, the same - patch size will be used for all dimensions. - Patch size must divide image size in all dimensions. - num_outputs : Optional[int] - number of output variables after the last linear layer.\n - If None, the patch embeddings after the last transformer block will be returned. - embedding_dim : int (optional, default=768) - size of the embedding vectors. Must be divisible by `num_heads` as each head will be responsible for - a part of the embedding vectors. Default to 768, as for 'ViT-Base' in the original paper. - num_layers : int (optional, default=12) - number of consecutive transformer blocks. Default to 12, as for 'ViT-Base' in the original paper. - num_heads : int (optional, default=12) - number of heads in the self-attention block. Must divide `embedding_size`. - Default to 12, as for 'ViT-Base' in the original paper. - mlp_dim : int (optional, default=3072) - size of the hidden layer in the MLP part of the transformer block. Default to 3072, as for 'ViT-Base' - in the original paper. - pos_embed_type : Optional[Union[str, PosEmbedType]] (optional, default="learnable") - type of position embedding. Can be either `"learnable"`, `"sincos"` or `None`.\n - - `learnable`: the position embeddings are parameters that will be learned during the training - process. - - `sincos`: the position embeddings are fixed and determined with sinus and cosinus formulas (based on Dosovitskiy et al., - 'Attention Is All You Need, https://arxiv.org/pdf/1706.03762). Only implemented for 2D and 3D images. 
With `sincos` - position embedding, `embedding_dim` must be divisible by 4 for 2D images and by 6 for 3D images. - - `None`: no position embeddings are used.\n - Default to `"learnable"`, as in the original paper. - output_act : Optional[ActivationParameters] (optional, default=ActFunction.TANH) - if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, - and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them.\n - Default to `"tanh"`, as in the original paper. - dropout : Optional[float] (optional, default=None) - dropout ratio. If None, no dropout. - - Examples - -------- - >>> ViT( - in_shape=(3, 60, 64), - patch_size=4, - num_outputs=2, - embedding_dim=32, - num_layers=2, - num_heads=4, - mlp_dim=128, - output_act="softmax", - ) - ViT( - (conv_proj): Conv2d(3, 32, kernel_size=(4, 4), stride=(4, 4)) - (encoder): Encoder( - (dropout): Dropout(p=0.0, inplace=False) - (layers): ModuleList( - (0-1): 2 x EncoderBlock( - (norm1): LayerNorm((32,), eps=1e-06, elementwise_affine=True) - (self_attention): MultiheadAttention( - (out_proj): NonDynamicallyQuantizableLinear(in_features=32, out_features=32, bias=True) - ) - (dropout): Dropout(p=0.0, inplace=False) - (norm2): LayerNorm((32,), eps=1e-06, elementwise_affine=True) - (mlp): MLPBlock( - (0): Linear(in_features=32, out_features=128, bias=True) - (1): GELU(approximate='none') - (2): Dropout(p=0.0, inplace=False) - (3): Linear(in_features=128, out_features=32, bias=True) - (4): Dropout(p=0.0, inplace=False) - ) - ) - ) - (norm): LayerNorm((32,), eps=1e-06, elementwise_affine=True) - ) - (fc): Sequential( - (out): Linear(in_features=32, out_features=2, bias=True) - (output_act): Softmax(dim=None) - ) - ) - """ - - def __init__( - self, - in_shape: Sequence[int], - patch_size: Union[Sequence[int], int], - num_outputs: Optional[int], - embedding_dim: int = 768, - num_layers: int = 12, - num_heads: int = 12, - mlp_dim: int = 3072, - pos_embed_type: Optional[Union[str, PosEmbedType]] = PosEmbedType.LEARN, - output_act: Optional[ActivationParameters] = ActFunction.TANH, - dropout: Optional[float] = None, - ) -> None: - super().__init__() - - self.in_channels, *self.img_size = in_shape - self.spatial_dims = len(self.img_size) - self.patch_size = ensure_tuple_rep(patch_size, self.spatial_dims) - - self._check_embedding_dim(embedding_dim, num_heads) - self._check_patch_size(self.img_size, self.patch_size) - self.embedding_dim = embedding_dim - self.classification = True if num_outputs else False - dropout = dropout if dropout else 0.0 - - self.conv_proj = Conv[Conv.CONV, self.spatial_dims]( # pylint: disable=not-callable - in_channels=self.in_channels, - out_channels=self.embedding_dim, - kernel_size=self.patch_size, - stride=self.patch_size, - ) - self.seq_length = int( - np.prod(np.array(self.img_size) // np.array(self.patch_size)) - ) - - # Add a class token - if self.classification: - self.class_token = nn.Parameter(torch.zeros(1, 1, self.embedding_dim)) - self.seq_length += 1 - - pos_embedding = self._get_pos_embedding(pos_embed_type) - self.encoder = Encoder( - 
self.seq_length, - num_layers, - num_heads, - self.embedding_dim, - mlp_dim, - dropout=dropout, - attention_dropout=dropout, - pos_embedding=pos_embedding, - ) - - if self.classification: - self.class_token = nn.Parameter(torch.zeros(1, 1, embedding_dim)) - self.fc = nn.Sequential( - OrderedDict([("out", nn.Linear(embedding_dim, num_outputs))]) - ) - self.fc.output_act = get_act_layer(output_act) if output_act else None - else: - self.fc = None - - self._init_layers() - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv_proj(x) - # (n, hidden_dim, n_h, n_w) -> (n, (h * w * d), hidden_dim) - x = x.flatten(2).transpose(-1, -2) - n = x.shape[0] - - # Expand the class token to the full batch - if self.fc: - batch_class_token = self.class_token.expand(n, -1, -1) - x = torch.cat([batch_class_token, x], dim=1) - - x = self.encoder(x) - - # Classifier "token" as used by standard language architectures - if self.fc: - x = x[:, 0] - x = self.fc(x) - - return x - - def _get_pos_embedding( - self, pos_embed_type: Optional[Union[str, PosEmbedType]] - ) -> Optional[nn.Parameter]: - """ - Gets position embeddings. If `pos_embed_type` is "learnable", will return None as it will be handled - by the encoder module. - """ - if pos_embed_type is None: - pos_embed = nn.Parameter( - torch.zeros(1, self.seq_length, self.embedding_dim) - ) - pos_embed.requires_grad = False - return pos_embed - - pos_embed_type = PosEmbedType(pos_embed_type) - - if pos_embed_type == PosEmbedType.LEARN: - return None # will be initialized inside the Encoder - - elif pos_embed_type == PosEmbedType.SINCOS: - if self.spatial_dims != 2 and self.spatial_dims != 3: - raise ValueError( - f"{self.spatial_dims}D sincos position embedding not implemented" - ) - elif self.spatial_dims == 2 and self.embedding_dim % 4: - raise ValueError( - f"embedding_dim must be divisible by 4 for 2D sincos position embedding. Got embedding_dim={self.embedding_dim}" - ) - elif self.spatial_dims == 3 and self.embedding_dim % 6: - raise ValueError( - f"embedding_dim must be divisible by 6 for 3D sincos position embedding. Got embedding_dim={self.embedding_dim}" - ) - grid_size = [] - for in_size, pa_size in zip(self.img_size, self.patch_size): - grid_size.append(in_size // pa_size) - pos_embed = build_sincos_position_embedding( - grid_size, self.embedding_dim, self.spatial_dims - ) - if self.classification: - pos_embed = torch.nn.Parameter( - torch.cat([torch.zeros(1, 1, self.embedding_dim), pos_embed], dim=1) - ) # add 0 for class token pos embedding - pos_embed.requires_grad = False - return pos_embed - - def _init_layers(self): - """ - Initializes some layers, based on torchvision's implementation: https://pytorch.org/vision/main/ - _modules/torchvision/models/vision_transformer.html - """ - fan_in = self.conv_proj.in_channels * np.prod(self.conv_proj.kernel_size) - nn.init.trunc_normal_(self.conv_proj.weight, std=math.sqrt(1 / fan_in)) - nn.init.zeros_(self.conv_proj.bias) - - @classmethod - def _check_embedding_dim(cls, embedding_dim: int, num_heads: int) -> None: - """ - Checks consistency between embedding dimension and number of heads. - """ - if embedding_dim % num_heads != 0: - raise ValueError( - f"embedding_dim should be divisible by num_heads. Got embedding_dim={embedding_dim} " - f" and num_heads={num_heads}" - ) - - @classmethod - def _check_patch_size( - cls, img_size: Tuple[int, ...], patch_size: Tuple[int, ...] - ) -> None: - """ - Checks consistency between image size and patch size. 
- """ - for i, p in zip(img_size, patch_size): - if i % p != 0: - raise ValueError( - f"img_size should be divisible by patch_size. Got img_size={img_size} " - f" and patch_size={patch_size}" - ) - - -class SOTAViT(str, Enum): - """Supported ViT networks.""" - - B_16 = "ViT-B/16" - B_32 = "ViT-B/32" - L_16 = "ViT-L/16" - L_32 = "ViT-L/32" - - -def get_vit( - name: Union[str, SOTAViT], - num_outputs: Optional[int], - output_act: ActivationParameters = None, - pretrained: bool = False, -) -> ViT: - """ - To get a Vision Transformer implemented in the [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale] - (https://arxiv.org/pdf/2010.11929) paper. - - Only the last fully connected layer will be changed to match `num_outputs`. - - The user can also use the pretrained models from `torchvision`. Note that the last fully connected layer will not - used pretrained weights, as it is task specific. - - .. warning:: `ViT-B/16`, `ViT-B/32`, `ViT-L/16` and `ViT-L/32` work with 2D images of size (224, 224), with 3 channels. - - Parameters - ---------- - model : Union[str, SOTAViT] - The name of the Vision Transformer. Available networks are `ViT-B/16`, `ViT-B/32`, `ViT-L/16` and `ViT-L/32`. - num_outputs : Optional[int] - number of output variables after the last linear layer.\n - If None, the features before the last fully connected layer will be returned. - output_act : ActivationParameters (optional, default=None) - if `num_outputs` is not None, a potential activation layer applied to the outputs of the network, - and optionally its arguments. - Should be passed as `activation_name` or `(activation_name, arguments)`. If None, no activation will be used.\n - `activation_name` can be any value in {`celu`, `elu`, `gelu`, `leakyrelu`, `logsoftmax`, `mish`, `prelu`, - `relu`, `relu6`, `selu`, `sigmoid`, `softmax`, `tanh`}. Please refer to PyTorch's [activationfunctions] - (https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity) to know the optional - arguments for each of them. - pretrained : bool (optional, default=False) - whether to use pretrained weights. The pretrained weights used are the default ones from [torchvision](https:// - pytorch.org/vision/main/models/vision_transformer.html). - - Returns - ------- - ViT - The network, with potentially pretrained weights. 
- """ - name = SOTAViT(name) - if name == SOTAViT.B_16: - in_shape = (3, 224, 224) - patch_size = 16 - embedding_dim = 768 - mlp_dim = 3072 - num_layers = 12 - num_heads = 12 - model_url = ViT_B_16_Weights.DEFAULT.url - elif name == SOTAViT.B_32: - in_shape = (3, 224, 224) - patch_size = 32 - embedding_dim = 768 - mlp_dim = 3072 - num_layers = 12 - num_heads = 12 - model_url = ViT_B_32_Weights.DEFAULT.url - elif name == SOTAViT.L_16: - in_shape = (3, 224, 224) - patch_size = 16 - embedding_dim = 1024 - mlp_dim = 4096 - num_layers = 24 - num_heads = 16 - model_url = ViT_L_16_Weights.DEFAULT.url - elif name == SOTAViT.L_32: - in_shape = (3, 224, 224) - patch_size = 32 - embedding_dim = 1024 - mlp_dim = 4096 - num_layers = 24 - num_heads = 16 - model_url = ViT_L_32_Weights.DEFAULT.url - - # pylint: disable=possibly-used-before-assignment - vit = ViT( - in_shape=in_shape, - patch_size=patch_size, - num_outputs=num_outputs, - embedding_dim=embedding_dim, - mlp_dim=mlp_dim, - num_heads=num_heads, - num_layers=num_layers, - output_act=output_act, - ) - - if pretrained: - pretrained_dict = load_state_dict_from_url(model_url, progress=True) - if num_outputs is None: - del pretrained_dict["class_token"] - pretrained_dict["encoder.pos_embedding"] = pretrained_dict[ - "encoder.pos_embedding" - ][:, 1:] # remove class token position embedding - fc_layers = deepcopy(vit.fc) - vit.fc = None - vit.load_state_dict(_state_dict_adapter(pretrained_dict)) - vit.fc = fc_layers - - return vit - - -def _state_dict_adapter(state_dict: Mapping[str, Any]) -> Mapping[str, Any]: - """ - A mapping between torchvision's layer names and ours. - """ - state_dict = {k: v for k, v in state_dict.items() if "heads" not in k} - - mappings = [ - ("ln_", "norm"), - ("ln", "norm"), - (r"encoder_layer_(\d+)", r"\1"), - ] - - for key in list(state_dict.keys()): - new_key = key - for transform in mappings: - new_key = re.sub(transform[0], transform[1], new_key) - state_dict[new_key] = state_dict.pop(key) - - return state_dict diff --git a/clinicadl/predictor/old_predictor.py b/clinicadl/predictor/old_predictor.py index 360917e36..e4d727190 100644 --- a/clinicadl/predictor/old_predictor.py +++ b/clinicadl/predictor/old_predictor.py @@ -17,22 +17,14 @@ ) from clinicadl.experiment_manager.maps_manager import MapsManager from clinicadl.interpret.config import InterpretConfig -<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py + from clinicadl.metrics.old_metrics.metric_module import MetricModule from clinicadl.metrics.old_metrics.utils import ( check_selection_metric, find_selection_metrics, ) from clinicadl.networks.old_network.network import Network -======== -from clinicadl.maps_manager.maps_manager import MapsManager -from clinicadl.metrics.metric_module import MetricModule -from clinicadl.metrics.utils import ( - check_selection_metric, - find_selection_metrics, -) -from clinicadl.network.network import Network ->>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py + from clinicadl.predictor.config import PredictConfig from clinicadl.trainer.tasks_utils import ( columns, @@ -58,12 +50,6 @@ class Predictor: def __init__(self, _config: Union[PredictConfig, InterpretConfig]) -> None: self._config = _config - from clinicadl.splitter.config import SplitterConfig -<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py - from clinicadl.splitter.old_splitter import Splitter -======== - from clinicadl.splitter.splitter import Splitter ->>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py 
self.maps_manager = MapsManager(_config.maps_manager.maps_dir)
         self._config.adapt_with_maps_manager_info(self.maps_manager)
 
@@ -1074,11 +1060,7 @@ def _compute_output_tensors(
         Compute the output tensors and saves them in the MAPS.
 
         Args:
-<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py
             dataset (clinicadl.dataset.caps_dataset.CapsDataset): wrapper of the data set.
-========
-            dataset (clinicadl.caps_dataset.data.CapsDataset): wrapper of the data set.
->>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py
             data_group (str): name of the data group used for the task.
             split (int): split number.
             selection_metrics (list[str]): metrics used for model selection.
diff --git a/clinicadl/predictor/predictor.py b/clinicadl/predictor/predictor.py
index 1d6e32e90..157d49c8e 100644
--- a/clinicadl/predictor/predictor.py
+++ b/clinicadl/predictor/predictor.py
@@ -1,4 +1,4 @@
-<<<<<<< HEAD
+
 from clinicadl.dataset.caps_dataset import CapsDataset
 from clinicadl.experiment_manager.experiment_manager import ExperimentManager
 
@@ -11,1176 +11,3 @@ def __init__(self, manager: ExperimentManager):
     def predict(self, dataset_test: CapsDataset, split: int):
         """TO COMPLETE"""
         pass
-=======
-import json
-import shutil
-from logging import getLogger
-from pathlib import Path
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
-
-import pandas as pd
-import torch
-import torch.distributed as dist
-from torch.amp import autocast
-from torch.nn.modules.loss import _Loss
-from torch.utils.data import DataLoader
-from torch.utils.data.distributed import DistributedSampler
-
-from clinicadl.dataset.caps_dataset import (
-    return_dataset,
-)
-from clinicadl.experiment_manager.maps_manager import MapsManager
-from clinicadl.interpret.config import InterpretConfig
-<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py
-from clinicadl.metrics.old_metrics.metric_module import MetricModule
-from clinicadl.metrics.old_metrics.utils import (
-    check_selection_metric,
-    find_selection_metrics,
-)
-from clinicadl.networks.old_network.network import Network
-========
-from clinicadl.maps_manager.maps_manager import MapsManager
-from clinicadl.metrics.metric_module import MetricModule
-from clinicadl.metrics.utils import (
-    check_selection_metric,
-    find_selection_metrics,
-)
-from clinicadl.network.network import Network
->>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py
-from clinicadl.predictor.config import PredictConfig
-from clinicadl.trainer.tasks_utils import (
-    columns,
-    compute_metrics,
-    generate_label_code,
-    generate_test_row,
-    get_criterion,
-)
-from clinicadl.transforms.config import TransformsConfig
-from clinicadl.utils.computational.ddp import DDP, cluster
-from clinicadl.utils.enum import Task
-from clinicadl.utils.exceptions import (
-    ClinicaDLArgumentError,
-    ClinicaDLDataLeakageError,
-    MAPSError,
-)
-
-logger = getLogger("clinicadl.predict_manager")
-level_list: List[str] = ["warning", "info", "debug"]
-
-
-class Predictor:
-    def __init__(self, _config: Union[PredictConfig, InterpretConfig]) -> None:
-        self._config = _config
-
-        from clinicadl.splitter.config import SplitterConfig
-<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py
-        from clinicadl.splitter.old_splitter import Splitter
-========
-        from clinicadl.splitter.splitter import Splitter
->>>>>>>> 1ae72275 (Cb extract validator
(#666)):clinicadl/predictor/predictor.py - - self.maps_manager = MapsManager(_config.maps_manager.maps_dir) - self._config.adapt_with_maps_manager_info(self.maps_manager) - tmp = self._config.data.model_dump( - exclude=set(["preprocessing_dict", "mode", "caps_dict"]) - ) - tmp.update(self._config.split.model_dump()) - tmp.update(self._config.validation.model_dump()) - self.splitter = Splitter(SplitterConfig(**tmp)) - - def predict( - self, - label_code: Union[str, dict[str, int]] = "default", - ): - """Performs the prediction task on a subset of caps_directory defined in a TSV file.""" - - group_df = self._config.data.create_groupe_df() - self._check_data_group(group_df) - criterion = get_criterion( - self.maps_manager.network_task, self.maps_manager.loss - ) - - for split in self.splitter.split_iterator(): - logger.info(f"Prediction of split {split}") - group_df, group_parameters = self.get_group_info( - self._config.maps_manager.data_group, split - ) - # Find label code if not given - if self._config.data.is_given_label_code( - self.maps_manager.label, label_code - ): - generate_label_code( - self.maps_manager.network_task, group_df, self._config.data.label - ) - # Erase previous TSV files on master process - if not self._config.validation.selection_metrics: - split_selection_metrics = find_selection_metrics( - self.maps_manager.maps_path, - split, - ) - else: - split_selection_metrics = self._config.validation.selection_metrics - for selection in split_selection_metrics: - tsv_dir = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection}" - / self._config.maps_manager.data_group - ) - tsv_pattern = f"{self._config.maps_manager.data_group}*.tsv" - for tsv_file in tsv_dir.glob(tsv_pattern): - tsv_file.unlink() - - self._config.data.check_label(self.maps_manager.label) - if self.maps_manager.multi_network: - for network in range(self.maps_manager.num_networks): - self._predict_single( - group_parameters, - group_df, - self._config.transforms, - label_code, - criterion, - split, - split_selection_metrics, - network, - ) - else: - self._predict_single( - group_parameters, - group_df, - self._config.transforms, - label_code, - criterion, - split, - split_selection_metrics, - ) - if cluster.master: - self._ensemble_prediction( - self.maps_manager, - self._config.maps_manager.data_group, - split, - self._config.validation.selection_metrics, - self._config.data.use_labels, - self._config.validation.skip_leak_check, - ) - - def _predict_single( - self, - group_parameters, - group_df, - transforms, - label_code, - criterion, - split, - split_selection_metrics, - network: Optional[int] = None, - ): - """_summary_""" - - assert isinstance(self._config, PredictConfig) - # assert self._config.data.label - - data_test = return_dataset( - group_parameters["caps_directory"], - group_df, - self.maps_manager.preprocessing_dict, - transforms_config=self._config.transforms, - multi_cohort=group_parameters["multi_cohort"], - label_presence=self._config.data.use_labels, - label=self._config.data.label, - label_code=( - self.maps_manager.label_code if label_code == "default" else label_code - ), - cnn_index=network, - ) - test_loader = DataLoader( - data_test, - batch_size=( - self._config.dataloader.batch_size - if self._config.dataloader.batch_size is not None - else self.maps_manager.batch_size - ), - shuffle=False, - sampler=DistributedSampler( - data_test, - num_replicas=cluster.world_size, - rank=cluster.rank, - shuffle=False, - ), - num_workers=self._config.dataloader.n_proc - if 
self._config.dataloader.n_proc is not None - else self.maps_manager.n_proc, - ) - self._test_loader( - maps_manager=self.maps_manager, - dataloader=test_loader, - criterion=criterion, - data_group=self._config.maps_manager.data_group, - split=split, - selection_metrics=split_selection_metrics, - use_labels=self._config.data.use_labels, - gpu=self._config.computational.gpu, - amp=self._config.computational.amp, - network=network, - ) - if self._config.maps_manager.save_tensor: - logger.debug("Saving tensors") - self._compute_output_tensors( - maps_manager=self.maps_manager, - dataset=data_test, - data_group=self._config.maps_manager.data_group, - split=split, - selection_metrics=self._config.validation.selection_metrics, - gpu=self._config.computational.gpu, - network=network, - ) - if self._config.maps_manager.save_nifti: - self._compute_output_nifti( - dataset=data_test, - split=split, - network=network, - ) - if self._config.maps_manager.save_latent_tensor: - self._compute_latent_tensors( - dataset=data_test, - split=split, - network=network, - ) - - def _compute_latent_tensors( - self, - dataset, - split: int, - nb_images: Optional[int] = None, - network: Optional[int] = None, - ): - """ - Compute the output tensors and saves them in the MAPS. - Parameters - ---------- - dataset : _type_ - wrapper of the data set. - data_group : _type_ - name of the data group used for the task. - split : _type_ - split number. - selection_metrics : _type_ - metrics used for model selection. - nb_images : _type_ (optional, default=None) - number of full images to write. Default computes the outputs of the whole data set. - gpu : _type_ (optional, default=None) - If given, a new value for the device of the model will be computed. - network : _type_ (optional, default=None) - Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in self._config.validation.selection_metrics: - # load the best trained model during the training - model, _ = self.maps_manager._init_model( - transfer_path=self.maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=self._config.computational.gpu, - network=network, - nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=self.maps_manager.fully_sharded_data_parallel, - amp=self.maps_manager.amp, - ) - model.eval() - tensor_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / self._config.maps_manager.data_group - / "latent_tensors" - ) - if cluster.master: - tensor_path.mkdir(parents=True, exist_ok=True) - dist.barrier() - if nb_images is None: # Compute outputs for the whole data set - nb_modes = len(dataset) - else: - nb_modes = nb_images * dataset.elem_per_image - for i in [ - *range(cluster.rank, nb_modes, cluster.world_size), - *range(int(nb_modes % cluster.world_size <= cluster.rank)), - ]: - data = dataset[i] - image = data["image"] - logger.debug(f"Image for latent representation {image}") - with autocast("cuda", enabled=self.maps_manager.std_amp): - _, latent, _ = model.module._forward( - image.unsqueeze(0).to(model.device) - ) - latent = latent.squeeze(0).cpu().float() - participant_id = data["participant_id"] - session_id = data["session_id"] - mode_id = data[f"{self.maps_manager.mode}_id"] - output_filename = f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_latent.pt" - torch.save(latent, tensor_path / output_filename) - - @torch.no_grad() - def _compute_output_nifti( - self, - dataset, - split: int, - network: Optional[int] = None, - ): - """Computes the output nifti images and saves them in the MAPS. - Parameters - ---------- - dataset : _type_ - _description_ - data_group : str - name of the data group used for the task. - split : int - split number. - selection_metrics : list[str] - metrics used for model selection. - gpu : bool (optional, default=None) - If given, a new value for the device of the model will be computed. - network : int (optional, default=None) - Index of the network tested (only used in multi-network setting). 
- Raises - -------- - ClinicaDLException if not an image - """ - import nibabel as nib - from numpy import eye - - for selection_metric in self._config.validation.selection_metrics: - # load the best trained model during the training - model, _ = self.maps_manager._init_model( - transfer_path=self.maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=self._config.computational.gpu, - network=network, - nb_unfrozen_layer=self.maps_manager.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=self.maps_manager.fully_sharded_data_parallel, - amp=self.maps_manager.amp, - ) - model.eval() - nifti_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / self._config.maps_manager.data_group - / "nifti_images" - ) - if cluster.master: - nifti_path.mkdir(parents=True, exist_ok=True) - dist.barrier() - nb_imgs = len(dataset) - for i in [ - *range(cluster.rank, nb_imgs, cluster.world_size), - *range(int(nb_imgs % cluster.world_size <= cluster.rank)), - ]: - data = dataset[i] - image = data["image"] - x = image.unsqueeze(0).to(model.device) - with autocast("cuda", enabled=self.maps_manager.std_amp): - output = model(x) - output = output.squeeze(0).detach().cpu().float() - # Convert tensor to nifti image with appropriate affine - input_nii = nib.nifti1.Nifti1Image( - image[0].detach().cpu().numpy(), eye(4) - ) - output_nii = nib.nifti1.Nifti1Image(output[0].numpy(), eye(4)) - # Create file name according to participant and session id - participant_id = data["participant_id"] - session_id = data["session_id"] - input_filename = f"{participant_id}_{session_id}_image_input.nii.gz" - output_filename = f"{participant_id}_{session_id}_image_output.nii.gz" - nib.loadsave.save(input_nii, nifti_path / input_filename) - nib.loadsave.save(output_nii, nifti_path / output_filename) - - def interpret(self): - """Performs the interpretation task on a subset of caps_directory defined in a TSV file. - The mean interpretation is always saved, to save the individual interpretations set save_individual to True. - """ - assert isinstance(self._config, InterpretConfig) - - self._config.adapt_with_maps_manager_info(self.maps_manager) - - if self.maps_manager.multi_network: - raise NotImplementedError( - "The interpretation of multi-network framework is not implemented." 
- ) - transforms = TransformsConfig( - normalize=self.maps_manager.normalize, - data_augmentation=self.maps_manager.data_augmentation, - size_reduction=self.maps_manager.size_reduction, - size_reduction_factor=self.maps_manager.size_reduction_factor, - ) - group_df = self._config.data.create_groupe_df() - self._check_data_group(group_df) - - for split in self.splitter.split_iterator(): - logger.info(f"Interpretation of split {split}") - df_group, parameters_group = self.get_group_info( - self._config.maps_manager.data_group, split - ) - data_test = return_dataset( - parameters_group["caps_directory"], - df_group, - self.maps_manager.preprocessing_dict, - transforms_config=transforms, - multi_cohort=parameters_group["multi_cohort"], - label_presence=False, - label_code=self.maps_manager.label_code, - label=self.maps_manager.label, - ) - test_loader = DataLoader( - data_test, - batch_size=self._config.dataloader.batch_size, - shuffle=False, - num_workers=self._config.dataloader.n_proc, - ) - if not self._config.validation.selection_metrics: - self._config.validation.selection_metrics = find_selection_metrics( - self.maps_manager.maps_path, - split, - ) - for selection_metric in self._config.validation.selection_metrics: - logger.info(f"Interpretation of metric {selection_metric}") - results_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / self._config.maps_manager.data_group - / f"interpret-{self._config.interpret.name}" - ) - if (results_path).is_dir(): - if self._config.interpret.overwrite_name: - shutil.rmtree(results_path) - else: - raise MAPSError( - f"Interpretation name {self._config.interpret.name} is already written. " - f"Please choose another name or set overwrite_name to True." - ) - results_path.mkdir(parents=True) - model, _ = self.maps_manager._init_model( - transfer_path=self.maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=self._config.computational.gpu, - ) - interpreter = self._config.interpret.get_method()(model) - cum_maps = [0] * data_test.elem_per_image - for data in test_loader: - images = data["image"].to(model.device) - map_pt = interpreter.generate_gradients( - images, - self._config.interpret.target_node, - level=self._config.interpret.level, - amp=self._config.computational.amp, - ) - for i in range(len(data["participant_id"])): - mode_id = data[f"{self.maps_manager.mode}_id"][i] - cum_maps[mode_id] += map_pt[i] - if self._config.interpret.save_individual: - single_path = ( - results_path - / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.pt" - ) - torch.save(map_pt[i], single_path) - if self._config.maps_manager.save_nifti: - import nibabel as nib - from numpy import eye - - single_nifti_path = ( - results_path - / f"{data['participant_id'][i]}_{data['session_id'][i]}_{self.maps_manager.mode}-{data[f'{self.maps_manager.mode}_id'][i]}_map.nii.gz" - ) - output_nii = nib.nifti1.Nifti1Image( - map_pt[i].numpy(), eye(4) - ) - nib.loadsave.save(output_nii, single_nifti_path) - for i, mode_map in enumerate(cum_maps): - mode_map /= len(data_test) - torch.save( - mode_map, - results_path / f"mean_{self.maps_manager.mode}-{i}_map.pt", - ) - if self._config.maps_manager.save_nifti: - import nibabel as nib - from numpy import eye - - output_nii = nib.nifti1.Nifti1Image(mode_map.numpy(), eye(4)) - nib.loadsave.save( - output_nii, - results_path - / f"mean_{self.maps_manager.mode}-{i}_map.nii.gz", - ) - - def 
_check_data_group( - self, - df: Optional[pd.DataFrame] = None, - ): - """Check if a data group is already available if other arguments are None. - Else creates a new data_group. - - Parameters - ---------- - - Raises - ------ - MAPSError - when trying to overwrite train or validation data groups - ClinicaDLArgumentError - when caps_directory or df are given but data group already exists - ClinicaDLArgumentError - when caps_directory or df are not given and data group does not exist - - """ - group_dir = ( - self.maps_manager.maps_path - / "groups" - / self._config.maps_manager.data_group - ) - logger.debug(f"Group path {group_dir}") - if group_dir.is_dir(): # Data group already exists - if self._config.maps_manager.overwrite: - if self._config.maps_manager.data_group in ["train", "validation"]: - raise MAPSError("Cannot overwrite train or validation data group.") - else: - if not self._config.split.split: - self._config.split.split = self.maps_manager.find_splits() - assert self._config.split - for split in self._config.split.split: - selection_metrics = find_selection_metrics( - self.maps_manager.maps_path, - split, - ) - for selection in selection_metrics: - results_path = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection}" - / self._config.maps_manager.data_group - ) - if results_path.is_dir(): - shutil.rmtree(results_path) - elif df is not None or ( - self._config.data.caps_directory is not None - and self._config.data.caps_directory != Path("") - ): - raise ClinicaDLArgumentError( - f"Data group {self._config.maps_manager.data_group} is already defined. " - f"Please do not give any caps_directory, tsv_path or multi_cohort to use it. " - f"To erase {self._config.maps_manager.data_group} please set overwrite to True." - ) - - elif not group_dir.is_dir() and ( - self._config.data.caps_directory is None or df is None - ): # Data group does not exist yet / was overwritten + missing data - raise ClinicaDLArgumentError( - f"The data group {self._config.maps_manager.data_group} does not already exist. " - f"Please specify a caps_directory and a tsv_path to create this data group." - ) - elif ( - not group_dir.is_dir() - ): # Data group does not exist yet / was overwritten + all data is provided - if self._config.validation.skip_leak_check: - logger.info("Skipping data leakage check") - else: - self._check_leakage(self._config.maps_manager.data_group, df) - self._write_data_group( - self._config.maps_manager.data_group, - df, - self._config.data.caps_directory, - self._config.data.multi_cohort, - label=self._config.data.label, - ) - - def get_group_info( - self, data_group: str, split: int = None - ) -> Tuple[pd.DataFrame, Dict[str, Any]]: - """Gets information from corresponding data group - (list of participant_id / session_id + configuration parameters). - split is only needed if data_group is train or validation. - - Parameters - ---------- - data_group : str - _description_ - split : int (optional, default=None) - _description_ - - Returns - ------- - Tuple[pd.DataFrame, Dict[str, Any]] - _description_ - - Raises - ------ - MAPSError - _description_ - MAPSError - _description_ - MAPSError - _description_ - """ - group_path = self.maps_manager.maps_path / "groups" / data_group - if not group_path.is_dir(): - raise MAPSError( - f"Data group {data_group} is not defined. " - f"Please run a prediction to create this data group." 
- ) - if data_group in ["train", "validation"]: - if split is None: - raise MAPSError( - "Information on train or validation data can only be " - "loaded if a split number is given" - ) - elif not (group_path / f"split-{split}").is_dir(): - raise MAPSError( - f"Split {split} is not available for data group {data_group}." - ) - else: - group_path = group_path / f"split-{split}" - - df = pd.read_csv(group_path / "data.tsv", sep="\t") - json_path = group_path / "maps.json" - from clinicadl.utils.iotools.utils import path_decoder - - with json_path.open(mode="r") as f: - parameters = json.load(f, object_hook=path_decoder) - return df, parameters - - def _check_leakage(self, data_group: str, test_df: pd.DataFrame): - """Checks that no intersection exist between the participants used for training and those used for testing. - - Parameters - ---------- - data_group : str - name of the data group - test_df : pd.DataFrame - Table of participant_id / session_id of the data group - - Raises - ------ - ClinicaDLDataLeakageError - if data_group not in ["train", "validation"] and there is an intersection - between the participant IDs in test_df and the ones used for training. - """ - if data_group not in ["train", "validation"]: - train_path = self.maps_manager.maps_path / "groups" / "train+validation.tsv" - train_df = pd.read_csv(train_path, sep="\t") - participants_train = set(train_df.participant_id.values) - participants_test = set(test_df.participant_id.values) - intersection = participants_test & participants_train - - if len(intersection) > 0: - raise ClinicaDLDataLeakageError( - "Your evaluation set contains participants who were already seen during " - "the training step. The list of common participants is the following: " - f"{intersection}." - ) - - def _write_data_group( - self, - data_group, - df, - caps_directory: Path = None, - multi_cohort: bool = None, - label=None, - ): - """Check that a data_group is not already written and writes the characteristics of the data group - (TSV file with a list of participant / session + JSON file containing the CAPS and the preprocessing). - - Parameters - ---------- - data_group : _type_ - name whose presence is checked. - df : _type_ - DataFrame containing the participant_id and session_id (and label if use_labels is True) - caps_directory : Path (optional, default=None) - caps_directory if different from the training caps_directory, - multi_cohort : bool (optional, default=None) - multi_cohort used if different from the training multi_cohort. - label : _type_ (optional, default=None) - _description_ - """ - group_path = self.maps_manager.maps_path / "groups" / data_group - group_path.mkdir(parents=True) - - columns = ["participant_id", "session_id", "cohort"] - if self._config.data.label in df.columns.values: - columns += [self._config.data.label] - if label is not None and label in df.columns.values: - columns += [label] - - df.to_csv(group_path / "data.tsv", sep="\t", columns=columns, index=False) - self.maps_manager.write_parameters( - group_path, - { - "caps_directory": ( - caps_directory - if caps_directory is not None - else self._config.caps_directory - ), - "multi_cohort": ( - multi_cohort - if multi_cohort is not None - else self._config.multi_cohort - ), - }, - ) - - # this function is never used ??? 
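# ---------------------------------------------------------------------------
# Editor's reference sketch (not part of the original file): the deleted
# methods above and below all resolve their artefacts through the same two
# MAPS path patterns, which this snippet spells out. The concrete values
# (maps path, split index, selection metric, data group) are placeholders.
from pathlib import Path

maps_path = Path("/path/to/maps")  # placeholder
split, selection_metric, data_group = 0, "loss", "test"  # placeholders

# Group-level metadata, written by _write_data_group and read by
# get_group_info: holds data.tsv and maps.json.
group_dir = maps_path / "groups" / data_group

# Split-level results, used by the prediction and interpretation methods.
results_dir = maps_path / f"split-{split}" / f"best-{selection_metric}" / data_group
# ---------------------------------------------------------------------------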
- - def get_interpretation( - self, - data_group: str, - name: str, - split: int = 0, - selection_metric: Optional[str] = None, - verbose: bool = True, - participant_id: Optional[str] = None, - session_id: Optional[str] = None, - mode_id: int = 0, - ) -> torch.Tensor: - """ - Get the individual interpretation maps for one session if participant_id and session_id are filled. - Else load the mean interpretation map. - - Args: - data_group (str): Name of the data group used for the interpretation task. - name (str): name of the interpretation task. - split (int): Index of the split used for training. - selection_metric (str): Metric used for best weights selection. - verbose (bool): if True will print associated prediction.log. - participant_id (str): ID of the participant (if not given load mean map). - session_id (str): ID of the session (if not give load the mean map). - mode_id (int): Index of the mode used. - Returns: - (torch.Tensor): Tensor of the interpretability map. - """ - - selection_metric = check_selection_metric( - self.maps_manager.maps_path, - split, - selection_metric, - ) - if verbose: - self.maps_manager._print_description_log( - data_group, split, selection_metric - ) - map_dir = ( - self.maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - / f"interpret-{name}" - ) - if not map_dir.is_dir(): - raise MAPSError( - f"No prediction corresponding to data group {data_group} and " - f"interpretation {name} was found." - ) - if participant_id is None and session_id is None: - map_pt = torch.load( - map_dir / f"mean_{self.maps_manager.mode}-{mode_id}_map.pt", - weights_only=True, - ) - elif participant_id is None or session_id is None: - raise ValueError( - "To load the mean interpretation map, " - "please do not give any participant_id or session_id.\n " - "Else specify both parameters" - ) - else: - map_pt = torch.load( - map_dir - / f"{participant_id}_{session_id}_{self.maps_manager.mode}-{mode_id}_map.pt", - weights_only=True, - ) - return map_pt - - def test( - self, - mode: str, - metrics_module: MetricModule, - n_classes: int, - network_task, - model: Network, - dataloader: DataLoader, - criterion: _Loss, - use_labels: bool = True, - amp: bool = False, - report_ci=False, - ) -> Tuple[pd.DataFrame, Dict[str, float]]: - """ - Computes the predictions and evaluation metrics. - - Parameters - ---------- - model: Network - The model trained. - dataloader: DataLoader - Wrapper of a CapsDataset. - criterion: _Loss - Function to calculate the loss. - use_labels: bool - If True the true_label will be written in output DataFrame - and metrics dict will be created. - amp: bool - If True, enables Pytorch's automatic mixed precision. - - Returns - ------- - the results and metrics on the image level. 
- """ - model.eval() - dataloader.dataset.eval() - - results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) - total_loss = {} - with torch.no_grad(): - for i, data in enumerate(dataloader): - # initialize the loss list to save the loss components - with autocast("cuda", enabled=amp): - outputs, loss_dict = model(data, criterion, use_labels=use_labels) - - if i == 0: - for loss_component in loss_dict.keys(): - total_loss[loss_component] = 0 - for loss_component in total_loss.keys(): - total_loss[loss_component] += loss_dict[loss_component].float() - - # Generate detailed DataFrame - for idx in range(len(data["participant_id"])): - row = generate_test_row( - network_task, - mode, - metrics_module, - n_classes, - idx, - data, - outputs.float(), - ) - row_df = pd.DataFrame( - row, columns=columns(network_task, mode, n_classes) - ) - results_df = pd.concat([results_df, row_df]) - - del outputs, loss_dict - dataframes = [None] * dist.get_world_size() - dist.gather_object( - results_df, dataframes if dist.get_rank() == 0 else None, dst=0 - ) - if dist.get_rank() == 0: - results_df = pd.concat(dataframes) - del dataframes - results_df.reset_index(inplace=True, drop=True) - - if not use_labels: - metrics_dict = None - else: - metrics_dict = compute_metrics( - network_task, results_df, metrics_module, report_ci=report_ci - ) - for loss_component in total_loss.keys(): - dist.reduce(total_loss[loss_component], dst=0) - loss_value = total_loss[loss_component].item() / cluster.world_size - - if report_ci: - metrics_dict["Metric_names"].append(loss_component) - metrics_dict["Metric_values"].append(loss_value) - metrics_dict["Lower_CI"].append("N/A") - metrics_dict["Upper_CI"].append("N/A") - metrics_dict["SE"].append("N/A") - - else: - metrics_dict[loss_component] = loss_value - - torch.cuda.empty_cache() - - return results_df, metrics_dict - - def test_da( - self, - mode: str, - metrics_module: MetricModule, - n_classes: int, - network_task: Union[str, Task], - model: Network, - dataloader: DataLoader, - criterion: _Loss, - alpha: float = 0, - use_labels: bool = True, - target: bool = True, - report_ci=False, - ) -> Tuple[pd.DataFrame, Dict[str, float]]: - """ - Computes the predictions and evaluation metrics. - - Args: - model: the model trained. - dataloader: wrapper of a CapsDataset. - criterion: function to calculate the loss. - use_labels: If True the true_label will be written in output DataFrame - and metrics dict will be created. - Returns: - the results and metrics on the image level. 
- """ - model.eval() - dataloader.dataset.eval() - results_df = pd.DataFrame(columns=columns(network_task, mode, n_classes)) - total_loss = 0 - with torch.no_grad(): - for i, data in enumerate(dataloader): - outputs, loss_dict = model.compute_outputs_and_loss_test( - data, criterion, alpha, target - ) - total_loss += loss_dict["loss"].item() - - # Generate detailed DataFrame - for idx in range(len(data["participant_id"])): - row = generate_test_row( - network_task, - mode, - metrics_module, - n_classes, - idx, - data, - outputs, - ) - row_df = pd.DataFrame( - row, columns=columns(network_task, mode, n_classes) - ) - results_df = pd.concat([results_df, row_df]) - - del outputs, loss_dict - results_df.reset_index(inplace=True, drop=True) - - if not use_labels: - metrics_dict = None - else: - metrics_dict = compute_metrics( - network_task, results_df, metrics_module, report_ci=report_ci - ) - if report_ci: - metrics_dict["Metric_names"].append("loss") - metrics_dict["Metric_values"].append(total_loss) - metrics_dict["Lower_CI"].append("N/A") - metrics_dict["Upper_CI"].append("N/A") - metrics_dict["SE"].append("N/A") - - else: - metrics_dict["loss"] = total_loss - - torch.cuda.empty_cache() - - return results_df, metrics_dict - - def _test_loader( - self, - maps_manager: MapsManager, - dataloader, - criterion, - data_group: str, - split: int, - selection_metrics, - use_labels=True, - gpu=None, - amp=False, - network=None, - report_ci=True, - ): - """ - Launches the testing task on a dataset wrapped by a DataLoader and writes prediction TSV files. - - Args: - dataloader (torch.utils.data.DataLoader): DataLoader wrapping the test CapsDataset. - criterion (torch.nn.modules.loss._Loss): optimization criterion used during training. - data_group (str): name of the data group used for the testing task. - split (int): Index of the split used to train the model tested. - selection_metrics (list[str]): List of metrics used to select the best models which are tested. - use_labels (bool): If True, the labels must exist in test meta-data and metrics are computed. - gpu (bool): If given, a new value for the device of the model will be computed. - amp (bool): If enabled, uses Automatic Mixed Precision (requires GPU usage). - network (int): Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in selection_metrics: - if cluster.master: - log_dir = ( - maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - ) - maps_manager.write_description_log( - log_dir, - data_group, - dataloader.dataset.config.data.caps_dict, - dataloader.dataset.config.data.data_df, - ) - - # load the best trained model during the training - model, _ = maps_manager._init_model( - transfer_path=maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=gpu, - network=network, - ) - model = DDP( - model, - fsdp=maps_manager.fully_sharded_data_parallel, - amp=maps_manager.amp, - ) - - prediction_df, metrics = self.test( - mode=maps_manager.mode, - metrics_module=maps_manager.metrics_module, - n_classes=maps_manager.n_classes, - network_task=maps_manager.network_task, - model=model, - dataloader=dataloader, - criterion=criterion, - use_labels=use_labels, - amp=amp, - report_ci=report_ci, - ) - if use_labels: - if network is not None: - metrics[f"{maps_manager.mode}_id"] = network - - loss_to_log = ( - metrics["Metric_values"][-1] if report_ci else metrics["loss"] - ) - - logger.info( - f"{maps_manager.mode} level {data_group} loss is {loss_to_log} for model selected on {selection_metric}" - ) - - if cluster.master: - # Replace here - maps_manager._mode_level_to_tsv( - prediction_df, - metrics, - split, - selection_metric, - data_group=data_group, - ) - - @torch.no_grad() - def _compute_output_tensors( - self, - maps_manager: MapsManager, - dataset, - data_group, - split, - selection_metrics, - nb_images=None, - gpu=None, - network=None, - ): - """ - Compute the output tensors and saves them in the MAPS. - - Args: -<<<<<<<< HEAD:clinicadl/predictor/old_predictor.py - dataset (clinicadl.dataset.caps_dataset.CapsDataset): wrapper of the data set. -======== - dataset (clinicadl.caps_dataset.data.CapsDataset): wrapper of the data set. ->>>>>>>> 1ae72275 (Cb extract validator (#666)):clinicadl/predictor/predictor.py - data_group (str): name of the data group used for the task. - split (int): split number. - selection_metrics (list[str]): metrics used for model selection. - nb_images (int): number of full images to write. Default computes the outputs of the whole data set. - gpu (bool): If given, a new value for the device of the model will be computed. - network (int): Index of the network tested (only used in multi-network setting). 
- """ - for selection_metric in selection_metrics: - # load the best trained model during the training - model, _ = maps_manager._init_model( - transfer_path=maps_manager.maps_path, - split=split, - transfer_selection=selection_metric, - gpu=gpu, - network=network, - nb_unfrozen_layer=maps_manager.nb_unfrozen_layer, - ) - model = DDP( - model, - fsdp=maps_manager.fully_sharded_data_parallel, - amp=maps_manager.amp, - ) - model.eval() - - tensor_path = ( - maps_manager.maps_path - / f"split-{split}" - / f"best-{selection_metric}" - / data_group - / "tensors" - ) - if cluster.master: - tensor_path.mkdir(parents=True, exist_ok=True) - dist.barrier() - - if nb_images is None: # Compute outputs for the whole data set - nb_modes = len(dataset) - else: - nb_modes = nb_images * dataset.elem_per_image - - for i in [ - *range(cluster.rank, nb_modes, cluster.world_size), - *range(int(nb_modes % cluster.world_size <= cluster.rank)), - ]: - data = dataset[i] - image = data["image"] - x = image.unsqueeze(0).to(model.device) - with autocast("cuda", enabled=maps_manager.std_amp): - output = model(x) - output = output.squeeze(0).cpu().float() - participant_id = data["participant_id"] - session_id = data["session_id"] - mode_id = data[f"{maps_manager.mode}_id"] - input_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_input.pt" - output_filename = f"{participant_id}_{session_id}_{maps_manager.mode}-{mode_id}_output.pt" - torch.save(image, tensor_path / input_filename) - torch.save(output, tensor_path / output_filename) - logger.debug(f"File saved at {[input_filename, output_filename]}") - - def _ensemble_prediction( - self, - maps_manager: MapsManager, - data_group, - split, - selection_metrics, - use_labels=True, - skip_leak_check=False, - ): - """Computes the results on the image-level.""" - - if not selection_metrics: - selection_metrics = find_selection_metrics(maps_manager.maps_path, split) - - for selection_metric in selection_metrics: - ##################### - # Soft voting - if maps_manager.num_networks > 1 and not skip_leak_check: - maps_manager._ensemble_to_tsv( - split, - selection=selection_metric, - data_group=data_group, - use_labels=use_labels, - ) - elif maps_manager.mode != "image" and not skip_leak_check: - maps_manager._mode_to_image_tsv( - split, - selection=selection_metric, - data_group=data_group, - use_labels=use_labels, - ) ->>>>>>> 1ae72275 (Cb extract validator (#666)) diff --git a/clinicadl/utils/iotools/train_utils.py b/clinicadl/utils/iotools/train_utils.py index 06423ca31..7989f7142 100644 --- a/clinicadl/utils/iotools/train_utils.py +++ b/clinicadl/utils/iotools/train_utils.py @@ -220,11 +220,9 @@ def merge_cli_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str, Any] Dict[str, Any] A dictionary with training options. """ -<<<<<<< HEAD + from clinicadl.dataset.caps_dataset_utils import read_json -======= - from clinicadl.caps_dataset.caps_dataset_utils import read_json ->>>>>>> 1ae72275 (Cb extract validator (#666)) + options = read_json(maps_json) for arg in kwargs: @@ -257,11 +255,9 @@ def merge_options_and_maps_json_options(maps_json: Path, **kwargs) -> Dict[str, Dict[str, Any] A dictionary with training options. """ -<<<<<<< HEAD + from clinicadl.dataset.caps_dataset_utils import read_json -======= - from clinicadl.caps_dataset.caps_dataset_utils import read_json ->>>>>>> 1ae72275 (Cb extract validator (#666)) + options = read_json(maps_json) for arg in kwargs: