Commit
Some cleaning (#629)
* remove check_gpu from cmdline utils and adapt qc to config
camillebrianceau authored Jun 24, 2024
1 parent 2e5bfa0 commit 7e35b44
Showing 10 changed files with 51 additions and 63 deletions.
2 changes: 1 addition & 1 deletion clinicadl/cmdline.py
@@ -10,9 +10,9 @@
from clinicadl.commandline.pipelines.prepare_data.prepare_data_from_bids_cli import (
cli as prepare_data_from_bids_cli,
)
from clinicadl.commandline.pipelines.quality_check.cli import cli as qc_cli
from clinicadl.commandline.pipelines.train.cli import cli as train_cli
from clinicadl.hugging_face.hugging_face_cli import cli as hf_cli
from clinicadl.quality_check.qc_cli import cli as qc_cli
from clinicadl.random_search.random_search_cli import cli as random_search_cli
from clinicadl.tsvtools.cli import cli as tsvtools_cli
from clinicadl.utils.logger import setup_logging
4 changes: 2 additions & 2 deletions clinicadl/commandline/modules_options/computational.py
@@ -19,8 +19,8 @@
"this flag is already set to FSDP to that the zero flag is never actually removed.",
)
gpu = click.option(
"--gpu/--no-gpu",
default=get_default("gpu", ComputationalConfig),
"--no-gpu",
is_flag=True,
help="Use GPU by default. Please specify `--no-gpu` to force using CPU.",
show_default=True,
)
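
A minimal sketch (not part of this commit) of how a command consumes the reworked option: `--no-gpu` is now a plain flag that defaults to False, and the caller inverts it when building the computational config, mirroring the `gpu=not no_gpu` pattern used by the t1-linear quality-check command further down.

```python
# Hypothetical example command, not from the repository: it only illustrates
# turning the new `--no-gpu` flag back into the `gpu` field of the config.
import click

from clinicadl.config.config.computational import ComputationalConfig


@click.command()
@click.option("--no-gpu", is_flag=True, help="Force CPU execution.")
@click.option("--amp", is_flag=True, help="Enable automatic mixed precision.")
def example(no_gpu: bool, amp: bool) -> None:
    # Flags default to False, so GPU use remains the default behaviour.
    # The config validator (see computational.py below) raises a
    # ClinicaDLArgumentError if gpu stays True but no CUDA device is found.
    config = ComputationalConfig(amp=amp, gpu=not no_gpu)
    click.echo(f"gpu={config.gpu}, amp={config.amp}")


if __name__ == "__main__":
    example()
```
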
File renamed without changes.
@@ -47,7 +47,9 @@ def cli(
"""
from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig

from .quality_check import quality_check as pet_linear_qc
from .....quality_check.pet_linear.quality_check import (
quality_check as pet_linear_qc,
)

config = CapsDatasetConfig.from_preprocessing_and_extraction_method(
caps_directory=caps_directory,
@@ -2,8 +2,11 @@

import click

from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
from clinicadl.commandline import arguments
from clinicadl.commandline.modules_options import computational, data, dataloader
from clinicadl.config.config.computational import ComputationalConfig
from clinicadl.utils.enum import ExtractionMethod, Preprocessing


@click.command(name="t1-linear", no_args_is_help=True)
@@ -31,7 +34,6 @@
@click.option(
"--use_tensor",
type=bool,
default=False,
is_flag=True,
help="Flag allowing the pipeline to run on the extracted tensors and not on the nifti images",
)
@@ -42,7 +44,7 @@ def cli(
threshold,
batch_size,
n_proc,
gpu,
no_gpu,
amp,
network,
use_tensor,
@@ -54,23 +56,28 @@
OUTPUT_TSV is the path to the tsv file where results will be saved.
"""
from clinicadl.utils.cmdline_utils import check_gpu

if gpu:
check_gpu()
from clinicadl.quality_check.t1_linear.quality_check import (
quality_check as linear_qc,
)

from .quality_check import quality_check as linear_qc
computational_config = ComputationalConfig(amp=amp, gpu=not no_gpu)
config = CapsDatasetConfig.from_preprocessing_and_extraction_method(
caps_directory=caps_directory,
extraction=ExtractionMethod.IMAGE,
preprocessing_type=Preprocessing.T1_LINEAR,
preprocessing=Preprocessing.T1_LINEAR,
use_uncropped_image=use_uncropped_image,
data_tsv=participants_tsv,
n_proc=n_proc,
batch_size=batch_size,
use_tensor=use_tensor,
)

linear_qc(
caps_directory,
output_path=results_tsv,
tsv_path=participants_tsv,
threshold=threshold,
batch_size=batch_size,
n_proc=n_proc,
gpu=gpu,
amp=amp,
network=network,
use_tensor=use_tensor,
use_uncropped_image=use_uncropped_image,
config=config,
computational_config=computational_config,
)
@@ -23,7 +23,7 @@ def cli(
GROUP_LABEL is the group associated to the gray matter DARTEL template in CAPS_DIRECTORY.
"""
from .quality_check import quality_check as volume_qc
from .....quality_check.t1_volume.quality_check import quality_check as volume_qc

volume_qc(
caps_directory,
10 changes: 7 additions & 3 deletions clinicadl/config/config/computational.py
@@ -3,7 +3,6 @@
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Self

from clinicadl.utils.cmdline_utils import check_gpu
from clinicadl.utils.exceptions import ClinicaDLArgumentError

logger = getLogger("clinicadl.computational_config")
@@ -19,9 +18,14 @@ class ComputationalConfig(BaseModel):
model_config = ConfigDict(validate_assignment=True)

@model_validator(mode="after")
def validator_gpu(self) -> Self:
def check_gpu(self) -> Self:
if self.gpu:
check_gpu()
import torch

if not torch.cuda.is_available():
raise ClinicaDLArgumentError(
"No GPU is available. To run on CPU, please set gpu to false or add the --no-gpu flag if you use the commandline."
)
elif self.amp:
raise ClinicaDLArgumentError(
"AMP is designed to work with modern GPUs. Please add the --gpu flag."
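
With check_gpu gone from cmdline_utils, the availability check now lives in the config itself, so every code path that instantiates a ComputationalConfig gets the same validation. A short illustrative sketch of the resulting behaviour (error messages as shown in the diff above):

```python
# Sketch of the relocated GPU check after this commit; raising cases are
# left commented out because they depend on the machine's CUDA availability.
from clinicadl.config.config.computational import ComputationalConfig

# CPU-only runs now opt out explicitly, either here or via the --no-gpu flag:
cpu_config = ComputationalConfig(gpu=False)

# With gpu=True (the default) the validator calls torch.cuda.is_available()
# and raises ClinicaDLArgumentError when no device is found:
# ComputationalConfig(gpu=True)

# AMP still requires a GPU, so this combination is rejected as before:
# ComputationalConfig(gpu=False, amp=True)
```
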
40 changes: 16 additions & 24 deletions clinicadl/quality_check/t1_linear/quality_check.py
@@ -4,16 +4,17 @@

from logging import getLogger
from pathlib import Path
from typing import Optional

import pandas as pd
import torch
from torch.cuda.amp import autocast
from torch.utils.data import DataLoader

from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
from clinicadl.config.config.computational import ComputationalConfig
from clinicadl.generate.generate_utils import load_and_check_tsv
from clinicadl.utils.clinica_utils import RemoteFileStructure, fetch_file
from clinicadl.utils.enum import ExtractionMethod, Preprocessing
from clinicadl.utils.exceptions import ClinicaDLArgumentError

from .models import resnet_darq_qc_18 as darq_r18
@@ -25,17 +26,12 @@


def quality_check(
caps_dir: Path,
config: CapsDatasetConfig,
output_path: Path,
tsv_path: Path = None,
threshold: float = 0.5,
batch_size: int = 1,
n_proc: int = 0,
gpu: bool = True,
amp: bool = False,
network: str = "darq",
use_tensor: bool = False,
use_uncropped_image: bool = True,
use_tensor: bool = True,
computational_config: Optional[ComputationalConfig] = None,
):
"""
Performs t1-linear quality-check
@@ -64,16 +60,9 @@
To use uncropped images instead of the cropped ones.
"""

if computational_config is None:
computational_config = ComputationalConfig()
logger = getLogger("clinicadl.quality_check")
config = CapsDatasetConfig.from_preprocessing_and_extraction_method(
caps_directory=caps_dir,
extraction=ExtractionMethod.IMAGE,
preprocessing_type=Preprocessing.T1_LINEAR,
preprocessing=Preprocessing.T1_LINEAR,
use_uncropped_image=use_uncropped_image,
data_tsv=tsv_path,
)

if output_path.suffix != ".tsv":
raise ValueError("please enter a tsv path")
@@ -124,10 +113,10 @@
logger.debug("Loading quality check model.")
model.load_state_dict(torch.load(model_file))
model.eval()
if gpu:
if computational_config.gpu:
logger.debug("Working on GPU.")
model = model.cuda()
elif amp:
elif computational_config.amp:
raise ClinicaDLArgumentError(
"AMP is designed to work with modern GPUs. Please add the --gpu flag."
)
@@ -139,12 +128,15 @@
# Load DataFrame
logger.debug("Loading data to check.")
config.data.data_df = load_and_check_tsv(
tsv_path, caps_dict, output_path.resolve().parent
config.data.data_tsv, caps_dict, output_path.resolve().parent
)

dataset = QCDataset(config, use_extracted_tensors=use_tensor)
dataloader = DataLoader(
dataset, num_workers=n_proc, batch_size=batch_size, pin_memory=True
dataset,
num_workers=config.dataloader.n_proc,
batch_size=config.dataloader.batch_size,
pin_memory=True,
)

columns = ["participant_id", "session_id", "pass_probability", "pass"]
@@ -159,9 +151,9 @@
for data in dataloader:
logger.debug(f"Processing subject {data['participant_id']}.")
inputs = data["image"]
if gpu:
if computational_config.gpu:
inputs = inputs.cuda()
with autocast(enabled=amp):
with autocast(enabled=computational_config.amp):
outputs = softmax(model(inputs))
# We cast back to 32bits. It should be a no-op as softmax is not eligible
# to fp16 and autocast is forbidden on CPU (output would be bf16 otherwise).
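
Taken together with the CLI changes above, running the t1-linear quality check from Python now amounts to building the two config objects and handing them to quality_check. A rough sketch mirroring the new signature and the t1-linear CLI wiring shown in this commit (paths are placeholders):

```python
# Illustrative only: keyword arguments mirror the refactored t1-linear QC
# command; all paths below are placeholders.
from pathlib import Path

from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
from clinicadl.config.config.computational import ComputationalConfig
from clinicadl.quality_check.t1_linear.quality_check import quality_check
from clinicadl.utils.enum import ExtractionMethod, Preprocessing

config = CapsDatasetConfig.from_preprocessing_and_extraction_method(
    caps_directory=Path("/path/to/caps"),
    extraction=ExtractionMethod.IMAGE,
    preprocessing_type=Preprocessing.T1_LINEAR,
    preprocessing=Preprocessing.T1_LINEAR,
    use_uncropped_image=False,
    data_tsv=Path("/path/to/participants.tsv"),
    n_proc=2,
    batch_size=8,
    use_tensor=False,
)

quality_check(
    config=config,
    output_path=Path("/path/to/qc_results.tsv"),  # must end in .tsv
    threshold=0.5,
    network="darq",
    use_tensor=False,
    computational_config=ComputationalConfig(gpu=False),
)
```
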
10 changes: 0 additions & 10 deletions clinicadl/utils/cmdline_utils.py

This file was deleted.

7 changes: 0 additions & 7 deletions clinicadl/utils/maps_manager/maps_manager.py
@@ -15,7 +15,6 @@
)
from clinicadl.caps_dataset.extraction.utils import path_encoder
from clinicadl.transforms.config import TransformsConfig
from clinicadl.utils.cmdline_utils import check_gpu
from clinicadl.utils.exceptions import (
ClinicaDLArgumentError,
ClinicaDLConfigurationError,
@@ -423,12 +422,6 @@ def _check_args(self, parameters):
f"No value was given for {arg}."
)
self.parameters = add_default_values(parameters)
if self.parameters["gpu"]:
check_gpu()
elif self.parameters["amp"]:
raise ClinicaDLArgumentError(
"AMP is designed to work with modern GPUs. Please add the --gpu flag."
)

transfo_config = TransformsConfig(
normalize=self.normalize,
