From f473a73407b0df834a3583ab3d38db1f0ee3d2c6 Mon Sep 17 00:00:00 2001
From: InnopolisU
Date: Thu, 31 Oct 2024 22:46:57 +0300
Subject: [PATCH] Fixed errors and added threshold configuration

---
 config/datasets/anomaly_detection_images.yaml |  2 +-
 ...0724_ba083ak_anomaly_detection_images.yaml |  8 +------
 .../experiments/detection/KG_220524_coco.yaml |  2 +-
 .../example_image_classification_resnet.yaml  |  1 -
 .../semantic-segmentation/THRESHOLD.yaml      | 18 +++++++++++++++
 config/models/detection/yolov5default.yaml    |  3 +++
 dui/pages/config_edition.py                   |  9 +++++---
 infer.py                                      |  2 +-
 .../anomaly_detection_images.py               |  4 +++-
 .../lightning_datamodules/image_folder_dm.py  | 15 ++++++------
 .../anomaly_detection_images.py               |  7 ++----
 .../torch/lightning_modules/segmentation.py   |  4 ++--
 innofw/core/models/torch_adapter.py           | 23 +++++++++++++------
 innofw/pipeline.py                            |  1 +
 14 files changed, 63 insertions(+), 36 deletions(-)
 create mode 100644 config/experiments/semantic-segmentation/THRESHOLD.yaml
 create mode 100644 config/models/detection/yolov5default.yaml

diff --git a/config/datasets/anomaly_detection_images.yaml b/config/datasets/anomaly_detection_images.yaml
index 8496966c..97acb7dc 100644
--- a/config/datasets/anomaly_detection_images.yaml
+++ b/config/datasets/anomaly_detection_images.yaml
@@ -22,4 +22,4 @@ test:
 infer:
   source: https://api.blackhole.ai.innopolis.university/public-datasets/anomaly_detection_mvtec/test.zip
-  target: ./data/MVTEC/test
+  target: ./data/MVTEC/infer
diff --git a/config/experiments/anomaly-detection/KG_210724_ba083ak_anomaly_detection_images.yaml b/config/experiments/anomaly-detection/KG_210724_ba083ak_anomaly_detection_images.yaml
index 9ec8aa85..dcaac11a 100644
--- a/config/experiments/anomaly-detection/KG_210724_ba083ak_anomaly_detection_images.yaml
+++ b/config/experiments/anomaly-detection/KG_210724_ba083ak_anomaly_detection_images.yaml
@@ -14,11 +14,5 @@ task: "anomaly-detection-images"
 random_seed: 0
 epochs: 50
 batch_size: 8
-accelerator: gpu
+ckpt_path: https://api.blackhole.ai.innopolis.university/pretrained/anomaly_det/anomaly50.pt
 
-wandb:
-  enable: True
-  project: anomaly_detect_mvtec
-  entity: "k-galliamov"
-  group: none
-  job_type: training
diff --git a/config/experiments/detection/KG_220524_coco.yaml b/config/experiments/detection/KG_220524_coco.yaml
index 16e24fb3..71a77e2d 100644
--- a/config/experiments/detection/KG_220524_coco.yaml
+++ b/config/experiments/detection/KG_220524_coco.yaml
@@ -11,4 +11,4 @@ project: coco_pose_recognition
 random_seed: 0
 task: image-detection
 weights_freq: 1
-ckpt_path: https://api.blackhole.ai.innopolis.university/pretrained/pose_det/pose073.pt
\ No newline at end of file
+ckpt_path: https://api.blackhole.ai.innopolis.university/pretrained/pose_det/pose90.pt
\ No newline at end of file
diff --git a/config/experiments/example_image_classification_resnet.yaml b/config/experiments/example_image_classification_resnet.yaml
index c0beebad..2f7c26c4 100644
--- a/config/experiments/example_image_classification_resnet.yaml
+++ b/config/experiments/example_image_classification_resnet.yaml
@@ -7,7 +7,6 @@ defaults:
   - override /augmentations_test: none
   - override /losses: log_loss.yaml
   - override /optimizers: adam
-  - override /schedulers:
 
 project: "mnist_classification"
 task: "image-classification"
diff --git a/config/experiments/semantic-segmentation/THRESHOLD.yaml b/config/experiments/semantic-segmentation/THRESHOLD.yaml
new file mode 100644
index 00000000..3e9444c2
--- /dev/null
+++ b/config/experiments/semantic-segmentation/THRESHOLD.yaml
@@ -0,0 +1,18 @@
+# @package _global_
+defaults:
+  - override /models: semantic-segmentation/unet
+  - override /datasets: semantic-segmentation/segmentation_brain.yaml
+  - override /augmentations_train: none #segmentation_stroke.yaml
+  - override /augmentations_val: none
+  - override /augmentations_test: none
+  - override /losses: segmentation_losses.yaml
+models:
+  in_channels: 1
+project: "segmentation"
+task: "image-segmentation"
+random_seed: 42
+stop_param: 10
+epochs: 300
+weights_freq: 1
+batch_size: 10
+threshold: 0.2
\ No newline at end of file
diff --git a/config/models/detection/yolov5default.yaml b/config/models/detection/yolov5default.yaml
new file mode 100644
index 00000000..83c0d85a
--- /dev/null
+++ b/config/models/detection/yolov5default.yaml
@@ -0,0 +1,3 @@
+_target_: ultralytics.YOLO
+description: yolov5 by ultralytics
+name: yolov5
diff --git a/dui/pages/config_edition.py b/dui/pages/config_edition.py
index aab1da18..d1ede4f1 100644
--- a/dui/pages/config_edition.py
+++ b/dui/pages/config_edition.py
@@ -255,13 +255,16 @@ def on_strain_btn_button_click(set_progress, n, config_name):
         out_err = subprocess_errin.read()
 
         if out_err:
-            err_output.append(html.P(out_err))
+            out_err = out_err.split("\n")
+            for e in out_err:
+                err_output.append(html.P(e))
 
         if not out_text and process.poll() is None:
             time.sleep(0.5)
             continue
-
-        text_output.append(html.P(out_text))
+        out_text = out_text.split("\n")
+        for t in out_text:
+            text_output.append(html.P(t))
 
     out = dbc.Row(id="process_output", children=[dbc.Col(text_output), dbc.Col(err_output)])
     set_progress(out)
diff --git a/infer.py b/infer.py
index 4a7f22c7..e1bd6159 100644
--- a/infer.py
+++ b/infer.py
@@ -35,7 +35,7 @@ def main(config):
     ]
     config.experiment_name = experiment_name
     setup_clear_ml(config)
-    setup_wandb(config)
+    # setup_wandb(config)
     return run_pipeline(config, predict=True, test=False, train=False)
diff --git a/innofw/core/datamodules/lightning_datamodules/anomaly_detection_images.py b/innofw/core/datamodules/lightning_datamodules/anomaly_detection_images.py
index d95b1d28..7fdd81f2 100644
--- a/innofw/core/datamodules/lightning_datamodules/anomaly_detection_images.py
+++ b/innofw/core/datamodules/lightning_datamodules/anomaly_detection_images.py
@@ -1,7 +1,7 @@
 import os
 import logging
 import pathlib
-
+from pathlib import Path
 import pandas as pd
 import torch
 import cv2
@@ -82,6 +82,8 @@ def predict_dataloader(self):
         return test_dataloader
 
     def setup_infer(self):
+        if str(self.predict_source).endswith("labels"):
+            self.predict_source = Path(str(self.predict_source)[:-6]+"images")
         self.predict_dataset = AnomaliesDataset(self.predict_source, self.get_aug(self.aug, 'test'))
 
     def save_preds(self, out_batches, stage: Stages, dst_path: pathlib.Path):
diff --git a/innofw/core/datamodules/lightning_datamodules/image_folder_dm.py b/innofw/core/datamodules/lightning_datamodules/image_folder_dm.py
index 20e72a2f..644b513f 100755
--- a/innofw/core/datamodules/lightning_datamodules/image_folder_dm.py
+++ b/innofw/core/datamodules/lightning_datamodules/image_folder_dm.py
@@ -84,7 +84,7 @@ def setup_train_test_val(self, **kwargs):
         # divide into train, val, test
         n = len(train_dataset)
         train_size = int(n * (1 - self.val_size))
-
+        random_train_dataset, random_val_dataset = random_split(train_dataset, [train_size, n - train_size])
         # stratify
         if self.stratify:
@@ -98,11 +98,12 @@ def setup_train_test_val(self, **kwargs):
             strat_val = Counter(second_set_labels).values()
             coefsstrat_val = [i/len(second_set_labels) for i in strat_val]
 
-            random_train = Counter([self.train_dataset.dataset.targets[i] for i in self.train_dataset.indices]).values()
-            coefsrandom_train = [i/len(self.train_dataset.indices) for i in random_train]
-            random_val = Counter([self.val_dataset.dataset.targets[i] for i in self.val_dataset.indices]).values()
-            coefsrandom_val = [i/len(self.val_dataset.indices) for i in random_val]
+            random_train = Counter([random_train_dataset.dataset.targets[i] for i in random_train_dataset.indices]).values()
+            coefsrandom_train = [i/len(random_train_dataset.indices) for i in random_train]
+
+            random_val = Counter([random_val_dataset.dataset.targets[i] for i in random_val_dataset.indices]).values()
+            coefsrandom_val = [i/len(random_val_dataset.indices) for i in random_val]
 
             all_train = Counter(train_dataset.targets).values()
             coefsall_train = [i/len(train_dataset.targets) for i in all_train]
@@ -117,9 +118,9 @@ def setup_train_test_val(self, **kwargs):
                 random train split - {coefsrandom_train}\n
                 random val split - {coefsrandom_val}\n
                 ''')
         else:
-            self.train_dataset, self.val_dataset = random_split(
-                train_dataset, [train_size, n - train_size])
+            self.train_dataset, self.val_dataset = random_train_dataset, random_val_dataset
         # Set validatoin augmentations for val
+        setattr(self.train_dataset.dataset, "transform", train_aug)
         setattr(self.val_dataset.dataset, "transform", val_aug)
 
     def save_preds(self, preds, stage: Stages, dst_path: pathlib.Path):
diff --git a/innofw/core/models/torch/lightning_modules/anomaly_detection_images.py b/innofw/core/models/torch/lightning_modules/anomaly_detection_images.py
index c2f902b4..f414a1a9 100644
--- a/innofw/core/models/torch/lightning_modules/anomaly_detection_images.py
+++ b/innofw/core/models/torch/lightning_modules/anomaly_detection_images.py
@@ -99,13 +99,10 @@ def test_step(self, batch, batch_idx):
         return {"loss": loss}
 
     def predict_step(self, x, batch_idx, **kwargs):
-        return (x, self.compute_anomaly_mask(x))
-
-    def compute_anomaly_mask(self, x):
         x_rec = self.forward(x)  # (B, C, W, H)
         diff = ((x - x_rec) ** 2).sum(dim=1)  # sum across channels
-        mask = diff >= self.model.anomaly_threshold
-        return mask
+        mask = (diff >= self.model.anomaly_threshold).to(torch.uint8)
+        return x, mask
 
     def log_metrics(self, stage, metrics_res, *args, **kwargs):
         for key, value in metrics_res.items():
diff --git a/innofw/core/models/torch/lightning_modules/segmentation.py b/innofw/core/models/torch/lightning_modules/segmentation.py
index ac1581d9..1642cad3 100755
--- a/innofw/core/models/torch/lightning_modules/segmentation.py
+++ b/innofw/core/models/torch/lightning_modules/segmentation.py
@@ -1,4 +1,4 @@
-__all__ = ["SegmentationLM"]
+__all__ = ["SemanticSegmentationLightningModule", "MulticlassSemanticSegmentationLightningModule"]
 
 # standard libraries
 from typing import Any, Optional
@@ -177,7 +177,7 @@ def test_step(self, batch, *args, **kwargs) -> Optional[STEP_OUTPUT]:
     def model_load_checkpoint(self, path):
         self.model.load_state_dict(torch.load(path)["state_dict"])
 
-    def predict_step(self, batch: Any, batch_indx: int) -> torch.Tensor:
+    def predict_step(self, batch: Any, batch_indx: int, **kwargs) -> torch.Tensor:
         """Predict and output binary predictions"""
         if isinstance(batch, dict):
             input_tensor = batch[SegDataKeys.image]
diff --git a/innofw/core/models/torch_adapter.py b/innofw/core/models/torch_adapter.py
index 2c389397..ccdbe26c 100755
--- a/innofw/core/models/torch_adapter.py
+++ b/innofw/core/models/torch_adapter.py
@@ -116,13 +116,22 @@ def __init__(
             if not value:
                 objects[key] = get_default(key, framework, task)
-
-        self.pl_module = objects["lightning_module"](
-            model,
-            objects["losses"],
-            objects["optimizers_cfg"],
-            objects["schedulers_cfg"],
-        )
+        if "threshold" in kwargs.keys() and kwargs["threshold"] is not None:
+            threshold = kwargs["threshold"]
+            self.pl_module = objects["lightning_module"](
+                model,
+                objects["losses"],
+                objects["optimizers_cfg"],
+                objects["schedulers_cfg"],
+                threshold=threshold
+            )
+        else:
+            self.pl_module = objects["lightning_module"](
+                model,
+                objects["losses"],
+                objects["optimizers_cfg"],
+                objects["schedulers_cfg"],
+            )
 
         try:
             self.pl_module.setup_metrics(self.metrics)
         except AttributeError:
diff --git a/innofw/pipeline.py b/innofw/pipeline.py
index 5fe4d058..940dbe35 100755
--- a/innofw/pipeline.py
+++ b/innofw/pipeline.py
@@ -154,6 +154,7 @@ def run_pipeline(
         "weights_freq": cfg.get("weights_freq"),
         "logger": logger,
         "random_state": cfg.get("random_seed"),
+        "threshold": cfg.get("threshold")
     }
     inno_model = InnoModel(**model_params)
     result = None
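
Note for reviewers: below is a minimal, self-contained Python sketch (not code from this repository; the names DummySegmentationModule and build_module are illustrative only) of how the new threshold value is expected to travel from an experiment config such as THRESHOLD.yaml through run_pipeline and the torch adapter into a lightning module, and of how a module might use it to binarize predictions.

from typing import Optional

import torch


class DummySegmentationModule:
    """Stand-in for a lightning module whose predictions are cut at a threshold."""

    def __init__(self, threshold: float = 0.5):
        self.threshold = threshold

    def predict_step(self, logits: torch.Tensor) -> torch.Tensor:
        # Values whose sigmoid exceeds the configured threshold become 1,
        # everything else becomes 0 (returned as uint8, like the patched modules).
        return (torch.sigmoid(logits) > self.threshold).to(torch.uint8)


def build_module(threshold: Optional[float] = None) -> DummySegmentationModule:
    # Mirrors the torch_adapter change: forward `threshold` only when the
    # experiment config actually set it, otherwise keep the module's default.
    if threshold is not None:
        return DummySegmentationModule(threshold=threshold)
    return DummySegmentationModule()


if __name__ == "__main__":
    cfg = {"threshold": 0.2}  # e.g. the value added in THRESHOLD.yaml
    module = build_module(threshold=cfg.get("threshold"))
    logits = torch.randn(1, 1, 4, 4)
    print(module.predict_step(logits).sum().item(), "pixels above threshold")

Forwarding the keyword only when it is set keeps modules that do not accept a threshold argument working unchanged, which is the same guard the torch_adapter.py hunk above applies.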