From 8898f0939bf4894eb9efa978a802f95e8ef63032 Mon Sep 17 00:00:00 2001
From: Camille Brianceau
Date: Tue, 19 Jul 2022 13:41:57 +0200
Subject: [PATCH 1/5] Add new network (InceptionNet)

---
 clinicadl/utils/network/__init__.py      |  2 +-
 clinicadl/utils/network/cnn/inception.py | 99 ++++++++++++++++++++++++
 clinicadl/utils/network/cnn/models.py    | 56 ++++++++++++++
 3 files changed, 156 insertions(+), 1 deletion(-)
 create mode 100644 clinicadl/utils/network/cnn/inception.py

diff --git a/clinicadl/utils/network/__init__.py b/clinicadl/utils/network/__init__.py
index bf155524d..cf211b336 100644
--- a/clinicadl/utils/network/__init__.py
+++ b/clinicadl/utils/network/__init__.py
@@ -1,5 +1,5 @@
 from .autoencoder.models import AE_Conv4_FC3, AE_Conv5_FC3
-from .cnn.models import Conv4_FC3, Conv5_FC3, Stride_Conv5_FC3, resnet18
+from .cnn.models import Conv4_FC3, Conv5_FC3, Stride_Conv5_FC3, resnet18, Inception
 from .cnn.random import RandomArchitecture
 from .vae.vanilla_vae import (
     Vanilla3DdenseVAE,

diff --git a/clinicadl/utils/network/cnn/inception.py b/clinicadl/utils/network/cnn/inception.py
new file mode 100644
index 000000000..13438bc03
--- /dev/null
+++ b/clinicadl/utils/network/cnn/inception.py
@@ -0,0 +1,99 @@
+
+import math
+from typing import Any, Callable, List, Optional, Tuple
+
+import torch
+from torch import nn
+from torchvision.models.inception import (
+    BasicConv2d,
+    InceptionA,
+    InceptionAux,
+    InceptionB,
+    InceptionC,
+    InceptionD,
+    InceptionE,
+)
+
+model_urls = {"Inception_v3":"https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"}
+
+
+class InceptionDesigner(nn.Module):
+    def __init__(
+        self,
+        input_size,
+        InceptionBlocks: Optional[List[Callable[..., nn.Module]]] = None,
+        num_classes=1000,
+        aux_logits: bool = True,
+        dropout: float = 0.5)-> None:
+        super(InceptionDesigner, self).__init__()
+
+        if InceptionBlocks is None:
+            InceptionBlocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
+
+
+        self.aux_logits = aux_logits
+        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
+        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
+        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
+        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)
+        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
+        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
+        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)
+        self.Mixed_5b = InceptionA(192, pool_features=32)
+        self.Mixed_5c = InceptionA(256, pool_features=64)
+        self.Mixed_5d = InceptionA(288, pool_features=64)
+        self.Mixed_6a = InceptionB(288)
+        self.Mixed_6b = InceptionC(768, channels_7x7=128)
+        self.Mixed_6c = InceptionC(768, channels_7x7=160)
+        self.Mixed_6d = InceptionC(768, channels_7x7=160)
+        self.Mixed_6e = InceptionC(768, channels_7x7=192)
+        self.AuxLogits: Optional[nn.Module] = None
+        if aux_logits:
+            self.AuxLogits = InceptionAux(768, num_classes)
+        self.Mixed_7a = InceptionD(768)
+        self.Mixed_7b = InceptionE(1280)
+        self.Mixed_7c = InceptionE(2048)
+
+
+
+
+        input_tensor = self._transform_input(input_size)
+        out = self.Conv2d_1a_3x3(input_tensor)
+        out = self.Conv2d_2a_3x3(out)
+        out = self.Conv2d_2b_3x3(out)
+        out = self.maxpool1(out)
+        out = self.Conv2d_3b_1x1(out)
+        out = self.Conv2d_4a_3x3(out)
+        out = self.maxpool2(out)
+        out = self.Mixed_5b(out)
+        out = self.Mixed_5c(out)
+        out = self.Mixed_6a(out)
+        out = self.Mixed_6b(out)
+        out = self.Mixed_6c(out)
+        out = self.Mixed_6d(out)
+        out = self.Mixed_6e(out)
+        if aux_logits:
+            aux = self.AuxLogits(out)
+        out = self.Mixed_7a(out)
+        out = self.Mixed_7b(out)
+        out = self.Mixed_7c(out)
+
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(2048, num_classes)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                m.weight.data.normal_(0, math.sqrt(2.0 / n))
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+
+    def _transform_input(self, x):
+        if self.transform_input:
+            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
+            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
+            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
+            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
+        return x

diff --git a/clinicadl/utils/network/cnn/models.py b/clinicadl/utils/network/cnn/models.py
index c706fd99a..3ccb8946f 100644
--- a/clinicadl/utils/network/cnn/models.py
+++ b/clinicadl/utils/network/cnn/models.py
@@ -170,7 +170,63 @@ def __init__(self, input_size, gpu=False, output_size=2, dropout=0.5):
             gpu=gpu,
         )
 
+class Inception(CNN):
+    """
+    Deep 2D convolutional neural network architecture codenamed Inception,
+    which set a new state of the art for classification and detection in the
+    ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC14).
+    It improves the utilization of the computing resources inside the network
+    by increasing its depth and width while keeping the computational budget constant.
+    The architectural decisions were based on the Hebbian principle and the intuition of multi-scale processing.
+
+    https://arxiv.org/pdf/1512.00567v3.pdf
+
+    """
+    def __init__(self,
+            input_size = (299,299,3),
+            gpu : bool= False):
+
+        model = InceptionDesigner(input_size,num_classes =1000, aux_logits = True, dropout =0.5)
+        model.load_state_dict(model_zoo.load_url("https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"))
+
+        convolutions = nn.Sequential(
+            model.Conv2d_1a_3x3,
+            model.Conv2d_2a_3x3,
+            model.Conv2d_2b_3x3,
+            model.maxpool1,
+            model.Conv2d_3b_1x1,
+            model.Conv2d_4a_3x3,
+            model.maxpool2,
+            model.Mixed_5b,
+            model.Mixed_5c,
+            model.Mixed_5d,
+            model.Mixed_6a,
+            model.Mixed_6b,
+            model.Mixed_6c,
+            model.Mixed_6d,
+            model.Mixed_6e,
+            model.AuxLogits,
+            model.Mixed_7a,
+            model.Mixed_7b,
+            model.Mixed_7c,
+            model.avgpool
+        )
+
+        fc = nn.Sequential(
+            model.dropout,
+            model.fc
+        )
+
+        super().__init__(
+            convolutions = convolutions,
+            fc = fc,
+            gpu = gpu
+
+        )
+
+
 class Stride_Conv5_FC3(CNN):
     """
     Reduce the 2D or 3D input image to an array of size output_size.
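Note: the _transform_input method added above is taken verbatim from torchvision's Inception3. Per channel it computes x * (std / 0.5) + (mean - 0.5) / 0.5 with the ImageNet statistics, which remaps a tensor normalized with the ImageNet mean/std to the (x - 0.5) / 0.5 normalization the Google-trained checkpoint expects. A minimal standalone check of that identity (not part of the patch; note also that the transform_input attribute the method tests is never set by the constructor above, so the flag must be added before the method can run):

    import torch

    p = torch.rand(2, 3, 299, 299)  # raw pixel values in [0, 1]
    mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)

    x = (p - mean) / std                      # ImageNet-style normalization
    y = x * (std / 0.5) + (mean - 0.5) / 0.5  # the per-channel map from _transform_input
    assert torch.allclose(y, (p - 0.5) / 0.5, atol=1e-6)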
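The convolutions/fc split built in models.py above appears to follow the pattern of the existing resnet18 wrapper: torchvision submodules rechained inside nn.Sequential containers consumed by the clinicadl CNN base class. Below is a minimal standalone sketch of the same split over torchvision's stock inception_v3. Two choices in the sketch are assumptions not in the patch: aux_logits=False, because InceptionAux returns class logits rather than a feature map and so cannot sit between Mixed_6e and Mixed_7a inside a Sequential (as model.AuxLogits does above), and an nn.Flatten() to bridge avgpool and the linear head.

    import torch
    from torch import nn
    from torchvision.models import inception_v3

    model = inception_v3(pretrained=False, aux_logits=False, init_weights=True)

    convolutions = nn.Sequential(
        model.Conv2d_1a_3x3, model.Conv2d_2a_3x3, model.Conv2d_2b_3x3,
        model.maxpool1, model.Conv2d_3b_1x1, model.Conv2d_4a_3x3, model.maxpool2,
        model.Mixed_5b, model.Mixed_5c, model.Mixed_5d,
        model.Mixed_6a, model.Mixed_6b, model.Mixed_6c, model.Mixed_6d, model.Mixed_6e,
        model.Mixed_7a, model.Mixed_7b, model.Mixed_7c,
        model.avgpool,
        nn.Flatten(),  # assumed here: (N, 2048, 1, 1) -> (N, 2048) before the head
    )
    fc = nn.Sequential(model.dropout, model.fc)

    x = torch.randn(2, 3, 299, 299)  # Inception v3 expects 299x299 RGB input
    features = convolutions(x)       # torch.Size([2, 2048])
    logits = fc(features)            # torch.Size([2, 1000])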
From 41ec4c6ddce5b6c3b7a899f5d35e9d623f0344c2 Mon Sep 17 00:00:00 2001
From: Camille Brianceau
Date: Tue, 19 Jul 2022 13:49:43 +0200
Subject: [PATCH 2/5] Run pre-commit (black)

---
 clinicadl/utils/network/cnn/inception.py | 33 ++++++++++++++----------
 clinicadl/utils/network/cnn/models.py    | 31 ++++++++++------------
 2 files changed, 33 insertions(+), 31 deletions(-)

diff --git a/clinicadl/utils/network/cnn/inception.py b/clinicadl/utils/network/cnn/inception.py
index 13438bc03..29849ac52 100644
--- a/clinicadl/utils/network/cnn/inception.py
+++ b/clinicadl/utils/network/cnn/inception.py
@@ -1,4 +1,3 @@
-
 import math
 from typing import Any, Callable, List, Optional, Tuple
 
@@ -14,22 +13,32 @@
     InceptionE,
 )
 
-model_urls = {"Inception_v3":"https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"}
+model_urls = {
+    "Inception_v3": "https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"
+}
 
 
 class InceptionDesigner(nn.Module):
     def __init__(
-        self,
-        input_size,
-        InceptionBlocks: Optional[List[Callable[..., nn.Module]]] = None,
-        num_classes=1000,
-        aux_logits: bool = True,
-        dropout: float = 0.5)-> None:
+        self,
+        input_size,
+        InceptionBlocks: Optional[List[Callable[..., nn.Module]]] = None,
+        num_classes=1000,
+        aux_logits: bool = True,
+        dropout: float = 0.5,
+    ) -> None:
         super(InceptionDesigner, self).__init__()
 
         if InceptionBlocks is None:
-            InceptionBlocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
-
+            InceptionBlocks = [
+                BasicConv2d,
+                InceptionA,
+                InceptionB,
+                InceptionC,
+                InceptionD,
+                InceptionE,
+                InceptionAux,
+            ]
 
         self.aux_logits = aux_logits
         self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
@@ -54,9 +63,6 @@ def __init__(
         self.Mixed_7b = InceptionE(1280)
         self.Mixed_7c = InceptionE(2048)
 
-
-
-
         input_tensor = self._transform_input(input_size)
         out = self.Conv2d_1a_3x3(input_tensor)
         out = self.Conv2d_2a_3x3(out)
@@ -89,7 +95,6 @@
             m.weight.data.fill_(1)
             m.bias.data.zero_()
 
-
     def _transform_input(self, x):
         if self.transform_input:
             x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5

diff --git a/clinicadl/utils/network/cnn/models.py b/clinicadl/utils/network/cnn/models.py
index 3ccb8946f..f6b151da9 100644
--- a/clinicadl/utils/network/cnn/models.py
+++ b/clinicadl/utils/network/cnn/models.py
@@ -170,6 +170,7 @@ def __init__(self, input_size, gpu=False, output_size=2, dropout=0.5):
         gpu=gpu,
     )
 
+
 class Inception(CNN):
     """
@@ -183,12 +184,17 @@ class Inception(CNN):
     https://arxiv.org/pdf/1512.00567v3.pdf
 
     """
-    def __init__(self,
-            input_size = (299,299,3),
-            gpu : bool= False):
-
-        model = InceptionDesigner(input_size,num_classes =1000, aux_logits = True, dropout =0.5)
-        model.load_state_dict(model_zoo.load_url("https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"))
+    def __init__(self, input_size=(299, 299, 3), gpu: bool = False):
+
+        model = InceptionDesigner(
+            input_size, num_classes=1000, aux_logits=True, dropout=0.5
+        )
+        model.load_state_dict(
+            model_zoo.load_url(
+                "https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"
+            )
+        )
 
         convolutions = nn.Sequential(
             model.Conv2d_1a_3x3,
@@ -210,23 +216,14 @@ def __init__(self, input_size=(299, 299, 3), gpu: bool = False):
             model.Mixed_7a,
             model.Mixed_7b,
             model.Mixed_7c,
-            model.avgpool
-        )
-
-        fc = nn.Sequential(
-            model.dropout,
-            model.fc
+            model.avgpool,
         )
 
-        super().__init__(
-            convolutions = convolutions,
-            fc = fc,
-            gpu = gpu
-
-        )
+        fc = nn.Sequential(model.dropout, model.fc)
+
+        super().__init__(convolutions=convolutions, fc=fc, gpu=gpu)
 
-
 class Stride_Conv5_FC3(CNN):
     """
     Reduce the 2D or 3D input image to an array of size output_size.

From 8db81e3ba2c95bfac61e1eb74263da8b7c876069 Mon Sep 17 00:00:00 2001
From: Camille Brianceau
Date: Tue, 19 Jul 2022 14:42:07 +0200
Subject: [PATCH 3/5] Run pre-commit (black)

---
 .pre-commit-config.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4f40635fe..8cf8f8bf6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,10 +1,10 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v2.3.0
+    rev: v4.3.0
     hooks:
       - id: check-yaml
   - repo: https://github.com/psf/black
-    rev: 22.1.0
+    rev: 22.6.0
     hooks:
       - id: black
   - repo: https://github.com/pycqa/isort

From 55f2beeac9bbb99019bcf5ab2ce4eed8cc535a25 Mon Sep 17 00:00:00 2001
From: Camille Brianceau
Date: Tue, 19 Jul 2022 14:50:52 +0200
Subject: [PATCH 4/5] Run pre-commit (black)

---
 clinicadl/utils/network/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clinicadl/utils/network/__init__.py b/clinicadl/utils/network/__init__.py
index cf211b336..e08fb8185 100644
--- a/clinicadl/utils/network/__init__.py
+++ b/clinicadl/utils/network/__init__.py
@@ -1,5 +1,5 @@
 from .autoencoder.models import AE_Conv4_FC3, AE_Conv5_FC3
-from .cnn.models import Conv4_FC3, Conv5_FC3, Stride_Conv5_FC3, resnet18, Inception
+from .cnn.models import Conv4_FC3, Conv5_FC3, Inception, Stride_Conv5_FC3, resnet18
 from .cnn.random import RandomArchitecture
 from .vae.vanilla_vae import (
     Vanilla3DdenseVAE,

From a3e891d1b10ea29417bc1c784094965400a668f1 Mon Sep 17 00:00:00 2001
From: Camille Brianceau
Date: Thu, 21 Jul 2022 10:14:15 +0200
Subject: [PATCH 5/5] Fix syntax problems

---
 clinicadl/utils/network/cnn/inception.py | 2 +-
 clinicadl/utils/network/cnn/models.py    | 7 ++-----
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/clinicadl/utils/network/cnn/inception.py b/clinicadl/utils/network/cnn/inception.py
index 29849ac52..7808196b9 100644
--- a/clinicadl/utils/network/cnn/inception.py
+++ b/clinicadl/utils/network/cnn/inception.py
@@ -13,7 +13,7 @@
     InceptionE,
 )
 
-model_urls = {
+inception_urls = {
     "Inception_v3": "https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"
 }
 

diff --git a/clinicadl/utils/network/cnn/models.py b/clinicadl/utils/network/cnn/models.py
index f6b151da9..55e519315 100644
--- a/clinicadl/utils/network/cnn/models.py
+++ b/clinicadl/utils/network/cnn/models.py
@@ -4,6 +4,7 @@
 from torch import nn
 from torchvision.models.resnet import BasicBlock
 
+from clinicadl.utils.network.cnn.inception import InceptionDesigner, inception_urls
 from clinicadl.utils.network.cnn.resnet import ResNetDesigner, model_urls
 from clinicadl.utils.network.network_utils import PadMaxPool2d, PadMaxPool3d
 from clinicadl.utils.network.sub_network import CNN
@@ -190,11 +191,7 @@ def __init__(self, input_size=(299, 299, 3), gpu: bool = False):
 
         model = InceptionDesigner(
             input_size, num_classes=1000, aux_logits=True, dropout=0.5
         )
-        model.load_state_dict(
-            model_zoo.load_url(
-                "https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"
-            )
-        )
+        model.load_state_dict(model_zoo.load_url(inception_urls["Inception_v3"]))
 
         convolutions = nn.Sequential(
             model.Conv2d_1a_3x3,
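For reference, the checkpoint behind inception_urls is the standard torchvision Inception v3 weight file, so it loads directly into the stock Inception3 module. A minimal standalone sketch of that load (not part of the series; InceptionDesigner itself cannot be instantiated yet, since its constructor still runs the layer-by-layer trace on the raw input_size tuple and reads the unset transform_input attribute):

    from torch.utils import model_zoo
    from torchvision.models import Inception3

    inception_urls = {
        "Inception_v3": "https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth"
    }

    # The checkpoint contains AuxLogits weights, so the model must be built
    # with aux_logits=True for the (strict by default) load to succeed.
    model = Inception3(num_classes=1000, aux_logits=True, init_weights=False)
    model.load_state_dict(model_zoo.load_url(inception_urls["Inception_v3"]))
    model.eval()  # use BatchNorm running statistics rather than batch statistics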