Commit

test started failing
Sarvesh-Kesharwani authored Sep 12, 2023
1 parent dacca59 commit 3da8c14
Showing 4 changed files with 21 additions and 26 deletions.
11 changes: 6 additions & 5 deletions ivy_models/regnet/layers.py
@@ -134,6 +134,7 @@ def __init__(
 kernel_size,
 stride,
 padding,
+groups,
 norm_layer,
 activation_layer,
 dilations,
@@ -183,9 +184,9 @@ def __init__(
 scale_activation: Callable[..., ivy.Module] = ivy.Sigmoid,
 ) -> None:
 super().__init__()
-self.avgpool = ivy.AdaptiveAvgPool2D(1)
-self.fc1 = ivy.Conv2D(input_channels, squeeze_channels, [1, 1])
-self.fc2 = ivy.Conv2D(squeeze_channels, input_channels, [1, 1])
+self.avgpool = ivy.AdaptiveAvgPool2d(1)
+self.fc1 = ivy.Conv2D(input_channels, squeeze_channels, [1, 1], 1, 0)
+self.fc2 = ivy.Conv2D(squeeze_channels, input_channels, [1, 1], 1, 0)
 self.activation = activation()
 self.scale_activation = scale_activation()
 
@@ -296,7 +297,7 @@ def __init__(
 bottleneck_multiplier,
 se_ratio,
 )
-self.activation = activation_layer(inplace=True)
+self.activation = activation_layer() # inplace=True
 
 def _forward(self, x: ivy.Array) -> ivy.Array:
 if self.proj is not None:
@@ -336,7 +337,7 @@ def __init__(
 bottleneck_multiplier,
 se_ratio,
 )
-self._submodules = list(block)
+self._submodules = block
 
 
 def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:
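Taken together, the layers.py changes pass groups through to the conv block, switch to the lowercase ivy.AdaptiveAvgPool2d, give ivy.Conv2D explicit stride and padding arguments, drop the inplace=True keyword (commented out, presumably because the ivy activation layers do not take it), and stop wrapping the block container in list(...). Below is a minimal sketch of the squeeze-excitation module as it reads after this hunk; the class name, the activation default, and the _forward body are assumptions not shown in the diff.

from typing import Callable

import ivy


class SqueezeExcitation(ivy.Module):  # class name assumed; only the constructor body appears in the diff
    def __init__(
        self,
        input_channels: int,
        squeeze_channels: int,
        activation: Callable[..., ivy.Module] = ivy.ReLU,  # default assumed
        scale_activation: Callable[..., ivy.Module] = ivy.Sigmoid,
    ) -> None:
        super().__init__()
        # Mirrors the "+" side of the hunk above: lowercase AdaptiveAvgPool2d and
        # explicit stride=1, padding=0 on the 1x1 convolutions.
        self.avgpool = ivy.AdaptiveAvgPool2d(1)
        self.fc1 = ivy.Conv2D(input_channels, squeeze_channels, [1, 1], 1, 0)
        self.fc2 = ivy.Conv2D(squeeze_channels, input_channels, [1, 1], 1, 0)
        self.activation = activation()
        self.scale_activation = scale_activation()

    def _forward(self, x: ivy.Array) -> ivy.Array:
        # Assumed squeeze-excitation gating: global pool, two 1x1 convs, scale the input.
        scale = self.avgpool(x)
        scale = self.activation(self.fc1(scale))
        scale = self.scale_activation(self.fc2(scale))
        return x * scale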
20 changes: 2 additions & 18 deletions ivy_models/regnet/regnet.py
@@ -1,5 +1,4 @@
 # global
-import math
 from collections import OrderedDict
 from typing import Callable, Optional, Type
 import builtins
@@ -138,23 +137,8 @@ def _build(self, *args, **kwargs):
 
 self.trunk_output = ivy.Sequential(OrderedDict(blocks))
 
-self.avgpool = ivy.AdaptiveAvgPool2D((1, 1))
-self.fc = ivy.Linear(
-in_featuReg=current_width, out_featuReg=self.spec.num_classes
-)
-
-# Performs RegNet-style weight initialization
-for m in self.modules():
-if isinstance(m, ivy.Conv2D):
-# Note that there is no bias due to BN
-fan_out = m._filter_shape[0] * m._filter_shape[1] * m._output_channels
-ivy.init.normal_(m.weight, mean=0.0, std=math.sqrt(2.0 / fan_out))
-elif isinstance(m, ivy.BatchNorm2D):
-ivy.init.ones_(m.weight)
-ivy.init.zeros_(m.bias)
-elif isinstance(m, ivy.Linear):
-ivy.init.normal_(m.weight, mean=0.0, std=0.01)
-ivy.init.zeros_(m.bias)
+self.avgpool = ivy.AdaptiveAvgPool2d((1, 1))
+self.fc = ivy.Linear(current_width, self.spec.num_classes)
 
 def _forward(self, x):
 x = self.stem(x)
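In regnet.py the math import goes away along with the RegNet-style weight-initialization loop, and the classifier head is rebuilt with the lowercase ivy.AdaptiveAvgPool2d plus a positional ivy.Linear call in place of the earlier keyword form (whose in_featuReg/out_featuReg keywords look like a bad search-and-replace). A small sketch of the resulting forward flow follows; only the stem call appears in the diff context, so everything after it is assumed from the standard RegNet layout (stem, trunk, global pool, flatten, classifier) and the helper name is hypothetical.

import ivy


def regnet_forward(model, x):
    # Hypothetical reconstruction of RegNet._forward after this commit;
    # only "x = self.stem(x)" is visible in the diff context.
    x = model.stem(x)
    x = model.trunk_output(x)              # ivy.Sequential(OrderedDict(blocks))
    x = model.avgpool(x)                   # ivy.AdaptiveAvgPool2d((1, 1))
    x = ivy.reshape(x, (x.shape[0], -1))   # flatten before the classifier
    return model.fc(x)                     # ivy.Linear(current_width, num_classes)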
5 changes: 2 additions & 3 deletions ivy_models/regnet/regnet_variant_helper.py
@@ -23,7 +23,7 @@
 torch_img = torch.unsqueeze(torch_img, 0)
 
 
-from torchvision.models import *
+from torchvision.models import RegNet_X_16GF_Weights, RegNet_X_1_6GF_Weights, RegNet_X_32GF_Weights, RegNet_X_3_2GF_Weights, RegNet_X_400MF_Weights, RegNet_X_800MF_Weights, RegNet_X_8GF_Weights, RegNet_Y_16GF_Weights, RegNet_Y_1_6GF_Weights, RegNet_Y_32GF_Weights, RegNet_Y_3_2GF_Weights, RegNet_Y_400MF_Weights, RegNet_Y_800MF_Weights, RegNet_Y_8GF_Weights, regnet_x_16gf, regnet_x_1_6gf, regnet_x_32gf, regnet_x_3_2gf, regnet_x_400mf, regnet_x_800mf, regnet_x_8gf, regnet_y_16gf, regnet_y_1_6gf, regnet_y_32gf, regnet_y_3_2gf, regnet_y_400mf, regnet_y_800mf, regnet_y_8gf
 
 VARIANTS = {
 RegNet_Y_400MF_Weights: regnet_y_400mf,
@@ -43,7 +43,6 @@
 RegNet_X_32GF_Weights: regnet_x_32gf,
 }
 
-import numpy as np
 
 variant_code_list = [
 "y_400mf",
@@ -74,7 +73,7 @@
 torch_logits = torch.take(torch_output[0], torch_classes)
 
 with open("output_4.txt", "a") as file:
-file.write(f" {variant_code_list[variant_count]} : np.array({torch_logits}\n")
+file.write(f" {variant_code_list[variant_count]} : np.array({torch_classes}\n")
 variant_count += 1
 
 # print("Indices of the top 3 classes are:", torch_classes)
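The helper script now replaces the star import from torchvision.models with an explicit import list, drops the unused numpy import, and writes torch_classes (the top-3 class indices) instead of torch_logits, which is what produces the tensor([...], device='cuda:0') entries appended to output_4.txt below. A minimal sketch of that logging step follows; the torch.topk call and the function name are assumptions (only the torch.take and file.write lines appear in the diff), and the format string is kept verbatim, including what appears to be a missing closing parenthesis after the tensor, which matches the written lines.

import torch


def log_top3(variant_code: str, torch_output: torch.Tensor) -> None:
    # Assumed: torch_classes holds the indices of the three largest outputs.
    torch_classes = torch.topk(torch_output[0], k=3).indices
    torch_logits = torch.take(torch_output[0], torch_classes)  # still computed, no longer written
    with open("output_4.txt", "a") as file:
        # Format string copied from the commit; note the unclosed "(" after
        # np.array, which is why the appended lines end without a ")".
        file.write(f" {variant_code} : np.array({torch_classes}\n")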
11 changes: 11 additions & 0 deletions output_4.txt
@@ -12,3 +12,14 @@ x_3_2gf400 : np.array([0.8212, 0.1721, 0.0055])
 x_8gf : np.array([0.8271, 0.1659, 0.0063])
 x_16gf : np.array([0.7613, 0.2340, 0.0043])
 x_32gf : np.array([0.7828, 0.2146, 0.0022])
+y_400mf : np.array(tensor([282, 281, 285], device='cuda:0')
+y_800mf : np.array(tensor([282, 281, 285], device='cuda:0')
+y_1_6gf : np.array(tensor([282, 281, 285], device='cuda:0')
+y_3_2gf : np.array(tensor([282, 281, 285], device='cuda:0')
+y_8gf : np.array(tensor([282, 281, 285], device='cuda:0')
+y_16gf : np.array(tensor([282, 281, 285], device='cuda:0')
+y_32gf : np.array(tensor([282, 281, 285], device='cuda:0')
+x_400mf : np.array(tensor([282, 281, 285], device='cuda:0')
+x_800mf : np.array(tensor([282, 281, 285], device='cuda:0')
+x_1_6gf : np.array(tensor([282, 281, 285], device='cuda:0')
+x_3_2gf400 : np.array(tensor([282, 281, 285], device='cuda:0')
