Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Lazy Config #82

Open
wants to merge 45 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 33 commits
Commits
Show all changes
45 commits
Select commit Hold shift + click to select a range
d4f742a
intial commit
abhi-glitchhg Mar 16, 2022
0edb1fa
LazyCall implementation
abhi-glitchhg Mar 31, 2022
422e68e
test added
abhi-glitchhg Mar 31, 2022
bd63e31
minor change
abhi-glitchhg Mar 31, 2022
3eb6c78
pre-commit
abhi-glitchhg Mar 31, 2022
e288838
Delete vanilla.yaml
abhi-glitchhg Mar 31, 2022
370e6f5
pre-commit
abhi-glitchhg Mar 31, 2022
0f1942b
hopefully last
abhi-glitchhg Mar 31, 2022
b1907cb
pre-commit
abhi-glitchhg Mar 31, 2022
9a0f6b5
pre-commit
abhi-glitchhg Mar 31, 2022
fd9f3df
pre-commit
abhi-glitchhg Mar 31, 2022
b15323a
changing click version from 8.0.1 to 8.0.4
abhi-glitchhg Mar 31, 2022
84987e5
this commit contains only minor changes due to diffetent black hook; …
abhi-glitchhg Mar 31, 2022
137ec6c
Update lazy.py
abhi-glitchhg Mar 31, 2022
368662d
Merge branch 'main' into config
abhi-glitchhg Mar 31, 2022
aa6aac6
fixes black hook issue
abhi-glitchhg Mar 31, 2022
9a19923
Merge branch 'main' into config
abhi-glitchhg Apr 2, 2022
8688a51
lazyconfig
abhi-glitchhg Apr 8, 2022
220d1d8
lazyconfig added
abhi-glitchhg Apr 11, 2022
bb1c126
lil bit of formatting
abhi-glitchhg Apr 11, 2022
4fa6098
added relative loading method in test
abhi-glitchhg Apr 11, 2022
bf155b0
added packages to requirements.txt
abhi-glitchhg Apr 11, 2022
18e9ed9
WIP
abhi-glitchhg Apr 12, 2022
1bfa3ff
WIP
abhi-glitchhg Apr 13, 2022
1bed82f
[WIP]
abhi-glitchhg Apr 13, 2022
f1ed4dc
trying to increase coverge
abhi-glitchhg Apr 13, 2022
d7fdf8a
Merge branch 'main' into config
abhi-glitchhg Apr 15, 2022
473de0d
Update requirements.txt
abhi-glitchhg Apr 15, 2022
1ffab02
adding hydra.utils._locate for dynamically locating objects.
abhi-glitchhg Apr 17, 2022
1f3a8cd
Merge branch 'config' of https://github.com/abhi-glitchhg/vformer int…
abhi-glitchhg Apr 17, 2022
1c5f7be
test coverage
abhi-glitchhg Apr 17, 2022
e30502a
minor nits
abhi-glitchhg Apr 19, 2022
21581e6
added config file for swin transformer.
abhi-glitchhg Apr 19, 2022
bcb3a29
change variable name in config file
abhi-glitchhg Apr 19, 2022
befa87e
Merge branch 'SforAiDl:main' into config
abhi-glitchhg Apr 25, 2022
a1ade82
rename the config file
abhi-glitchhg May 26, 2022
517aeb8
removed md files
abhi-glitchhg May 29, 2022
4eeb3c1
formatting
abhi-glitchhg May 29, 2022
bd50892
code formatting - lint issue resolved
abhi-glitchhg May 29, 2022
1b1f816
hopefully final
abhi-glitchhg May 31, 2022
a51faae
final
abhi-glitchhg May 31, 2022
9e1ea6f
link config files with setup.py
abhi-glitchhg Jun 5, 2022
d9f9623
checking coverage
abhi-glitchhg Jun 5, 2022
083bf2e
rollback
abhi-glitchhg Jun 5, 2022
3ebf701
minor change
abhi-glitchhg Jun 9, 2022
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .isort.cfg
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[settings]
known_third_party = cv2,einops,numpy,setuptools,torch,torchvision
known_third_party = cloudpickle,cv2,einops,hydra,numpy,omegaconf,pytest,setuptools,torch,torchvision,yaml
multi_line_output=3
include_trailing_comma=True
force_grid_wrap=0
Expand Down
2 changes: 2 additions & 0 deletions configs/Swin/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Config File for Swin Transformer

22 changes: 22 additions & 0 deletions configs/Swin/swin_base_patch4_window7_224.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from vformer.config import LazyCall as L
from vformer.models import SwinTransformer

# Lazily-built Swin-Base (patch 4, window 7, 224 px) classifier.
# Nothing is constructed here: L(SwinTransformer)(...) only records the target
# and its keyword arguments; the model is created by instantiate(cfg.model).
model = L(SwinTransformer)(
    img_size=224,  # input resolution (square)
    in_channels=3,
    patch_size=4,
    n_classes=1000,  # ImageNet-1k head
    embedding_dim=128,
    depths=(2, 2, 18, 2),  # blocks per stage
    num_heads=(4, 8, 16, 32),  # attention heads per stage
    window_size=7,
    mlp_ratio=4,
    qkv_bias=True,
    qk_scale=None,  # None -> default head_dim ** -0.5 scaling; TODO confirm in SwinTransformer
    p_dropout=0.1,
    attn_dropout=0.1,
    drop_path_rate=0.1,
    ape=True,  # absolute position embedding
    decoder_config=None,  # classification only, no dense decoder
    patch_norm=True,
)
1 change: 1 addition & 0 deletions configs/Vanilla/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Config file for VanillaViT
18 changes: 18 additions & 0 deletions configs/Vanilla/vit_tiny_patch_16_224.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
from vformer.config import LazyCall as L
from vformer.models import VanillaViT

# Lazily-built ViT-Tiny (patch 16, 224 px) classifier.
# L(VanillaViT)(...) only records the constructor and its kwargs; the actual
# module is built later via instantiate(cfg.model).
model = L(VanillaViT)(
    img_size=224,  # input resolution (square)
    in_channels=3,
    patch_size=16,
    embedding_dim=192,  # ViT-Tiny width
    head_dim=192,
    depth=12,
    attn_heads=3,
    encoder_mlp_dim=192,
    decoder_config=None,  # classification only, no dense decoder
    pool="cls",  # use the [CLS] token for classification
    p_dropout_encoder=0.1,
    p_dropout_embedding=0.1,
    n_classes=1000,  # ImageNet-1k head
)
6 changes: 6 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,18 @@ arrow==1.1.1
attrs==21.2.0
backports.entry-points-selectable==1.1.0
binaryornot==0.4.4
black==22.3.0
certifi==2021.5.30
cfgv==3.3.1
chardet==4.0.0
charset-normalizer==2.0.4
click==8.0.1
cloudpickle==2.0.0
cookiecutter==1.7.3
distlib==0.3.2
einops==0.3.2
filelock==3.0.12
hydra-core==1.1.1
identify==2.2.13
idna==3.2
iniconfig==1.1.1
Expand All @@ -19,6 +22,7 @@ jinja2-time==0.2.0
MarkupSafe==2.0.1
nodeenv==1.6.0
olefile
omegaconf
packaging==21.0
Pillow
platformdirs==2.3.0
Expand All @@ -40,3 +44,5 @@ torchvision>=0.11.0
typing-extensions
urllib3==1.26.6
virtualenv==20.7.2
PyYAML==5.4.1

2 changes: 2 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import codecs
import glob
import os
import shutil

from setuptools import find_packages, setup

Expand Down
2 changes: 2 additions & 0 deletions tests/dir1/dir1_a.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Base values re-imported (and therefore re-executed) by sibling test configs.
dir1a_str = "base_a_1"
dir1a_dict = dict(a=1, b=2)
10 changes: 10 additions & 0 deletions tests/dir1/dir1_b.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
from vformer.config import LazyConfig

# equivalent to relative import
# Load sibling config by path relative to this file (equivalent to a relative
# import, but re-executed on every load).
dir1a_str, dir1a_dict = LazyConfig.load_rel("dir1_a.py", ("dir1a_str", "dir1a_dict"))

dir1b_str = dir1a_str + "_from_b"
dir1b_dict = dir1a_dict

# Every import is a reload: not modified by other config files
# NOTE(review): attribute access on dir1a_dict works here only if load_rel
# returns omegaconf-style containers rather than plain dicts — confirm in
# vformer.config.LazyConfig.
assert dir1a_dict.a == 1
12 changes: 12 additions & 0 deletions tests/root_cfg.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
from itertools import count

from vformer.config import LazyCall as L

from .dir1.dir1_a import dir1a_dict, dir1a_str

# Mutate the value loaded above; later loads of dir1_a must NOT see this
# (the config loader re-executes modules on each import).
# NOTE(review): attribute assignment presumes the loader wraps dicts in
# omegaconf containers — confirm in vformer.config.LazyConfig.
dir1a_dict.a = "modified"

# modification above won't affect future imports
from .dir1.dir1_b import dir1b_dict, dir1b_str

# Lazy call object: records itertools.count plus kwargs, resolved only at
# instantiate() time.
lazyobj = L(count)(x=dir1a_str, y=dir1b_str)
192 changes: 192 additions & 0 deletions tests/test_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,192 @@
import os
import tempfile
from itertools import count

import pytest
import torch
from omegaconf import DictConfig

from vformer.config import LazyCall
from vformer.config import LazyCall as L
from vformer.config import LazyConfig, instantiate
from vformer.models import PVTSegmentation, SwinTransformer, VanillaViT, ViViTModel2


def test_lazy():
    """LazyCall + instantiate round-trip over several model families.

    Builds lazy configs for classification models (VanillaViT,
    SwinTransformer, ViViTModel2) and a dense-prediction model
    (PVTSegmentation), instantiates each one, and verifies the
    forward-pass output shapes.
    """
    # --- classification model configs ---
    cfg_vanilla = LazyCall(VanillaViT)(img_size=224, patch_size=7, n_classes=10)
    cfg_swin = LazyCall(SwinTransformer)(
        img_size=224,
        patch_size=4,
        in_channels=3,
        n_classes=10,
        embedding_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        p_dropout=0.2,
    )
    cfg_vivit = LazyCall(ViViTModel2)(
        img_size=224,
        in_channels=3,
        patch_size=16,
        embedding_dim=192,
        depth=4,
        num_heads=3,
        head_dim=64,
        num_frames=1,
        n_classes=10,
    )

    # --- dense model config; LazyCall results support item assignment ---
    cfg_pvt = LazyCall(PVTSegmentation)()
    cfg_pvt["img_size"] = 224

    img_batch = torch.randn(4, 3, 224, 224)
    vdo_batch = torch.randn([32, 16, 3, 224, 224])

    model_vanilla = instantiate(cfg_vanilla)
    model_swin = instantiate(cfg_swin)
    model_vivit = instantiate(cfg_vivit)

    model_pvt = instantiate(cfg_pvt)

    assert model_vanilla(img_batch).shape == (4, 10)
    assert model_swin(img_batch).shape == (4, 10)
    assert model_pvt(img_batch).shape == (4, 1, 224, 224)
    assert model_vivit(vdo_batch).shape == (32, 10)


def test_raise_errors():
    """Error paths: bad argument types raise; non-dict configs pass through."""
    not_a_mapping = "strings"
    with pytest.raises(TypeError):
        LazyConfig(not_a_mapping)
    with pytest.raises(TypeError):
        LazyCall(2)  # target must be callable

    # A plain list is not a config: instantiate returns it untouched.
    plain_list = [1, 2, 3, 4]
    result = instantiate(plain_list)
    assert result == plain_list, "it should return same object"

    # A _target_ that cannot be located must fail loudly.
    with pytest.raises(AssertionError):
        instantiate({"_target_": "test"})


def test_load():
    """Each LazyConfig.load is a fresh execution of the config module.

    Verifies that mutations made inside root_cfg.py are visible in the
    loaded config, that sibling configs are reloaded independently, and
    that mutating a loaded config does not leak into subsequent loads.
    """
    # NOTE: scraped review-UI text that had been pasted into this function
    # body ("marked this conversation as resolved…") has been removed — it
    # was not valid Python.
    root_filename = os.path.join(os.path.dirname(__file__), "root_cfg.py")
    cfg = LazyConfig.load(root_filename)

    # root_cfg.py sets dir1a_dict.a = "modified" after importing it.
    assert cfg.dir1a_dict.a == "modified"

    # dir1_b re-loads dir1_a, so it sees the pristine value.
    assert cfg.dir1b_dict.a == 1
    assert cfg.lazyobj.x == "base_a_1"

    cfg.lazyobj.x = "new_x"
    # Reload: the mutation above must not persist across loads.
    cfg = LazyConfig.load(root_filename)
    assert cfg.lazyobj.x == "base_a_1"


def test_save_load():
    """Round-trip a loaded config through a YAML file and compare."""
    src = os.path.join(os.path.dirname(__file__), "root_cfg.py")

    cfg = LazyConfig.load(src)
    with tempfile.TemporaryDirectory(prefix="vformer") as tmpdir:
        yaml_path = os.path.join(tmpdir, "test_config.yaml")
        LazyConfig.save(cfg, yaml_path)
        cfg2 = LazyConfig.load(yaml_path)

    # Saving serializes the callable target into its dotted path;
    # the in-memory config still holds the callable itself.
    assert cfg2.lazyobj._target_ == "itertools.count"
    assert cfg.lazyobj._target_ == count
    cfg2.lazyobj.pop("_target_")
    cfg.lazyobj.pop("_target_")
    # the rest are equal
    assert cfg == cfg2


def test_failed_save():
    """Saving a config with an unserializable value still produces files.

    A lambda cannot be dumped to YAML; saving should nevertheless write
    the .yaml file and — presumably as a pickle fallback — a companion
    .pkl file next to it (confirm against LazyConfig.save).
    """
    cfg = DictConfig({"x": lambda: 3}, flags={"allow_objects": True})
    with tempfile.TemporaryDirectory(prefix="vformer") as d:
        fname = os.path.join(d, "test_config.yaml")
        LazyConfig.save(cfg, fname)
        # Assert truthiness directly instead of comparing to True (E712).
        assert os.path.exists(fname)
        assert os.path.exists(fname + ".pkl")


def test_overrides():
    """apply_overrides mutates the loaded config in place."""
    path = os.path.join(os.path.dirname(__file__), "root_cfg.py")

    loaded = LazyConfig.load(path)
    overrides = ["lazyobj.x=123", 'dir1b_dict.a="123"']
    LazyConfig.apply_overrides(loaded, overrides)
    # Quoted override stays a string; unquoted parses as an int.
    assert loaded.dir1b_dict.a == "123"
    assert loaded.lazyobj.x == 123


def test_invalid_overrides():
    """Overriding below a leaf value is rejected with KeyError."""
    path = os.path.join(os.path.dirname(__file__), "root_cfg.py")

    loaded = LazyConfig.load(path)
    # lazyobj.x is a scalar, so lazyobj.x.xxx is not a valid key path.
    with pytest.raises(KeyError):
        LazyConfig.apply_overrides(loaded, ["lazyobj.x.xxx=123"])


def test_to_py():
    """LazyConfig.to_py dumps a config as executable ``cfg.*`` statements."""
    root_filename = os.path.join(os.path.dirname(__file__), "root_cfg.py")

    cfg = LazyConfig.load(root_filename)
    # Nest a lazy object inside plain containers to exercise recursive dumping.
    cfg.lazyobj.x = {
        "a": 1,
        "b": 2,
        "c": L(count)(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]}),
    }
    cfg.list = ["a", 1, "b", 3.2]
    py_str = LazyConfig.to_py(cfg)
    # Expected output: leaf assignments in key order, lazy calls rendered as
    # their dotted-path targets. Must stay byte-exact.
    expected = """cfg.dir1a_dict.a = "modified"
cfg.dir1a_dict.b = 2
cfg.dir1b_dict.a = 1
cfg.dir1b_dict.b = 2
cfg.lazyobj = itertools.count(
    x={
        "a": 1,
        "b": 2,
        "c": itertools.count(x={"r": "a", "s": 2.4, "t": [1, 2, 3, "z"]}),
    },
    y="base_a_1_from_b",
)
cfg.list = ["a", 1, "b", 3.2]
"""
    assert py_str == expected

    # YAML configs can be dumped too; only checks that to_py doesn't raise.
    root_filename = os.path.join(os.path.dirname(__file__), "testing.yaml")
    cfg = LazyConfig.load(root_filename)
    obj = LazyConfig.to_py(cfg)


def test_check_configs():
    """Load the shipped config files and instantiate their models."""
    repo_root = os.path.dirname(os.path.dirname(__file__))

    vanilla_path = os.path.join(
        repo_root,
        "configs",
        "Vanilla",
        "vit_tiny_patch_16_224.py",
    )

    cfg = LazyConfig.load(vanilla_path)
    # Override a few fields before building, as a user would.
    cfg.model.img_size = 224
    cfg.model.in_channels = 3
    cfg.model.n_classes = 1000

    new_model = instantiate(cfg.model)
    assert new_model(torch.randn(4, 3, 224, 224)).shape == (4, 1000)

    # An unknown keyword must surface as a TypeError at instantiation time.
    cfg.model.num_classes = 10
    with pytest.raises(TypeError):
        new_model = instantiate((cfg.model))

    swin_path = os.path.join(
        repo_root,
        "configs",
        "Swin",
        "swin_base_patch4_window7_224.py",
    )
    cfg = LazyConfig.load(swin_path)
    new_model = instantiate(cfg.model)
    assert new_model(torch.randn(4, 3, 224, 224)).shape == (4, 1000)
2 changes: 2 additions & 0 deletions tests/testing.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
x: 3
y: 4
1 change: 1 addition & 0 deletions vformer/attention/cross.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ def __init__(self, cls_dim, patch_dim, num_heads=8, head_dim=64):
inner_dim = num_heads * head_dim
self.num_heads = num_heads
self.scale = head_dim**-0.5

self.fl = (
nn.Linear(cls_dim, patch_dim) if not cls_dim == patch_dim else nn.Identity()
)
Expand Down
2 changes: 2 additions & 0 deletions vformer/config/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from .config_utils import instantiate
from .lazy import LazyCall, LazyConfig
Loading