utility.py (forked from alex04072000/FuSta)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from torch.autograd import Variable


class MeanShift(nn.Conv2d):
    """1x1 convolution that subtracts (sign=-1) or adds back (sign=+1) a per-channel RGB mean, scaled by rgb_range and divided by rgb_std."""

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        # Freeze the layer: gradients must be disabled on each parameter,
        # since setting requires_grad on the module itself has no effect.
        for p in self.parameters():
            p.requires_grad = False
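

# A minimal usage sketch: MeanShift layers are typically used in pairs to
# subtract the dataset mean before a network and add it back afterwards.
# The mean/std values below are illustrative assumptions, not values taken
# from this repository.
def _meanshift_example():
    rgb_mean = (0.4488, 0.4371, 0.4040)  # assumed per-channel means
    rgb_std = (1.0, 1.0, 1.0)
    sub_mean = MeanShift(1.0, rgb_mean, rgb_std, sign=-1)
    add_mean = MeanShift(1.0, rgb_mean, rgb_std, sign=1)
    x = torch.rand(1, 3, 64, 64)
    # Round trip: subtracting and then re-adding the mean recovers the input.
    return add_mean(sub_mean(x))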


def make_optimizer(args, my_model):
    """Build an optimizer over the trainable parameters of my_model, selected by args.optimizer."""
    trainable = filter(lambda x: x.requires_grad, my_model.parameters())

    if args.optimizer == 'SGD':
        optimizer_function = optim.SGD
        kwargs = {'momentum': 0.9}
    elif args.optimizer == 'ADAM':
        optimizer_function = optim.Adam
        kwargs = {
            'betas': (0.9, 0.9999),
            'eps': 1e-08
        }
    elif args.optimizer == 'ADAMax':
        optimizer_function = optim.Adamax
        kwargs = {
            'betas': (0.9, 0.999),
            'eps': 1e-08
        }
    elif args.optimizer == 'RMSprop':
        optimizer_function = optim.RMSprop
        kwargs = {'eps': 1e-08}
    else:
        raise ValueError('unsupported optimizer: {}'.format(args.optimizer))

    kwargs['lr'] = args.lr
    # kwargs['weight_decay'] = args.weight_decay

    return optimizer_function(trainable, **kwargs)


def make_scheduler(args, my_optimizer):
    """Build a learning-rate scheduler: 'step' uses StepLR, 'step_M1_M2_...' uses MultiStepLR with the listed milestones."""
    if args.decay_type == 'step':
        scheduler = lrs.StepLR(
            my_optimizer,
            step_size=args.lr_decay,
            gamma=args.gamma
        )
    elif args.decay_type.find('step') >= 0:
        # e.g. 'step_100_200' -> milestones [100, 200]
        milestones = args.decay_type.split('_')
        milestones.pop(0)
        milestones = list(map(lambda x: int(x), milestones))
        scheduler = lrs.MultiStepLR(
            my_optimizer,
            milestones=milestones,
            gamma=args.gamma
        )
    else:
        raise ValueError('unsupported decay type: {}'.format(args.decay_type))

    return scheduler
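

# A minimal usage sketch, assuming an argparse-style namespace with the fields
# read above (optimizer, lr, decay_type, lr_decay, gamma); the exact argument
# names and values used by the training scripts may differ.
def _optimizer_example(my_model):
    from argparse import Namespace
    args = Namespace(optimizer='ADAM', lr=1e-4,
                     decay_type='step', lr_decay=200, gamma=0.5)
    optimizer = make_optimizer(args, my_model)
    scheduler = make_scheduler(args, optimizer)
    return optimizer, scheduler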


def CharbonnierFunc(data, epsilon=0.001):
    """Charbonnier penalty: a smooth, L1-like loss averaged over all elements."""
    return torch.mean(torch.sqrt(data ** 2 + epsilon ** 2))


class Module_CharbonnierLoss(nn.Module):
    """Charbonnier loss between a network output and its ground truth."""

    def __init__(self, epsilon=0.001):
        super(Module_CharbonnierLoss, self).__init__()
        self.epsilon = epsilon

    def forward(self, output, gt):
        return torch.mean(torch.sqrt((output - gt) ** 2 + self.epsilon ** 2))
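

# A minimal usage sketch of the Charbonnier loss in a single training step;
# the model, optimizer, and tensor shapes are illustrative assumptions.
def _charbonnier_example(model, optimizer, frames, gt):
    criterion = Module_CharbonnierLoss(epsilon=0.001)
    output = model(frames)
    loss = criterion(output, gt)  # scalar tensor
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()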


def to_variable(x):
    """Move a tensor to the GPU when available and wrap it in a Variable (a no-op wrapper on recent PyTorch versions)."""
    if torch.cuda.is_available():
        x = x.cuda()
    return Variable(x)


def moduleNormalize(frame):
    """Subtract fixed per-channel RGB means from a (N, 3, H, W) frame."""
    return torch.cat([(frame[:, 0:1, :, :] - 0.4631),
                      (frame[:, 1:2, :, :] - 0.4352),
                      (frame[:, 2:3, :, :] - 0.3990)], 1)


def normalize(x):
    """Map values from [0, 1] to [-1, 1]."""
    return x * 2.0 - 1.0


def denormalize(x):
    """Map values from [-1, 1] back to [0, 1]."""
    return (x + 1.0) / 2.0


def meshgrid(height, width, grid_min, grid_max):
    """Return two (1, height, width) grids: grid_x varies linearly from grid_min to grid_max along the width, grid_y along the height."""
    x_t = torch.matmul(
        torch.ones(height, 1), torch.linspace(grid_min, grid_max, width).view(1, width))
    y_t = torch.matmul(
        torch.linspace(grid_min, grid_max, height).view(height, 1), torch.ones(1, width))
    grid_x = x_t.view(1, height, width)
    grid_y = y_t.view(1, height, width)
    return grid_x, grid_y
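

# A minimal usage sketch, assuming the grids serve as a base sampling grid for
# torch.nn.functional.grid_sample-style warping (that use is an assumption,
# not stated in this file).
def _meshgrid_example():
    import torch.nn.functional as F
    h, w = 4, 6
    grid_x, grid_y = meshgrid(h, w, -1.0, 1.0)        # each (1, h, w)
    base_grid = torch.stack([grid_x, grid_y], dim=3)  # (1, h, w, 2), x then y
    img = torch.rand(1, 3, h, w)
    # With a zero flow offset, sampling at the base grid reproduces the input.
    return F.grid_sample(img, base_grid, align_corners=True)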