loss.py
import torch
#######################################################
#      STATISTICAL DISTANCES (LOSSES) IN PYTORCH      #
#######################################################
## Statistical distances for 1D weight distributions
## Inspired by the 1D statistical distances in scipy.stats
## PyTorch version, supporting autograd so each distance is a valid loss
## Inputs are assumed to be groups of same-length weight vectors:
## instead of (points, weights) pairs, full-length weight vectors are taken as inputs
## Code written by E. Bao, CASIA


def torch_wasserstein_loss(tensor_a, tensor_b):
    # Compute the first Wasserstein distance between two 1D distributions.
    return torch_cdf_loss(tensor_a, tensor_b, p=1)


def torch_energy_loss(tensor_a, tensor_b):
    # Compute the energy distance between two 1D distributions.
    # The energy distance equals sqrt(2) times the L2 distance between the CDFs.
    return (2 ** 0.5) * torch_cdf_loss(tensor_a, tensor_b, p=2)
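

# A minimal usage sketch (illustrative, not part of the original file) of the
# two wrappers above. Both expect non-negative weight vectors along the last
# dimension and reduce to a scalar; the values below are hand-checked.
#
#     a = torch.tensor([[0.0, 1.0]])   # all mass on the second bin
#     b = torch.tensor([[1.0, 0.0]])   # all mass on the first bin
#     torch_wasserstein_loss(a, b)     # -> tensor(1.0000): mass moves one bin
#     torch_energy_loss(a, b)          # -> tensor(1.4142) = sqrt(2) * 1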


def torch_cdf_loss(tensor_a, tensor_b, p=1):
    # The last dimension holds the weight distribution.
    # p is the order of the norm; p=1 gives the first Wasserstein distance.
    # Because the distributions are normalized here, the loss is scale-invariant,
    # so we recommend combining it with a difference-based loss such as L1.

    # Normalize the distributions, adding 1e-14 to the divisor to avoid 0/0.
    tensor_a = tensor_a / (torch.sum(tensor_a, dim=-1, keepdim=True) + 1e-14)
    tensor_b = tensor_b / (torch.sum(tensor_b, dim=-1, keepdim=True) + 1e-14)
    # Build the CDFs with a cumulative sum along the last dimension.
    cdf_tensor_a = torch.cumsum(tensor_a, dim=-1)
    cdf_tensor_b = torch.cumsum(tensor_b, dim=-1)
    # Pick the formula matching the norm order.
    if p == 1:
        cdf_distance = torch.sum(torch.abs(cdf_tensor_a - cdf_tensor_b), dim=-1)
    elif p == 2:
        cdf_distance = torch.sqrt(torch.sum(torch.pow(cdf_tensor_a - cdf_tensor_b, 2), dim=-1))
    else:
        cdf_distance = torch.pow(torch.sum(torch.pow(torch.abs(cdf_tensor_a - cdf_tensor_b), p), dim=-1), 1 / p)
    # Average over the batch to get a scalar loss.
    cdf_loss = cdf_distance.mean()
    return cdf_loss
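

# Worked trace of the p=1 branch (an illustrative addition): for the inputs
# below the CDFs are [0.5, 1, 1] and [0, 0.5, 1], the absolute difference is
# [0.5, 0.5, 0], and its sum is 1.0 -- each half of the mass moves one bin.
# Since every step is differentiable, the result is a valid autograd loss.
#
#     a = torch.tensor([[0.5, 0.5, 0.0]], requires_grad=True)
#     b = torch.tensor([[0.0, 0.5, 0.5]])
#     loss = torch_cdf_loss(a, b, p=1)  # -> tensor(1.0000, grad_fn=...)
#     loss.backward()                   # gradients flow back to `a`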


def torch_l2_loss(tensor_a, tensor_b, core):
    # L2 distance after projecting tensor_a through the matrix `core`:
    # tensor_a is mapped by `core` and compared directly against tensor_b.
    # Earlier variants, kept for reference:
    # return torch.mean(torch.true_divide(torch.sum(torch.pow(torch.matmul(tensor_a, core) - torch.matmul(tensor_b, core), 2), dim=-1), torch.sum(tensor_a * tensor_b > 0, dim=-1) + 1e-3))
    # return torch.norm(torch.matmul(tensor_a, core) - torch.matmul(tensor_b, core), dim=-1).mean()
    return torch.norm(torch.matmul(tensor_a, core) - tensor_b, dim=-1).mean()
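

# A minimal sketch of the expected shapes (these are assumptions, not stated
# in the original file): with `core` of shape (D, K) acting as a projection,
# tensor_a of shape (B, D) is mapped into the K-dimensional space of
# tensor_b, which has shape (B, K).
#
#     core = torch.randn(100, 16)
#     a = torch.rand(8, 100)
#     b = torch.rand(8, 16)
#     torch_l2_loss(a, b, core)  # mean L2 norm over the batch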


def torch_validate_distibution(tensor_a, tensor_b):
    # Zero-sized dimensions are not supported by PyTorch, so we assume the inputs are non-empty.
    # Weights should be non-negative, with a positive and finite sum.
    # We assume these conditions are enforced by network training,
    # so only the size match is checked here.
    if tensor_a.size() != tensor_b.size():
        raise ValueError("Input weight tensors must be of the same size")
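

# Self-contained smoke test (an illustrative addition, not part of the
# original file): checks the size validation and exercises each loss once.
# The shapes and the demo `core` projection are arbitrary assumptions.
if __name__ == "__main__":
    a = torch.rand(4, 32, requires_grad=True)
    b = torch.rand(4, 32)
    torch_validate_distibution(a, b)     # passes: shapes match
    print(torch_wasserstein_loss(a, b))  # scalar, autograd-capable
    print(torch_energy_loss(a, b))
    core = torch.randn(32, 8)            # arbitrary projection for the demo
    print(torch_l2_loss(a, torch.rand(4, 8), core))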