----------------------------------------------------------------------
-- This script demonstrates how to define a few different
-- loss functions:
-- + negative log-likelihood, using log-normalized output units (LogSoftMax)
-- + mean-square error
-- + margin loss (SVM-like)
--
-- Clement Farabet
----------------------------------------------------------------------
require 'torch' -- torch
require 'nn' -- provides all sorts of loss functions
----------------------------------------------------------------------
-- parse command line arguments
if not opt then
   print '==> processing options'
   cmd = torch.CmdLine()
   cmd:text()
   cmd:text('MNIST Loss Function')
   cmd:text()
   cmd:text('Options:')
   cmd:option('-loss', 'nll', 'type of loss function to minimize: nll | mse | margin')
   cmd:text()
   opt = cmd:parse(arg or {})
   -- to enable self-contained execution:
   model = nn.Sequential()
end
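
-- Example invocation from the shell (an illustrative note, not part of the
-- original script; assumes the Torch launcher 'th' is on your PATH):
--   th 3_loss.lua -loss margin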
-- 10-class problem
noutputs = 10
----------------------------------------------------------------------
print '==> define loss'
if opt.loss == 'margin' then
   -- This loss takes a vector of class scores, and the index of
   -- the ground-truth class as arguments. It is an SVM-like loss
   -- with a default margin of 1.
   criterion = nn.MultiMarginCriterion()
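
   -- A minimal illustration (added here as a sketch, not part of the original
   -- tutorial; the random scores and the class index 3 are made up): the
   -- criterion maps a vector of raw class scores plus a ground-truth index
   -- to a scalar loss.
   local exampleScores = torch.randn(noutputs)
   print('==> example margin loss on random scores: ' .. criterion:forward(exampleScores, 3))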
elseif opt.loss == 'nll' then
   -- This loss requires the outputs of the trainable model to
   -- be properly normalized log-probabilities, which can be
   -- achieved by adding a LogSoftMax layer
   model:add(nn.LogSoftMax())

   -- The loss works like the MultiMarginCriterion: it takes
   -- a vector of class scores, and the index of the ground-truth
   -- class as arguments.
   criterion = nn.ClassNLLCriterion()
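
   -- A minimal illustration (added as a sketch, not part of the original
   -- tutorial; the class index 3 is made up): ClassNLLCriterion expects
   -- log-probabilities, e.g. the output of a LogSoftMax layer.
   local exampleLogProbs = nn.LogSoftMax():forward(torch.randn(noutputs))
   print('==> example NLL loss on random scores: ' .. criterion:forward(exampleLogProbs, 3))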
elseif opt.loss == 'mse' then
   -- for MSE, we add a tanh, to restrict the model's output
   model:add(nn.Tanh())

   -- The mean-square error is not recommended for classification
   -- tasks, as it typically tries to do too much, by exactly modeling
   -- the 1-of-N distribution. For the sake of showing more examples,
   -- we still provide it here:
   criterion = nn.MSECriterion()
   criterion.sizeAverage = false

   -- Compared to the other losses, the MSE criterion needs a distribution
   -- as a target, instead of an index. Indeed, it is a regression loss!
   -- So we need to convert the label indices into 1-of-N target vectors:
   if trainData then
      -- convert training labels:
      local trsize = (#trainData.labels)[1]
      local trlabels = torch.Tensor( trsize, noutputs )
      trlabels:fill(-1)
      for i = 1,trsize do
         trlabels[{ i,trainData.labels[i] }] = 1
      end
      trainData.labels = trlabels

      -- convert test labels:
      local tesize = (#testData.labels)[1]
      local telabels = torch.Tensor( tesize, noutputs )
      telabels:fill(-1)
      for i = 1,tesize do
         telabels[{ i,testData.labels[i] }] = 1
      end
      testData.labels = telabels
   end
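
   -- A minimal illustration (added as a sketch, not part of the original
   -- tutorial; the random output and the 1-of-N target for class 3 are made
   -- up): MSE compares a tanh-squashed output against a +/-1 target vector.
   local exampleOutput = torch.tanh(torch.randn(noutputs))
   local exampleTarget = torch.Tensor(noutputs):fill(-1)
   exampleTarget[3] = 1
   print('==> example MSE loss on random outputs: ' .. criterion:forward(exampleOutput, exampleTarget))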
else
   error('unknown -loss')
end
----------------------------------------------------------------------
print '==> here is the loss function:'
print(criterion)