SequencerCriterion.lua
------------------------------------------------------------------------
--[[ SequencerCriterion ]]--
-- Applies a criterion to each of the inputs and targets in the
-- corresponding input and target Tables.
-- Useful for nn.Repeater and nn.Sequencer.
-- WARNING : assumes that the decorated criterion is stateless, i.e.
-- the backward doesn't need to be preceded by a commensurate forward.
------------------------------------------------------------------------
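-- Usage sketch (illustrative only, not part of this module) :
-- nn.Sequencer, nn.LSTM and nn.ClassNLLCriterion come from the rnn/nn packages;
-- inputsize, hiddensize, nclass, inputs and targets are assumed to be defined.
--
--   local rnn = nn.Sequencer(nn.Sequential()
--      :add(nn.LSTM(inputsize, hiddensize))
--      :add(nn.Linear(hiddensize, nclass))
--      :add(nn.LogSoftMax()))
--   local crit = nn.SequencerCriterion(nn.ClassNLLCriterion())
--
--   -- inputs : table of seqlen tensors of size batchsize x inputsize
--   -- targets : table of seqlen tensors of batchsize class indices
--   local outputs = rnn:forward(inputs)                  -- table of seqlen output tensors
--   local err = crit:forward(outputs, targets)           -- scalar loss summed over steps
--   local gradOutputs = crit:backward(outputs, targets)  -- table of per-step gradients
--   rnn:backward(inputs, gradOutputs)
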
local SequencerCriterion, parent = torch.class('nn.SequencerCriterion', 'nn.AbstractSequencerCriterion')

function SequencerCriterion:updateOutput(input, target)
   self.output = 0

   -- input/target are either seqlen-first tensors or tables of seqlen steps
   local seqlen
   if torch.isTensor(input) then
      assert(torch.isTensor(target), "expecting target Tensor since input is a Tensor")
      assert(target:size(1) == input:size(1), "target should have as many elements as input")
      seqlen = input:size(1)
   else
      assert(torch.type(target) == 'table', "expecting target table")
      assert(#target == #input, "target should have as many elements as input")
      seqlen = #input
   end

   -- sum the step-wise criterion losses over the sequence
   for i=1,seqlen do
      local criterion = self:getStepCriterion(i)
      self.output = self.output + criterion:forward(input[i], target[i])
   end

   if self.sizeAverage then
      self.output = self.output / seqlen
   end

   return self.output
end
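
-- Forward example (sketch) : with a seqlen x batchsize x nclass input tensor and
-- a seqlen x batchsize target tensor, the per-step losses are summed over the
-- seqlen dimension (and divided by seqlen when self.sizeAverage is true) :
--
--   local crit = nn.SequencerCriterion(nn.ClassNLLCriterion())
--   local err = crit:forward(input, target)   -- a single number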

function SequencerCriterion:updateGradInput(input, target)
   local seqlen
   if torch.isTensor(input) then
      assert(torch.isTensor(target), "expecting target Tensor since input is a Tensor")
      assert(target:size(1) == input:size(1), "target should have as many elements as input")
      seqlen = input:size(1)
   else
      assert(torch.type(target) == 'table', "expecting target table")
      assert(#target == #input, "target should have as many elements as input")
      seqlen = #input
   end

   -- backward through each step's criterion (assumes a stateless criterion, see WARNING above)
   local tableGradInput = {}
   for i=1,seqlen do
      local criterion = self:getStepCriterion(i)
      tableGradInput[i] = criterion:backward(input[i], target[i])
   end

   if self.sizeAverage then
      nn.utils.recursiveDiv(tableGradInput, seqlen)
   end

   if torch.isTensor(input) then
      -- pack the per-step gradients back into a single seqlen-first tensor
      self.gradInput = tableGradInput[1].new()
      self.gradInput:resize(seqlen, unpack(tableGradInput[1]:size():totable()))
      for step=1,seqlen do
         self.gradInput[step]:copy(tableGradInput[step])
      end
   else
      self.gradInput = tableGradInput
   end

   return self.gradInput
end
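
-- Backward example (sketch) : the returned gradInput mirrors the input type, i.e.
-- a table of per-step gradient tensors for table input, or a single seqlen-first
-- tensor for tensor input, so it can be passed straight to the wrapped Sequencer :
--
--   local gradOutputs = crit:backward(outputs, targets)
--   rnn:backward(inputs, gradOutputs)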