-- MaskRNN.lua (54 lines, 46 loc, 1.97 KB)
-- NOTE: web-page navigation text and gutter line numbers from the original
-- scrape were removed so this file is valid Lua.
------------------------------------------------------------------------
--[[ MaskRNN ]] --
-- Filter out outputs and grads of unnecessary timesteps to support
-- variable lengths in minibatch.
-- Input is of dimensions (T*N)*H ( the same as RNN) and output of (T*N)*H.
-- seqLengths: N, indicate the real length of each sample in a minibatch.
------------------------------------------------------------------------
require 'dpnn'
local MaskRNN, parent = torch.class("nn.MaskRNN", "nn.Decorator")
--- Wrap an RNN module so its inputs/outputs can be masked per sequence length.
-- @param module the nn.Module (typically an RNN) to decorate
function MaskRNN:__init(module)
    -- Validate the argument *before* handing it to the parent constructor,
    -- and give the assert a message so failures are self-explanatory.
    assert(torch.isTypeOf(module, 'nn.Module'),
           'MaskRNN expects an nn.Module to decorate')
    parent.__init(self, module)
end
--- Zero out timesteps beyond each sample's real length.
-- `input` is a TxNx* tensor (time-major); `seqLengths` is an N-vector giving
-- the true length of every sample in the minibatch. Entries past a sample's
-- length are zeroed in place.
function MaskRNN:filter(input, seqLengths)
    local maxT = input:size(1)
    local nSamples = input:size(2)
    assert(nSamples == seqLengths:size(1))
    for sample = 1, nSamples do
        local len = seqLengths[sample]
        if len < maxT then
            -- slice timesteps (len+1 .. maxT) of this sample and clear them
            input:sub(len + 1, maxT, sample, sample):zero()
        end
    end
end
--- Forward pass.
-- `input` is a pair { data, seqLengths }: data is (T*N)xH, seqLengths is N.
-- The data is reshaped to TxNxH for the wrapped RNN, then flattened back to
-- (T*N)x* on the way out.
function MaskRNN:updateOutput(input)
    local flat, seqLengths = input[1], input[2]
    -- (T*N)xH -> TxNxH; kept on self because backward reuses it
    self._input = flat:view(-1, seqLengths:size(1), flat:size(2))
    self.output = self.module:updateOutput(self._input)
    -- self:filter(self.output, seqLengths)  -- masking currently disabled
    local T, N = self._input:size(1), self._input:size(2)
    self.output = self.output:view(T * N, -1)
    return self.output
end
--- Backward pass w.r.t. input.
-- Reshapes the flat (T*N)x* gradOutput to TxNx* (using the T cached from the
-- forward pass via self._input), backprops through the wrapped module, and
-- flattens the result back to the shape of input[1]. Returns { gradInput }
-- so the seqLengths entry of the input table receives no gradient.
function MaskRNN:updateGradInput(input, gradOutput)
    local T = self._input:size(1)
    local N = input[2]:size(1)
    self._gradOutput = gradOutput:view(T, N, -1)
    -- self:filter(self._gradOutput, input[2])  -- masking currently disabled
    local grad = self.module:updateGradInput(self._input, self._gradOutput)
    self.gradInput = grad:viewAs(input[1])
    return { self.gradInput, nil }
end
-- Delegate parameter-gradient accumulation to the wrapped module, using the
-- reshaped TxNx* tensors cached by updateOutput/updateGradInput (the raw
-- flat `input`/`gradOutput` arguments are intentionally ignored).
function MaskRNN:accGradParameters(input, gradOutput, scale)
self.module:accGradParameters(self._input, self._gradOutput, scale)
end
-- Delegate the fused accumulate-and-update step to the wrapped module,
-- again substituting the cached reshaped tensors for the flat arguments.
function MaskRNN:accUpdateGradParameters(input, gradOutput, lr)
self.module:accUpdateGradParameters(self._input, self._gradOutput, lr)
end
-- Shared-parameter variant of accUpdateGradParameters; same delegation
-- pattern with the cached TxNx* tensors.
function MaskRNN:sharedAccUpdateGradParameters(input, gradOutput, lr)
self.module:sharedAccUpdateGradParameters(self._input, self._gradOutput, lr)
end