--------------------------------------------------------------------------------
-- Recycling embedding training example
--------------------------------------------------------------------------------
-- Alfredo Canziani, Apr 15
--------------------------------------------------------------------------------
package.path = "../?.lua;" .. package.path
require 'nn'
require 'TripletEmbedding'
colour = require 'trepl.colorize'
local b = colour.blue
torch.manualSeed(0)
batch = 5
embeddingSize = 3
imgSize = 20
-- Anchor training samples/images
aImgs = torch.rand(batch, 3, imgSize, imgSize)
-- Positive embedding batch
p = torch.rand(batch, embeddingSize)
-- Negative embedding batch
n = torch.rand(batch, embeddingSize)
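-- In this self-contained example p and n are just random tensors; in a real
-- training run they would be embeddings recycled from a previous epoch,
-- which is what the Identity() branches of the network below stand for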
-- Network definition
convNet = nn.Sequential()
convNet:add(nn.SpatialConvolution(3, 8, 5, 5))
convNet:add(nn.SpatialMaxPooling(2, 2, 2, 2))
convNet:add(nn.ReLU())
convNet:add(nn.SpatialConvolution(8, 8, 5, 5))
convNet:add(nn.SpatialMaxPooling(2, 2, 2, 2))
convNet:add(nn.ReLU())
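-- Feature-map sizes: 20x20 -> conv 5x5 -> 16x16 -> pool -> 8x8
--                          -> conv 5x5 ->  4x4  -> pool -> 2x2,
-- hence the 8*2*2 = 32 features flattened below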
convNet:add(nn.View(8*2*2))
convNet:add(nn.Linear(8*2*2, embeddingSize))
-- Batch-normalise the embedding; current nn requires the feature count here
-- and rejects the old `0` argument, so the affine transform is disabled
-- explicitly instead
convNet:add(nn.BatchNormalization(embeddingSize, nil, nil, false))
-- Parallel container
parallel = nn.ParallelTable()
parallel:add(convNet)
parallel:add(nn.Identity())
parallel:add(nn.Identity())
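-- The ParallelTable maps {aImgs, p, n} -> {convNet(aImgs), p, n}: only the
-- anchor images are pushed through the network, while the recycled positive
-- and negative embeddings pass through unchanged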
print(b('Recycling-previous-epoch-embeddings network:')); print(parallel)
-- Cost function
loss = nn.TripletEmbeddingCriterion()
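-- The criterion computes the batch-averaged triplet loss
--   L = max(0, ||a - p||^2 - ||a - n||^2 + alpha)
-- (the margin alpha is assumed to be an optional constructor argument;
-- see TripletEmbedding.lua for the exact signature and default)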
-- Training loop: forward, evaluate the triplet loss, backpropagate and take
-- a plain SGD step with learning rate 0.01
for i = 1, 4 do
   print(colour.green('Epoch ' .. i))
   predict = parallel:forward({aImgs, p, n})
   err = loss:forward(predict)
   errGrad = loss:backward(predict)
   parallel:zeroGradParameters()
   parallel:backward({aImgs, p, n}, errGrad)
   parallel:updateParameters(0.01)
   print(colour.red('loss: '), err)
   print(b('gradInput[1]:')); print(errGrad[1])
end
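-- A minimal sketch (not part of the original example) of the actual
-- recycling step: keep the anchor embeddings produced by the last forward
-- pass and feed them back as the positive batch for the next pass, so that
-- only fresh anchor images need to go through convNet. The name
-- `recycledEmbeddings` is illustrative, not from the original script.
recycledEmbeddings = predict[1]:clone()
p = recycledEmbeddings
print(b('Recycled anchor embeddings:')); print(p)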