-- functions.lua (from a fork of TLESORT/Baxter_Learning)
require "lfs"
function createModels()
if LOADING then
print("Loading Model")
model = torch.load(Log_Folder..'20e.t7')
else
model=getModel()
end
model=model:cuda()
parameters,gradParameters = model:getParameters()
model2=model:clone('weight','bias','gradWeight','gradBias','running_mean','running_std')
model3=model:clone('weight','bias','gradWeight','gradBias','running_mean','running_std')
model4=model:clone('weight','bias','gradWeight','gradBias','running_mean','running_std')
models={model1=model,model2=model2,model3=model3,model4=model4}
return models
end
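-- Illustrative usage (a sketch, not part of the original code; getModel, Log_Folder
-- and LOADING are assumed to be set by the calling script, as elsewhere in this
-- repository):
--   LOADING = false
--   models = createModels()
--   local out = models.model1:forward(torch.rand(1, 3, 200, 200):cuda())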
-- Load (or restore from the preload cache) all image sequences, move the
-- cross-validation sequence to the last position, then preprocess everything.
-- The last sequence is treated as the test set by the rest of the code.
function loadTrainTest(list_folders_images, crossValStep)
  imgs = {}
  preload_name = PRELOAD_FOLDER..'saveImgsRaw.t7'
  print("Loading Images")
  if not file_exists(preload_name) then
    print("nbList", nbList)
    for i=1,nbList do
      list = images_Paths(list_folders_images[i])
      table.insert(imgs, load_list(list, image_width, image_height, false))
    end
    torch.save(preload_name, imgs)
  else
    imgs = torch.load(preload_name)
  end
  -- Swap the chosen sequence with the last one, because every function below
  -- considers the last element to be the test sequence.
  imgs[crossValStep], imgs[#imgs] = imgs[#imgs], imgs[crossValStep]
  print("Preprocessing")
  imgs, mean, std = preprocessing(imgs)
  imgs_test = imgs[#imgs]
  return imgs, imgs_test
end
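-- Illustrative usage (a sketch; list_folders_images, nbList, PRELOAD_FOLDER,
-- image_width and image_height are assumed to be set by the calling script):
--   local imgs, imgs_test = loadTrainTest(list_folders_images, 1)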
-- Save a CPU copy of the model, with intermediate buffers cleared to keep the
-- file small.
function save_model(model, path)
  --print("Saved at : "..path)
  model:float()
  -- Refresh the flattened parameter views after the model has been moved to float.
  parameters, gradParameters = model:getParameters()
  local lightModel = model:clone():float()
  lightModel:clearState()
  torch.save(path, lightModel)
end
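-- Illustrative usage (a sketch; the file name is hypothetical):
--   save_model(models.model1, Log_Folder..'model_final.t7')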
-- Load every image listed in `list`. The width/height arguments match the call
-- site but are currently unused: images are loaded at full resolution here and
-- scaled/cropped later in scaleAndCrop().
function load_list(list, length, height)
  local im = {}
  for i=1,#list do
    table.insert(im, getImage(list[i]))
  end
  return im
end

-- Load a single image as a 3-channel byte tensor, or nil if the path is empty.
function getImage(im)
  if im=='' or im==nil then return nil end
  local image1 = image.load(im, 3, 'byte')
  return image1
  -- local format=length.."x"..height
  -- local img1_rsz=image.scale(image1,format)
  -- return img1_rsz:float()
end
-- Compute the per-pixel, per-channel mean and standard deviation over all
-- training sequences (the last sequence, the test set, is excluded).
function meanAndStd(imgs)
  local height, width = imgs[1][1][1]:size(1), imgs[1][1][1]:size(2)
  local mean = {torch.zeros(height,width), torch.zeros(height,width), torch.zeros(height,width)}
  local std = {torch.zeros(height,width), torch.zeros(height,width), torch.zeros(height,width)}
  for i=1,3 do
    mean[i] = mean[i]:float()
    std[i] = std[i]:float()
  end
  local numSeq = #imgs-1 -- the last sequence is the test set
  local totImg = 0
  for i=1,numSeq do
    for j=1,#(imgs[i]) do
      mean[1] = mean[1]:add(imgs[i][j][{1,{},{}}]:float())
      mean[2] = mean[2]:add(imgs[i][j][{2,{},{}}]:float())
      mean[3] = mean[3]:add(imgs[i][j][{3,{},{}}]:float())
      totImg = totImg+1
    end
  end
  mean[1] = mean[1] / totImg
  mean[2] = mean[2] / totImg
  mean[3] = mean[3] / totImg
  for i=1,numSeq do
    for j=1,#(imgs[i]) do
      std[1] = std[1]:add(torch.pow(imgs[i][j][{1,{},{}}]:float() - mean[1], 2))
      std[2] = std[2]:add(torch.pow(imgs[i][j][{2,{},{}}]:float() - mean[2], 2))
      std[3] = std[3]:add(torch.pow(imgs[i][j][{3,{},{}}]:float() - mean[3], 2))
    end
  end
  std[1] = torch.sqrt(std[1] / totImg)
  std[2] = torch.sqrt(std[2] / totImg)
  std[3] = torch.sqrt(std[3] / totImg)
  torch.save('Log/meanStdImages.t7', {mean, std})
  return mean, std
end
-- Per-channel normalisation: subtract the pixel-wise mean and divide by the
-- pixel-wise standard deviation.
function normalize(im, mean, std)
  for i=1,3 do
    im[{i,{},{}}] = (im[{i,{},{}}]:add(-mean[i])):cdiv(std[i])
  end
  return im
end
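-- Illustrative usage (a sketch): mean and std are the per-channel 2D tensors
-- produced by meanAndStd(), here reloaded from the file it saves:
--   local meanStd = torch.load('Log/meanStdImages.t7')
--   local im_norm = normalize(im:float(), meanStd[1], meanStd[2])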
-- Normalise every image of the test sequence with the training statistics.
function preprocessingTest(imgs, mean, std)
  for i=1,#imgs do
    im = imgs[i]
    imgs[i] = normalize(im, mean, std)
  end
  return imgs
end
-- Scale/crop all sequences, compute (or reuse) the mean and std of the training
-- sequences, normalise the training images and apply the same statistics to the
-- test sequence (the last one).
function preprocessing(imgs, meanStd)
  print('preprocessing(imgs,meanStd)')
  imgs = scaleAndCrop(imgs)
  if not meanStd then
    mean, std = meanAndStd(imgs)
  else
    mean, std = meanStd[1], meanStd[2]
  end
  numSeq = #imgs-1
  for i=1,numSeq do
    for j=1,#(imgs[i]) do
      im = imgs[i][j]
      imgs[i][j] = dataAugmentation(im, mean, std)
    end
  end
  imgs[#imgs] = preprocessingTest(imgs[#imgs], mean, std)
  return imgs, mean, std
end
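-- Illustrative usage (a sketch): statistics are computed from the training
-- sequences on the first call and can be reused later via the saved file:
--   imgs, mean, std = preprocessing(imgs)
--   imgs = preprocessing(imgs, torch.load('Log/meanStdImages.t7'))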
-- Scale to 320 pixels wide and then centre-crop to 200x200. This mirrors the
-- conversion done on the Python side, so both pipelines see identical images.
function scaleAndCrop(imgs, length, height)
  local lengthBeforeCrop = 320
  local lengthAfterCrop = length or 200
  local height = height or 200
  local formatBefore = lengthBeforeCrop.."x"..height
  for s=1,#imgs do
    for i=1,#imgs[s] do
      local img = image.scale(imgs[s][i], formatBefore)
      img = image.crop(img, 'c', lengthAfterCrop, height)
      imgs[s][i] = img:float()
      -- image.display(img)
      -- io.read()
    end
  end
  return imgs
end
-- Scale the images slightly larger than the target size and take the same random
-- crop for the whole sequence, so that consecutive frames stay aligned.
function scaleAndRandomCrop(imgs, length, height)
  local length = length or 200
  local height = height or 200
  local cropSize = 32
  for s=1,#imgs do
    -- One random crop position per sequence.
    local format = (length+cropSize).."x"..(height+cropSize)
    local posX, posY = torch.random(cropSize), torch.random(cropSize)
    for i=1,#imgs[s] do
      local img1_rsz = image.scale(imgs[s][i], format)
      local img = image.crop(img1_rsz, posX, posY, posX+length, posY+height)
      imgs[s][i] = img:float()
      -- image.display(img)
      -- io.read()
    end
  end
  return imgs
end
-- Data augmentation is currently reduced to normalisation only; the colour shift
-- and Gaussian noise below are kept commented out.
function dataAugmentation(im, mean, std)
  local channels = {'r','g','b'}
  local noiseReductionFactor = 4 -- the larger the value, the less noise
  local length = im:size(2)
  local width = im:size(3)
  local maxShift = 1
  im = normalize(im, mean, std)
  return im
  -- for i=1,3 do
  --   colorShift = torch.uniform(-maxShift,maxShift)
  --   im[{i,{},{}}] = im[{i,{},{}}] + colorShift
  -- end
  -- -- Adding Gaussian noise to the data
  -- noise = torch.rand(3,length,width)/noiseReductionFactor
  -- noise = noise - 0.5/noiseReductionFactor -- center the noise
  -- im = normalize(im, mean, std):add(noise:float())
  -- return im
end
-- Build a random batch of image pairs (quadruplets for "Prop"/"Rep") for the
-- requested training mode: "Temp", "Prop", "Rep" or "Caus".
function getRandomBatch(imgs1, imgs2, txt1, txt2, length, Mode)
  local width = image_width or 200
  local height = image_height or 200
  if Mode=="Prop" or Mode=="Rep" then
    Batch = torch.Tensor(4, length, 3, width, height)
  else
    Batch = torch.Tensor(2, length, 3, width, height)
  end
  for i=1,length do
    if Mode=="Prop" or Mode=="Rep" then
      if txt1==txt2 then Set = get_one_random_Prop_Set(txt1)
      else Set = get_two_Prop_Pair(txt1, txt2) end
      Batch[1][i] = imgs1[Set.im1]
      Batch[2][i] = imgs1[Set.im2]
      Batch[3][i] = imgs2[Set.im3]
      Batch[4][i] = imgs2[Set.im4]
    elseif Mode=="Temp" then
      Set = get_one_random_Temp_Set(#imgs1)
      Batch[1][i] = imgs1[Set.im1]
      Batch[2][i] = imgs1[Set.im2]
    elseif Mode=="Caus" then
      Set = get_one_random_Caus_Set(txt1, txt2)
      Batch[1][i] = imgs1[Set.im1]
      Batch[2][i] = imgs2[Set.im2]
    else
      print("getRandomBatch: wrong mode "..tostring(Mode))
    end
  end
  return Batch
end
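-- Illustrative usage (a sketch; imgs and txt are assumed to be the tables of image
-- sequences and state files used by the training scripts of this repository):
--   local batch = getRandomBatch(imgs[1], imgs[2], txt[1], txt[2], 16, "Temp")
--   -- batch has size 2 x 16 x 3 x 200 x 200 in "Temp" mode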
-- Compute and save the mean and standard deviation of the learned representations
-- over all images. If `model` is given it is used on the CPU, otherwise the global
-- GPU model (models.model1) is used.
function saveMeanAndStdRepr(imgs, show, model)
  local Log_Folder = './Log/'
  local allRepr = {}
  local totImgs = 0
  local mean = nil
  local std = nil
  -- ===== Uncomment if you want to display images (and use qlua instead of th)
  if show then
    w = image.display(image.lena()) -- with positional arguments mode
  end
  for s=1,#imgs do
    local imgs_test = imgs[s]
    for i=1,#imgs_test do
      local img = torch.zeros(1,3,200,200)
      img[1] = imgs_test[i]
      if model then
        -- clone() so that later forward passes do not overwrite the stored output
        allRepr[#allRepr+1] = model:forward(img:float()):clone()
      else
        allRepr[#allRepr+1] = models.model1:forward(img:cuda()):float()
      end
      -- ====== Printing the state corresponding to the image ======
      -- ====== don't forget to uncomment the "w = image.display..." line above
      if show then
        image.display{image=img, win=w}
        print(allRepr[#allRepr][1])
        io.read()
      end
      if mean then
        mean = torch.add(mean, allRepr[#allRepr])
      else
        mean = allRepr[#allRepr]
      end
    end
  end
  mean = mean / #allRepr
  -- print("mean",mean)
  -- print("allRepr",allRepr[5],allRepr[150],allRepr[400],allRepr[250])
  for i=1,#allRepr do
    if std then
      std = std:add(torch.pow(allRepr[i] - mean, 2))
    else
      std = torch.pow(allRepr[i] - mean, 2)
    end
  end
  std = torch.sqrt(std / #allRepr)
  -- print("sumStd", std)
  torch.save(Log_Folder..'meanStdRepr.t7', {mean, std})
end
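-- Illustrative usage (a sketch; relies on the global `models` table created by
-- createModels(), or on an explicit CPU model passed as the third argument):
--   saveMeanAndStdRepr(imgs, false)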
-- Covariance matrix of the three colour channels, used for a PCA-style colour
-- augmentation (see transformation below).
local function gamma(im)
  local Gamma = torch.Tensor(3,3)
  local channels = {'r','g','b'}
  for i,channel_i in ipairs(channels) do
    for j,channel_j in ipairs(channels) do
      if i==j then
        Gamma[i][i] = im[{i,{},{}}]:var()
      else
        local chan_i = im[{i,{},{}}] - im[{i,{},{}}]:mean()
        local chan_j = im[{j,{},{}}] - im[{j,{},{}}]:mean()
        -- element-wise covariance between the two centred channels
        Gamma[i][j] = torch.cmul(chan_i, chan_j):mean()
      end
    end
  end
  return Gamma
end
-- Shift every channel of the image along the eigenvectors v scaled by e,
-- i.e. add v*e to the three channels (PCA colour augmentation).
local function transformation(im, v, e)
  local transfo = torch.Tensor(3,200,200)
  local Gamma = torch.mv(v, e)
  for i=1,3 do
    transfo[i] = im[i] + Gamma[i]
  end
  return transfo
end
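-- Illustrative use of gamma/transformation for a PCA-style colour shift (a sketch;
-- both functions are local to this file and the 0.1 magnitude is an assumption):
--   local e, V = torch.symeig(gamma(im), 'V')   -- eigen-decomposition of the covariance
--   local shifted = transformation(im, V, torch.cmul(e, torch.randn(3) * 0.1))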
-- Return true if the file at `name` can be opened for reading.
function file_exists(name)
  local f = io.open(name, "r")
  if f ~= nil then io.close(f) return true else return false end
end