# Extracted from a web page view; the scrape chrome and rendered line-number
# column that preceded the code have been removed so the file parses as Python.
# -*- coding: utf-8 -*-
# amplify_spatial_Gdown_temporal_ideal(vidFile, outDir, alpha,
# level, fl, fh,
# chromAttenuation,colourSpace)
#
# Spatial Filtering: Gaussian blur and down sample
# Temporal Filtering: Ideal bandpass
def amplify_spatial_Gdown_temporal_ideal(vidFile,outDir, alpha,level,
                 fl,fh, chromAttenuation, colourSpace = 'rgb'):
    """Eulerian video magnification: Gaussian spatial pyramid + ideal bandpass.

    Each frame is Gaussian-blurred and downsampled ``level`` times
    (build_GDown_stack), the stack is bandpass-filtered in time between
    ``fl`` and ``fh`` Hz (ideal_bandpassing), amplified by ``alpha``
    (chroma channels additionally scaled by ``chromAttenuation`` in YUV
    mode), upsampled back to frame size, added onto the original frames,
    and written out as a video.

    Parameters
    ----------
    vidFile : str
        Path of the input video.
    outDir : str
        Directory the rendered output video is written into.
    alpha : float
        Amplification factor for the filtered band.
    level : int
        Depth of the Gaussian pyramid used for spatial filtering.
    fl, fh : float
        Low / high temporal cutoff frequencies in Hz.
    chromAttenuation : float
        Extra damping applied to the chroma channels (YUV mode only).
    colourSpace : str
        'rgb' (default) or 'yuv'.

    Raises
    ------
    IOError
        If the output video cannot be opened for writing.
    """
    import cv2
    import numpy as np
    from Filter import ideal_bandpassing
    from build_GDown_stack import build_GDown_stack
    import os

    vidName = os.path.splitext(os.path.basename(vidFile))[0]
    # Encode the processing parameters in the output name (and honour
    # outDir, which the hard-coded "proc.mov" previously ignored) so runs
    # with different settings do not overwrite each other.
    outName = os.path.join(
        outDir,
        '{}_{}-to-{}-alpha-{}-lvl-{}-{}.mov'.format(
            vidName, fl, fh, alpha, level, colourSpace))

    # Read video information.
    vid = cv2.VideoCapture(vidFile)
    fr = vid.get(cv2.CAP_PROP_FPS)
    nFrames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))  # was `len`: shadowed the builtin
    vidWidth = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    vidHeight = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    startIndex = 0
    endIndex = nFrames - 1

    # Define the codec and create the VideoWriter object.
    capSize = (vidWidth, vidHeight)  # size of the source video
    fourcc = cv2.VideoWriter_fourcc('j', 'p', 'e', 'g')  # note the lower case
    vidOut = cv2.VideoWriter()
    if not vidOut.open(outName, fourcc, fr, capSize, True):
        vid.release()
        raise IOError('could not open output video: ' + outName)

    try:
        # Spatial filtering: Gaussian blur + downsample stack.
        Gdown_stack = build_GDown_stack(vidFile, startIndex, endIndex, level,
                                        colourSpace)
        # Temporal filtering: ideal bandpass along the time axis.
        filtered_stack = ideal_bandpassing(Gdown_stack, 1, fl, fh, fr)

        # Amplify; in YUV the chroma channels are attenuated separately.
        if colourSpace == 'yuv':
            filtered_stack[:, :, :, 0] *= alpha
            filtered_stack[:, :, :, 1:] *= alpha * chromAttenuation
        elif colourSpace == 'rgb':
            filtered_stack *= alpha

        # Render: add the amplified band back onto each original frame.
        for k in range(endIndex - startIndex + 1):
            retval, temp = vid.read()
            if not retval:
                # Fewer decodable frames than CAP_PROP_FRAME_COUNT reported;
                # previously temp would be None and .astype would crash.
                break
            frame = temp.astype(np.float32)
            filtered = np.squeeze(filtered_stack[k])
            # The original passed cv2.INTER_LINEAR positionally, where it
            # landed in the fx/fy slots rather than `interpolation`; pass it
            # by keyword so the flag takes effect.
            filtered = cv2.resize(filtered, (vidWidth, vidHeight),
                                  interpolation=cv2.INTER_LINEAR)
            if colourSpace == 'yuv':
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
                # NOTE(review): only the chroma channels of the filtered band
                # are added back (luma is amplified but unused) — preserved
                # from the original; confirm this is intentional.
                frame[:, :, 1:] += filtered[:, :, 1:]
                frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR)
            elif colourSpace == 'rgb':
                frame += filtered
            frame = np.clip(frame, 0, 255)
            vidOut.write(cv2.convertScaleAbs(frame))
    finally:
        # Always release capture and writer, even if filtering fails.
        vid.release()
        vidOut.release()
def main():
    # Intentional no-op entry point; call
    # amplify_spatial_Gdown_temporal_ideal(...) directly, e.g. as in the
    # sample invocation below.
    return


if __name__ == "__main__":
    # Example:
    # amplify_spatial_Gdown_temporal_ideal('sample\\video.mp4','sample\\',50,4,50/60.0,60/60.0,30,'rgb')
    main()