texture_descriptor_main.py (forked from ericjang/draw)
from skimage.color import rgb2gray
from texture_loss import filter_response2histogram
from texture_loss import im2filter_response
import glob
import numpy as np
import os.path as osp
import pickle as pkl
import scipy.io as sio
import skimage.io
import tensorflow as tf


def toy_example():
    # Load images and convert to grayscale
    img1 = skimage.io.imread('texture/trivial/0.jpg')
    # img2 = skimage.io.imread('texture/trivial/1.jpg')
    img2 = skimage.io.imread('texture/trivial/0.jpg')
    img1 = rgb2gray(img1)
    img2 = rgb2gray(img2)
    img1 = img1[0:50, 0:50]
    img2 = img2[0:50, 0:50]
    # Construct a mini-batch of shape 2 x H x W x 1
    img1 = img1.reshape(img1.shape + (1,)).astype(np.float32)
    img2 = img2.reshape(img2.shape + (1,)).astype(np.float32)
    mini_batch = np.stack((img1, img2))
    # Load the LM filter bank and its cluster centroids
    filter_kernel = pkl.load(open('filters/np_LM_filter_p2.pkl', 'rb'))
    lm_centroids = pkl.load(open('filters/np_centroids_p2.pkl', 'rb'))
    lm_centroids = lm_centroids.astype(np.float32)
    # Reshape filters to 4D (height x width x in_channels x out_channels)
    # for convolution
    filter_kernel = filter_kernel.reshape((49, 49, 1, 48)).astype(np.float32)
    # Build operations
    filter_response_op = im2filter_response(
        tf.convert_to_tensor(mini_batch), tf.convert_to_tensor(filter_kernel))
    # centroids_op = tf.convert_to_tensor(lm_centroids)
    num_bins = 20
    hists_op = filter_response2histogram(filter_response_op, lm_centroids,
                                         num_bins, 2)
    # Run the filter response and histogram ops in TensorFlow
    with tf.Session() as sess:
        filter_response, hists = sess.run([filter_response_op, hists_op])
        print(hists)


def debug_descriptor(input_path, file_ext, num_bins):
    path = osp.join(input_path, '*.%s' % file_ext)
    output_path = 'data/debug_histograms.mat'
    hists_all = []
    sess = tf.InteractiveSession()
    for f in glob.glob(path):
        img = skimage.io.imread(f)
        img = rgb2gray(img)
        # img = img[0:50, 0:50]
        assert img.shape[0] < 100 and img.shape[1] < 100, (
            "Image size is too big and might cause memory problems in KNN")
        # Reshape to 1 x H x W x 1 (a mini-batch of size 1)
        mini_batch = img.reshape((1,) + img.shape + (1,)).astype(np.float32)
        mini_batch = tf.convert_to_tensor(mini_batch)
        # Load the LM filter bank and its cluster centroids
        filter_kernel = pkl.load(open('filters/np_LM_filter_p2.pkl', 'rb'))
        lm_centroids = pkl.load(open('filters/np_centroids_p2.pkl', 'rb'))
        lm_centroids = lm_centroids.astype(np.float32)
        # Reshape filters to 4D (for convolution)
        filter_kernel = filter_kernel.reshape((49, 49, 1, 48)).astype(
            np.float32)
        filter_kernel_tf = tf.convert_to_tensor(filter_kernel)
        # Build operations
        filter_response_op = im2filter_response(mini_batch, filter_kernel_tf)
        hists_op = filter_response2histogram(filter_response_op, lm_centroids,
                                             num_bins, 1)
        # Evaluate the histogram op for this image
        hist = sess.run(hists_op)
        hists_all.append(np.squeeze(hist))
    hist_dict = {'histograms': hists_all}
    sio.savemat(output_path, hist_dict)
    sess.close()


def main(_):
    # toy_example()
    dataset_path = 'texture/trivial'
    file_ext = 'jpg'
    num_bins = 200
    # debug_descriptor saves 'data/debug_histograms.mat' with the
    # histograms of all images in dataset_path
    debug_descriptor(dataset_path, file_ext, num_bins)


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
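
For quick inspection of the output, here is a minimal sketch (not part of the repository) that loads the file written by debug_descriptor. It only assumes that the per-image histograms were stored under the 'histograms' key of data/debug_histograms.mat, as in the code above; variable names are illustrative.

    import numpy as np
    import scipy.io as sio

    # Load the histograms saved by debug_descriptor()
    data = sio.loadmat('data/debug_histograms.mat')
    hists = np.asarray(data['histograms'])  # one descriptor row per image
    print(hists.shape)
    print(hists[0])  # descriptor of the first image found by glob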