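"""Feature extraction and frame matching for a minimal monocular SLAM front end.

Detects Shi-Tomasi corners, computes ORB descriptors, matches consecutive
frames with a ratio test plus RANSAC on the fundamental matrix (in
normalized coordinates), and recovers the relative camera pose.
"""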
import cv2
import numpy as np
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform

np.set_printoptions(suppress=True)

def poseRt(R, t):
    # Pack a 3x3 rotation R and a translation vector t into a 4x4 pose matrix.
    ret = np.eye(4)
    ret[:3, :3] = R
    ret[:3, 3] = t
    return ret

def extract(image):
    # Detect up to 1000 Shi-Tomasi corners in a single-channel image, then
    # compute ORB descriptors at those locations.
    orb = cv2.ORB_create()
    features = cv2.goodFeaturesToTrack(
        image,
        1000,
        qualityLevel=0.01,
        minDistance=7
    )
    # Construct KeyPoints positionally as (x, y, size); newer OpenCV builds
    # renamed the `_size` keyword to `size`, so keyword form is version-fragile.
    keypoints = [
        cv2.KeyPoint(feature[0][0], feature[0][1], 20)
        for feature in features
    ]
    keypoints, descriptors = orb.compute(image, keypoints)
    points = np.array([(keypoint.pt[0], keypoint.pt[1]) for keypoint in keypoints])
    return points, descriptors

def add_ones(x):
    # Append a column of ones: (N, 2) coordinates -> (N, 3) homogeneous.
    return np.concatenate([x, np.ones((x.shape[0], 1))], axis=1)

def normalize(Kinv, points):
    # Map an (N, 2) array of pixel coordinates to normalized image
    # coordinates by applying the inverse intrinsic matrix.
    return np.dot(Kinv, add_ones(points).T).T[:, 0:2]

def denormalize(K, point):
    # Map a single normalized coordinate back to integer pixel coordinates.
    denorm = np.dot(K, np.array([point[0], point[1], 1.0]))
    denorm /= denorm[2]
    return int(round(denorm[0])), int(round(denorm[1]))
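
# For example, `normalize` and `denormalize` invert each other up to pixel
# rounding (K here is a made-up intrinsic matrix, just for illustration):
#   K = np.array([[500, 0, 320], [0, 500, 240], [0, 0, 1]], dtype=float)
#   pts = np.array([[100.0, 200.0]])
#   denormalize(K, normalize(np.linalg.inv(K), pts)[0])  # -> (100, 200)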

def match_frames(f1, f2):
    # Match ORB descriptors between two frames with brute-force Hamming
    # matching, filter with Lowe's ratio test and sanity checks, then verify
    # the geometry with RANSAC.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(f1.des, f2.des, k=2)
    ret = []
    idx1 = []
    idx2 = []
    for m, n in matches:
        # Lowe's ratio test: keep a match only if the best candidate is
        # clearly better than the second best.
        if m.distance < 0.75 * n.distance:
            keypoint_1 = f1.key_points[m.queryIdx]
            keypoint_2 = f2.key_points[m.trainIdx]
            # Require the match to travel less than 10% of the image diagonal
            # and to be within ORB distance 32.
            if np.linalg.norm(keypoint_1 - keypoint_2) < 0.1 * np.linalg.norm([f1.w, f1.h]) \
                    and m.distance < 32:
                # Skip duplicate matches to the same keypoint.
                if m.queryIdx not in idx1 and m.trainIdx not in idx2:
                    idx1.append(m.queryIdx)
                    idx2.append(m.trainIdx)
                    ret.append((keypoint_1, keypoint_2))
    # Avoid duplicates; RANSAC below needs at least min_samples matches.
    assert len(set(idx1)) == len(idx1)
    assert len(set(idx2)) == len(idx2)
    assert len(ret) >= 8
    ret = np.array(ret)
    idx1 = np.array(idx1)
    idx2 = np.array(idx2)
    # The keypoints are already normalized by Kinv, so fitting a fundamental
    # matrix here effectively estimates the essential matrix.
    model, inliers = ransac(
        (ret[:, 0], ret[:, 1]),
        FundamentalMatrixTransform,
        min_samples=8,
        residual_threshold=0.001,
        max_trials=100
    )
    Rt = calc_pose_matrices(model)
    return idx1[inliers], idx2[inliers], Rt

def calc_pose_matrices(model):
    # Decompose the estimated essential matrix into a relative camera pose
    # (rotation R and translation t), following Hartley & Zisserman.
    W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]], dtype=float)
    # np.linalg.svd returns V already transposed.
    U, w, Vt = np.linalg.svd(model.params)
    if np.linalg.det(U) < 0:
        U *= -1.0
    if np.linalg.det(Vt) < 0:
        Vt *= -1.0
    R = np.dot(np.dot(U, W), Vt)
    # Of the two candidate rotations, keep the one with positive trace.
    if np.sum(R.diagonal()) < 0:
        R = np.dot(np.dot(U, W.T), Vt)
    t = U[:, 2]
    return poseRt(R, t)

class Frame(object):
    def __init__(self, img, K, global_map):
        self.K = K
        self.Kinv = np.linalg.inv(self.K)
        self.pose = np.eye(4)
        self.h, self.w = img.shape[0:2]
        # key_points_unsorted holds raw pixel coordinates; key_points holds
        # the same coordinates normalized by Kinv.
        self.key_points_unsorted, self.des = extract(img)
        self.key_points = normalize(self.Kinv, self.key_points_unsorted)
        self.points = [None] * len(self.key_points)
        # Register this frame with the global map.
        self.id = len(global_map.frames)
        global_map.frames.append(self)
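
# A minimal driver sketch showing how the pieces fit together. The Map class,
# camera intrinsics, and video path below are illustrative assumptions, not
# part of this module; substitute your own map object and calibration.
if __name__ == "__main__":
    class Map(object):
        # Hypothetical stand-in for the project's global map: Frame only
        # requires an object with a `frames` list.
        def __init__(self):
            self.frames = []

    F = 500.0  # assumed focal length in pixels
    W, H = 640, 480
    K = np.array([[F, 0, W // 2],
                  [0, F, H // 2],
                  [0, 0, 1]], dtype=float)

    global_map = Map()
    cap = cv2.VideoCapture("video.mp4")  # hypothetical input file
    prev = None
    while cap.isOpened():
        ok, img = cap.read()
        if not ok:
            break
        # extract() expects a single-channel image.
        gray = cv2.cvtColor(cv2.resize(img, (W, H)), cv2.COLOR_BGR2GRAY)
        frame = Frame(gray, K, global_map)
        if prev is not None:
            idx1, idx2, Rt = match_frames(frame, prev)
            print("frame {}: {} inlier matches".format(frame.id, len(idx1)))
        prev = frame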