lensdistorsion.py
"""
This module calculates the lensdistorsion for a camera. With this
information it is possible to create orthorectified and
georeferenced images.
The lensdistorsion calculation is based on some calibration
parameters which can be generated by a software like the
free AGISoft Lens.
"""
import math

import numpy as np
from scipy import optimize


class CameraModel(object):
    """At the moment there is only Brown's camera model.
    It is the formula which is used to calculate the lens distortion.
    """
    BROWN = 1


class LensDistorsion(object):
    """
    The Brown camera model uses a pinhole camera model for lens calibration.
    The distortions are modeled using Brown's distortion model.
    The camera model specifies the transformation from point coordinates in the
    local camera coordinate system to the pixel coordinates in the image frame.
    The local camera coordinate system has its origin at the camera projection
    center. The Z axis points in the viewing direction, the X axis points to
    the right, and the Y axis points down.
    The image coordinate system has its origin at the top left image pixel,
    with the center of the top left pixel having coordinates (0.5, 0.5).
    The X axis in the image coordinate system points to the right, the Y axis
    points down. For a point with (X, Y, Z) coordinates in the local camera
    coordinate system, the projected coordinates in the image frame can be
    calculated using the following equations:

    (X, Y, Z) = (0, 0, 0) is the projection center inside the camera.

        x  = X / Z
        y  = Y / Z
        x' = x*(1 + k1*r^2 + k2*r^4 + k3*r^6) + p2*(r^2 + 2*x^2) + 2*p1*x*y
        y' = y*(1 + k1*r^2 + k2*r^4 + k3*r^6) + p1*(r^2 + 2*y^2) + 2*p2*x*y
        u  = cx + x'*fx + y'*skew
        v  = cy + y'*fy

    where:
        r          = sqrt(x^2 + y^2),
        (X, Y, Z)  - point coordinates in the local camera coordinate system,
        (u, v)     - projected point coordinates in the image coordinate
                     system (in px),
        (fx, fy)   - focal lengths,
        (cx, cy)   - principal point coordinates,
        k1, k2, k3 - radial distortion coefficients,
        p1, p2     - tangential distortion coefficients,
        skew       - skew coefficient between the x and the y axis.
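
    Illustrative usage sketch (the parameter values here are made up for
    demonstration, not taken from a real calibration):

        lens = LensDistorsion()
        lens.set_params(width=640, height=512, fx=1000.0, fy=1000.0,
                        cx=320.0, cy=256.0, k1=0.1)
        u, v = lens.get_pixel_position(x_world=1.0, y_world=0.5, z_world=10.0)
        # (u, v) is the distorted pixel position of that world point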
"""
width, height = [640, 512]
cx, cy = [width/2, height/2]
fx, fy = [1000, 1000]
k1, k2, k3, p1, p2, skew = [0.0 for n in range(6)]
    def __init__(self, model=CameraModel.BROWN):
        self.model = model

    def set_params(self, **parameters):
        for key, val in parameters.items():
            setattr(self, key, val)

    def get_params(self):
        members = [attr for attr in dir(self)
                   if not callable(getattr(self, attr)) and not attr.startswith("__")]
        values = [getattr(self, key) for key in members]
        return dict(zip(members, values))

    def get_pixel_position(self, x_world, y_world, z_world):
        """
        This is the forward problem. The inputs are real-world coordinates.
        z_world is the distance to the camera; (x_world=0, y_world=0) lies on
        the optical axis, which passes through the principal point (cx, cy).
        x_world and y_world are parallel to the image coordinates (x, y).
        The resulting image coordinates (u, v) start at the upper left
        pixel of the image.
        """
        x = x_world / float(z_world)
        y = y_world / float(z_world)
        return self.__space_to_image_transform__((x, y))

    def __space_to_image_transform__(self, X):
        """Apply Brown's distortion model to the normalized coordinates (x, y)
        and return the pixel coordinates (u, v) as described in the class
        docstring."""
        (x, y) = X
        r = math.sqrt(x**2 + y**2)
        a = 1 + self.k1*r**2 + self.k2*r**4 + self.k3*r**6
        xi = x*a + self.p2*(r**2 + 2*x**2) + 2*self.p1*x*y
        yi = y*a + self.p1*(r**2 + 2*y**2) + 2*self.p2*x*y
        u = self.cx + xi*self.fx + yi*self.skew
        v = self.cy + yi*self.fy
        return np.array([u, v])

    def __jacobian__(self, X):
        """
        The Jacobian matrix of the __space_to_image_transform__ function.
        Input: (tan(x_angle), tan(y_angle)) of one pixel of the image.
        Returns the x and y derivatives of the __space_to_image_transform__
        function at that pixel.
        """
        (x, y) = X
        r = x**2 + y**2  # note: r holds the squared radius here
        a = 2*self.k1 + 4*self.k2*r + 6*self.k3*r**2
        b = 1 + self.k1*r + self.k2*r**2 + self.k3*r**3
        c = y**2*a + 6*self.p1*y + 2*self.p2*x
        return np.array([[(b + 6*self.p2*x + 2*self.p1*y + a*x**2)*self.fx +
                          (x*y*a + 2*self.p1*x + 2*self.p2*y)*self.skew,
                          (x*y*a + 2*self.p2*y + 2*self.p1*x)*self.fx + (b + c)*self.skew],
                         [(y*x*a + 2*self.p1*x + 2*self.p2*y)*self.fy, (b + c)*self.fy]])

    def create_lut(self):
        """
        The created lookup tables (LUTs) have the size (width, height).
        x_angle is the LUT with the tan(omega) angle in X direction for each
        pixel.
        y_angle is the LUT with the tan(phi) angle in Y direction for each
        pixel.
        If you know the distance to the camera (Z), you can calculate the
        distances in X and Y direction from these tangents:
        X(x_pixel, y_pixel) = Z * x_angle[x_pixel][y_pixel]
        Y(x_pixel, y_pixel) = Z * y_angle[x_pixel][y_pixel]
        """
        self.x_angle = np.zeros(shape=(self.width, self.height))
        self.y_angle = np.zeros(shape=(self.width, self.height))
        for y in range(0, self.height):
            print("Row %i of %i" % (y, self.height))
            for x in range(0, self.width):
                f = lambda L: self.__space_to_image_transform__(L) - \
                    np.array([x + 0.5, y + 0.5])
                # Start the solver from the undistorted pinhole back-projection
                # of the pixel center, which lies close to the true solution.
                guess = [(x + 0.5 - self.cx) / self.fx,
                         (y + 0.5 - self.cy) / self.fy]
                sol = optimize.root(f, guess,
                                    jac=self.__jacobian__, method='hybr')
                self.x_angle[x][y] = sol.x[0]
                self.y_angle[x][y] = sol.x[1]
        return self.x_angle, self.y_angle


def main():
    print("LensDistorsion Module Test")
    lens = LensDistorsion()
    lens.set_params(width=64, height=48)
    lens.set_params(fx=1135.0525882457587, fy=1137.3517442305001)
    lens.set_params(cx=3.23567851340823, cy=2.5332402866359234)
    lens.set_params(k1=0.40126256086674955, k2=-0.20129011551800177)
    lens.set_params(skew=3.5108549724158693e+000, k3=5.9550587592191713)
    lens.set_params(p1=1.1002843701506441e-003, p2=-4.0214923185717157e-003)
    print(lens.get_params())
    lens.create_lut()
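    # Usage sketch for the LUTs, following the create_lut() docstring: with a
    # known distance Z to the camera, each pixel's tangents give the metric
    # offsets X = Z * x_angle and Y = Z * y_angle. The distance and the pixel
    # chosen here are arbitrary demonstration values.
    z_distance = 10.0
    x_offset = z_distance * lens.x_angle[10][10]
    y_offset = z_distance * lens.y_angle[10][10]
    print("World offset of pixel (10, 10) at Z=%.1f: X=%.3f, Y=%.3f"
          % (z_distance, x_offset, y_offset))
    # Reprojecting that world point should land near the pixel center (10.5, 10.5).
    print("Reprojected pixel position: %s"
          % lens.get_pixel_position(x_offset, y_offset, z_distance))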


if __name__ == "__main__":
    main()