main.py (main file): for the underlying theory, see https://zhuanlan.zhihu.com/p/30033898
import os
import math
import cv2
import numpy as np
import config  # project-local settings: image_dir, K, MRT, x, y
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from mayavi import mlab
def extract_features(image_names):
    # SIFT with no feature cap, 3 octave layers, contrast threshold 0.04, edge threshold 10
    sift = cv2.SIFT_create(0, 3, 0.04, 10)
    key_points_for_all = []
    descriptor_for_all = []
    colors_for_all = []
    for image_name in image_names:
        image = cv2.imread(image_name)
        if image is None:
            continue
        key_points, descriptor = sift.detectAndCompute(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), None)
        if len(key_points) <= 10:
            continue  # too few features to be useful
        key_points_for_all.append(key_points)
        descriptor_for_all.append(descriptor)
        colors = np.zeros((len(key_points), 3))
        for i, key_point in enumerate(key_points):
            p = key_point.pt
            colors[i] = image[int(p[1])][int(p[0])]  # BGR pixel under the key point
        colors_for_all.append(colors)
    # per-image lists are ragged, so recent NumPy needs dtype=object here
    return (np.array(key_points_for_all, dtype=object),
            np.array(descriptor_for_all, dtype=object),
            np.array(colors_for_all, dtype=object))
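# Note (added): cv2.SIFT_create is available in opencv-python >= 4.4, where the
# SIFT patent has expired; on older builds the equivalent call is
# cv2.xfeatures2d.SIFT_create from opencv-contrib-python.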
def match_features(query, train):
    bf = cv2.BFMatcher(cv2.NORM_L2)
    knn_matches = bf.knnMatch(query, train, k=2)
    matches = []
    for m, n in knn_matches:
        # Lowe's ratio test: keep a match only if it clearly beats the runner-up
        if m.distance < config.MRT * n.distance:
            matches.append(m)
    return np.array(matches)

def match_all_features(descriptor_for_all):
    # match each image against its successor in the sequence
    matches_for_all = []
    for i in range(len(descriptor_for_all) - 1):
        matches = match_features(descriptor_for_all[i], descriptor_for_all[i + 1])
        matches_for_all.append(matches)
    return np.array(matches_for_all, dtype=object)
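# Illustration (added): with MRT = 0.7, a best-match distance of 0.40 against a
# second-best of 0.90 passes the ratio test (0.40 < 0.7 * 0.90 = 0.63), while
# 0.80 against 0.90 fails, so the ambiguous match is discarded.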
def find_transform(K, p1, p2):
    focal_length = 0.5 * (K[0, 0] + K[1, 1])
    principle_point = (K[0, 2], K[1, 2])
    # essential matrix via RANSAC (confidence 0.999, threshold 1 px)
    E, mask = cv2.findEssentialMat(p1, p2, focal_length, principle_point, cv2.RANSAC, 0.999, 1.0)
    if E is None:
        return None
    cameraMatrix = np.array([[focal_length, 0, principle_point[0]],
                             [0, focal_length, principle_point[1]],
                             [0, 0, 1]])
    pass_count, R, T, mask = cv2.recoverPose(E, p1, p2, cameraMatrix, mask)
    return R, T, mask
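# Note (added): cv2.recoverPose returns the second camera's pose relative to
# the first, with translation known only up to scale; the returned mask keeps
# the inliers that also pass the cheirality check (points in front of both
# cameras).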
def get_matched_points(p1, p2, matches):
    src_pts = np.asarray([p1[m.queryIdx].pt for m in matches])
    dst_pts = np.asarray([p2[m.trainIdx].pt for m in matches])
    return src_pts, dst_pts

def get_matched_colors(c1, c2, matches):
    color_src_pts = np.asarray([c1[m.queryIdx] for m in matches])
    color_dst_pts = np.asarray([c2[m.trainIdx] for m in matches])
    return color_src_pts, color_dst_pts

def maskout_points(p1, mask):
    # keep only the entries whose RANSAC inlier flag is set
    p1_copy = []
    for i in range(len(mask)):
        if mask[i] > 0:
            p1_copy.append(p1[i])
    return np.array(p1_copy)
def init_structure(K, key_points_for_all, colors_for_all, matches_for_all):
    # bootstrap the reconstruction from the first image pair
    p1, p2 = get_matched_points(key_points_for_all[0], key_points_for_all[1], matches_for_all[0])
    c1, c2 = get_matched_colors(colors_for_all[0], colors_for_all[1], matches_for_all[0])

    transform = find_transform(K, p1, p2)
    if transform is not None:
        R, T, mask = transform
    else:
        R, T, mask = np.array([]), np.array([]), np.array([])

    p1 = maskout_points(p1, mask)
    p2 = maskout_points(p2, mask)
    colors = maskout_points(c1, mask)

    # the first camera defines the world frame
    R0 = np.eye(3, 3)
    T0 = np.zeros((3, 1))
    structure = reconstruct(K, R0, T0, R, T, p1, p2)
    rotations = [R0, R]
    motions = [T0, T]

    # correspond_struct_idx[i][j]: index of the 3D point seen by key point j
    # of image i, or -1 if none has been triangulated yet
    correspond_struct_idx = []
    for key_p in key_points_for_all:
        correspond_struct_idx.append(np.ones(len(key_p)) * -1)
    correspond_struct_idx = np.array(correspond_struct_idx, dtype=object)

    idx = 0
    matches = matches_for_all[0]
    for i, match in enumerate(matches):
        if mask[i] == 0:
            continue  # skip RANSAC outliers
        correspond_struct_idx[0][int(match.queryIdx)] = idx
        correspond_struct_idx[1][int(match.trainIdx)] = idx
        idx += 1
    return structure, correspond_struct_idx, colors, rotations, motions
def reconstruct(K, R1, T1, R2, T2, p1, p2):
    # build the two 3x4 projection matrices P = K [R | T]
    proj1 = np.zeros((3, 4))
    proj2 = np.zeros((3, 4))
    proj1[0:3, 0:3] = np.float32(R1)
    proj1[:, 3] = np.float32(T1.T)
    proj2[0:3, 0:3] = np.float32(R2)
    proj2[:, 3] = np.float32(T2.T)
    fk = np.float32(K)
    proj1 = np.dot(fk, proj1)
    proj2 = np.dot(fk, proj2)
    s = cv2.triangulatePoints(proj1, proj2, np.float32(p1.T), np.float32(p2.T))
    structure = []
    for i in range(len(s[0])):
        col = s[:, i]
        col /= col[3]  # homogeneous to Euclidean coordinates
        structure.append([col[0], col[1], col[2]])
    return np.array(structure)
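# A small sanity check (added, not part of the original script): reproject the
# triangulated points through one camera and report the mean pixel error. The
# helper name and its use are illustrative assumptions only.
def reprojection_error(K, R, T, points3d, points2d):
    r, _ = cv2.Rodrigues(R)
    projected, _ = cv2.projectPoints(points3d.reshape(-1, 1, 3), r, T, K, np.array([]))
    projected = projected.reshape(-1, 2)
    return np.mean(np.linalg.norm(projected - points2d, axis=1))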
def fusion_structure(matches, struct_indices, next_struct_indices, structure, next_structure, colors, next_colors):
    # merge newly triangulated points into the global structure, reusing an
    # existing 3D point whenever the feature track already has one
    for i, match in enumerate(matches):
        query_idx = match.queryIdx
        train_idx = match.trainIdx
        struct_idx = struct_indices[query_idx]
        if struct_idx >= 0:
            # this feature already maps to a 3D point: propagate its index
            next_struct_indices[train_idx] = struct_idx
            continue
        # otherwise append a brand-new point
        structure = np.append(structure, [next_structure[i]], axis=0)
        colors = np.append(colors, [next_colors[i]], axis=0)
        struct_indices[query_idx] = next_struct_indices[train_idx] = len(structure) - 1
    return struct_indices, next_struct_indices, structure, colors
def get_objpoints_and_imgpoints(matches, struct_indices, structure, key_points):
    # collect 3D-2D correspondences for solvePnPRansac: existing 3D points
    # seen in the previous image, paired with their pixels in the next one.
    # struct_indices must be the row for the current image, so index it
    # directly (the posted code's struct_indices[0][query_idx] always read
    # image 0's row, which is one source of the error reported below).
    object_points = []
    image_points = []
    for match in matches:
        query_idx = match.queryIdx
        train_idx = match.trainIdx
        struct_idx = struct_indices[query_idx]
        if struct_idx < 0:
            continue  # this feature has no 3D point yet
        object_points.append(structure[int(struct_idx)])
        image_points.append(key_points[train_idx].pt)
    return np.array(object_points), np.array(image_points)
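# Note (added): cv2.solvePnPRansac asserts npoints >= 4, so this function must
# supply at least four 3D-2D pairs; main() additionally pads the lists up to
# seven entries before calling it.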
def get_3dpos(pos, ob, r, t, K):
    # refine one 3D point by least-squares on its reprojection error
    def F(x):
        p, J = cv2.projectPoints(x.reshape(1, 1, 3), r, t, K, np.array([]))
        return ob - p.reshape(2)
    res = least_squares(F, pos)
    return res.x

def get_3dpos_v1(pos, ob, r, t, K):
    # cheaper variant: keep the point only if its pixel error is within bounds
    p, J = cv2.projectPoints(pos.reshape(1, 1, 3), r, t, K, np.array([]))
    e = ob - p.reshape(2)
    if abs(e[0]) > config.x or abs(e[1]) > config.y:
        return None
    return pos

def bundle_adjustment(rotations, motions, K, correspond_struct_idx, key_points_for_all, structure):
    for i in range(len(rotations)):
        r, _ = cv2.Rodrigues(rotations[i])
        rotations[i] = r  # projectPoints wants Rodrigues vectors
    for i in range(len(correspond_struct_idx)):
        point3d_ids = correspond_struct_idx[i]
        key_points = key_points_for_all[i]
        r = rotations[i]
        t = motions[i]
        for j in range(len(point3d_ids)):
            point3d_id = int(point3d_ids[j])
            if point3d_id < 0:
                continue
            new_point = get_3dpos_v1(structure[point3d_id], key_points[j].pt, r, t, K)
            structure[point3d_id] = new_point
    return structure
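# Note (added): despite its name, bundle_adjustment does not jointly optimize
# camera poses and 3D points. It only re-checks each point's reprojection
# error through get_3dpos_v1 and marks outliers (returned as None, which NumPy
# stores as NaN in the float array) for removal in main(). A full bundle
# adjustment would refine rotations, motions and structure together, for
# example with scipy.optimize.least_squares over all parameters at once.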
def fig(structure, colors):
    colors = colors / 255
    for i in range(len(colors)):
        colors[i, :] = colors[i, :][[2, 1, 0]]  # BGR -> RGB for matplotlib
    figure = plt.figure()
    ax = figure.add_subplot(projection='3d')  # fig.gca(projection='3d') is removed in recent matplotlib
    for i in range(len(structure)):
        ax.scatter(structure[i, 0], structure[i, 1], structure[i, 2], color=colors[i, :], s=5)
    ax.view_init(elev=135, azim=90)
    plt.show()

def fig_v1(structure):
    mlab.points3d(structure[:, 0], structure[:, 1], structure[:, 2], mode='point', name='dinosaur')
    mlab.show()

def fig_v2(structure, colors):
    for i in range(len(structure)):
        # mayavi expects RGB floats in [0, 1]; colors are stored as BGR in [0, 255]
        c = (colors[i][2] / 255, colors[i][1] / 255, colors[i][0] / 255)
        mlab.points3d(structure[i][0], structure[i][1], structure[i][2],
                      mode='point', name='dinosaur', color=c)
    mlab.show()
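# Note (added): fig() renders with matplotlib, which needs no extra dependency
# but is slow for large clouds; fig_v1()/fig_v2() use mayavi's mlab, which
# copes better with dense point clouds. main() below calls fig_v2().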
def main():
    imgdir = config.image_dir
    img_names = os.listdir(imgdir)
    img_names = sorted(img_names)
    for i in range(len(img_names)):
        img_names[i] = imgdir + img_names[i]
    K = config.K  # camera intrinsic matrix, loaded from config

    key_points_for_all, descriptor_for_all, colors_for_all = extract_features(img_names)
    matches_for_all = match_all_features(descriptor_for_all)
    structure, correspond_struct_idx, colors, rotations, motions = init_structure(K, key_points_for_all, colors_for_all, matches_for_all)
    for i in range(1, len(matches_for_all)):
        # use the CURRENT pair's matches and index row, not pair 0's
        object_points, image_points = get_objpoints_and_imgpoints(matches_for_all[i], correspond_struct_idx[i], structure, key_points_for_all[i + 1])
        # solvePnPRansac needs enough points; pad by repetition if short
        if len(image_points) < 7:
            while len(image_points) < 7:
                object_points = np.append(object_points, [object_points[0]], axis=0)
                image_points = np.append(image_points, [image_points[0]], axis=0)
        # The original post reported this call failing with (message truncated in the post):
        #   error: (-215:Assertion failed) npoints >= 4 && npoints == std::max(ipoints.checkVector(2, CV_32F), i
        # A likely cause: the loop passed matches_for_all[0] and the whole
        # correspond_struct_idx to get_objpoints_and_imgpoints, which also
        # indexed struct_indices[0], so later iterations received too few or
        # mismatched correspondences. Fixed above by using the current pair's
        # matches_for_all[i] and correspond_struct_idx[i].
        _, r, T, _ = cv2.solvePnPRansac(object_points, image_points, K, np.array([]))
        R, _ = cv2.Rodrigues(r)
        rotations.append(R)
        motions.append(T)
        p1, p2 = get_matched_points(key_points_for_all[i], key_points_for_all[i + 1], matches_for_all[i])
        c1, c2 = get_matched_colors(colors_for_all[i], colors_for_all[i + 1], matches_for_all[i])
        next_structure = reconstruct(K, rotations[i], motions[i], R, T, p1, p2)
        correspond_struct_idx[i], correspond_struct_idx[i + 1], structure, colors = fusion_structure(
            matches_for_all[i], correspond_struct_idx[i], correspond_struct_idx[i + 1],
            structure, next_structure, colors, c1)

    structure = bundle_adjustment(rotations, motions, K, correspond_struct_idx, key_points_for_all, structure)
    # points rejected during refinement were stored as NaN; drop them
    i = 0
    while i < len(structure):
        if math.isnan(structure[i][0]):
            structure = np.delete(structure, i, 0)
            colors = np.delete(colors, i, 0)
            i -= 1
        i += 1
    fig_v2(structure, colors)
if __name__ == '__main__':
    main()
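config.py (referenced above but not shown in the post): the script uses config.image_dir, config.MRT, config.K, and config.x / config.y without defining them. A minimal sketch of such a module follows; every value in it is an illustrative assumption, not the author's actual settings or camera calibration.

import numpy as np

# directory holding the input image sequence; main() concatenates
# imgdir + file name directly, so keep the trailing slash (assumed path)
image_dir = './images/'

# Lowe ratio-test threshold used by match_features (0.7 is a common default)
MRT = 0.7

# per-axis reprojection-error limits in pixels, used by get_3dpos_v1
x = 0.5
y = 0.5

# 3x3 intrinsic matrix (placeholder values; substitute a real calibration)
K = np.array([[2500.0, 0.0, 1024.0],
              [0.0, 2500.0, 768.0],
              [0.0, 0.0, 1.0]])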