forked from filchy/slam-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathextractor.py
More file actions
70 lines (51 loc) · 2.25 KB
/
extractor.py
File metadata and controls
70 lines (51 loc) · 2.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import cv2
import numpy as np
class Extractor(object):
    """Detect, describe and match keypoints between consecutive frames.

    Detection uses Shi-Tomasi corners (``cv2.goodFeaturesToTrack``), description
    uses ORB, and matching uses brute-force Hamming k-NN with Lowe's ratio test
    plus an absolute distance cut. The previous frame's keypoints/descriptors
    are kept in ``self.last`` so each call matches against the frame before it.
    """

    def __init__(self):
        # ORB is used only via .compute() (detection is goodFeaturesToTrack),
        # so nfeatures is irrelevant here; kept for compatibility.
        # Fixed: the original also assigned the detector to ``cv2.orb``,
        # mutating the cv2 module namespace as a side effect.
        self.orb = cv2.ORB_create(nfeatures=1, scoreType=cv2.ORB_FAST_SCORE)
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        # {"kpts": [cv2.KeyPoint, ...], "des": ndarray} of the previous frame
        self.last = None

    def extract_keypoints(self, img):
        """Extract keypoints in *img* and match them against the last frame.

        Parameters
        ----------
        img : ndarray
            Grayscale (H, W) or color (H, W, C) image.

        Returns
        -------
        tuple
            On success: ``(pts1.T, pts2.T, kpts, ret)`` where ``pts1``/``pts2``
            are the RANSAC-inlier matched coordinates (one row per match) and
            ``ret`` is the list of (current KeyPoint, previous KeyPoint) pairs
            that passed the ratio/distance tests. On the first frame, or when
            no features / too few matches are found, returns the same sentinel
            as before: ``(np.array([0]), np.array([0]), 0, 0)``.
        """
        _SENTINEL = (np.array([0]), np.array([0]), 0, 0)

        # detection: Shi-Tomasi corners need a single-channel uint8 image
        if len(img.shape) > 2:  # color image -> average channels to grayscale
            gray = np.mean(img, axis=2).astype(np.uint8)
        else:
            gray = np.asarray(img).astype(np.uint8)
        pts = cv2.goodFeaturesToTrack(image=gray, maxCorners=4500,
                                      qualityLevel=0.02, minDistance=3)
        if pts is None:
            # Fixed: goodFeaturesToTrack returns None when no corners are
            # found, which previously crashed the comprehension below.
            return _SENTINEL

        # extraction: wrap corners as KeyPoint objects, compute ORB descriptors
        kpts = [cv2.KeyPoint(float(p[0][0]), float(p[0][1]), size=30) for p in pts]
        kpts, des = self.orb.compute(img, kpts)

        if self.last is None:
            # first frame: nothing to match against yet; remember this frame
            self.last = {"kpts": kpts, "des": des}
            return _SENTINEL

        # matching: k-NN with k=2 for Lowe's ratio test. knnMatch may return
        # fewer than 2 neighbours per query; keep only full pairs so the
        # (m, n) unpacking below cannot fail.
        matches = [pair for pair in self.bf.knnMatch(des, self.last["des"], k=2)
                   if len(pair) == 2]

        ret = []  # (current KeyPoint, previous KeyPoint) pairs passing the tests
        for m, n in matches:
            # ratio test (0.55) plus absolute Hamming-distance cut (64)
            if m.distance < 0.55 * n.distance and m.distance < 64:
                ret.append((kpts[m.queryIdx], self.last["kpts"][m.trainIdx]))

        # NOTE: as in the original, the homography is estimated from ALL k-NN
        # matches (RANSAC rejects outliers), not only the ratio-test survivors.
        coords1_match_pts = np.asarray([kpts[m.queryIdx].pt for m, _ in matches])
        coords2_match_pts = np.asarray([self.last["kpts"][m.trainIdx].pt
                                        for m, _ in matches])

        # Fixed: findHomography needs >= 4 correspondences and can return a
        # None mask on degenerate input; both previously crashed.
        if len(matches) < 4:
            self.last = {"kpts": kpts, "des": des}
            return _SENTINEL
        _, mask = cv2.findHomography(coords1_match_pts, coords2_match_pts,
                                     cv2.RANSAC, 100.0)
        if mask is None:
            self.last = {"kpts": kpts, "des": des}
            return _SENTINEL
        mask = mask.ravel()
        pts1 = coords1_match_pts[mask == 1]
        pts2 = coords2_match_pts[mask == 1]

        # save only the keypoints and descriptors of this frame, not the image
        self.last = {"kpts": kpts, "des": des}
        return pts1.T, pts2.T, kpts, ret