find_obj.py 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197
#!/usr/bin/env python

'''
Feature-based image matching sample.
Note, that you will need the https://github.com/opencv/opencv_contrib repo for SIFT and SURF
USAGE
find_obj.py [--feature=<sift|surf|orb|akaze|brisk>[-flann]] [ <image1> <image2> ]
--feature - Feature to use. Can be sift, surf, orb or brisk. Append '-flann'
to feature name to use Flann-based matcher instead bruteforce.
Press left mouse button on a feature point to see its matching point.
'''

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv

# local project module providing anorm (vector norms) and getsize
from common import anorm, getsize

# FLANN index algorithm ids, hard-coded here because the cv2 Python
# bindings do not expose the flann enums.
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
  18. def init_feature(name):
  19. chunks = name.split('-')
  20. if chunks[0] == 'sift':
  21. detector = cv.SIFT_create()
  22. norm = cv.NORM_L2
  23. elif chunks[0] == 'surf':
  24. detector = cv.xfeatures2d.SURF_create(800)
  25. norm = cv.NORM_L2
  26. elif chunks[0] == 'orb':
  27. detector = cv.ORB_create(400)
  28. norm = cv.NORM_HAMMING
  29. elif chunks[0] == 'akaze':
  30. detector = cv.AKAZE_create()
  31. norm = cv.NORM_HAMMING
  32. elif chunks[0] == 'brisk':
  33. detector = cv.BRISK_create()
  34. norm = cv.NORM_HAMMING
  35. else:
  36. return None, None
  37. if 'flann' in chunks:
  38. if norm == cv.NORM_L2:
  39. flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
  40. else:
  41. flann_params= dict(algorithm = FLANN_INDEX_LSH,
  42. table_number = 6, # 12
  43. key_size = 12, # 20
  44. multi_probe_level = 1) #2
  45. matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
  46. else:
  47. matcher = cv.BFMatcher(norm)
  48. return detector, matcher
  49. def filter_matches(kp1, kp2, matches, ratio = 0.75):
  50. mkp1, mkp2 = [], []
  51. for m in matches:
  52. if len(m) == 2 and m[0].distance < m[1].distance * ratio:
  53. m = m[0]
  54. mkp1.append( kp1[m.queryIdx] )
  55. mkp2.append( kp2[m.trainIdx] )
  56. p1 = np.float32([kp.pt for kp in mkp1])
  57. p2 = np.float32([kp.pt for kp in mkp2])
  58. kp_pairs = zip(mkp1, mkp2)
  59. return p1, p2, list(kp_pairs)
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
    """Show both grayscale images side by side with their matches drawn.

    win      -- window name for cv.imshow
    img1/2   -- single-channel images (converted to BGR for drawing)
    kp_pairs -- list of (kp1, kp2) keypoint pairs from filter_matches
    status   -- per-pair inlier mask from findHomography (None = all inliers)
    H        -- homography mapping img1 onto img2 (None = no outline drawn)

    Installs a mouse callback: left-click near a feature point highlights
    the matches passing through it.  Returns the composed visualisation.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # Side-by-side canvas: img1 on the left, img2 on the right.
    vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1+w2] = img2
    vis = cv.cvtColor(vis, cv.COLOR_GRAY2BGR)

    if H is not None:
        # Project img1's corners into img2 and outline the found object.
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32( cv.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
        cv.polylines(vis, [corners], True, (255, 255, 255))

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    status = status.reshape((len(kp_pairs), 1))
    # Integer pixel coordinates; right-image points shifted by w1.
    p1, p2 = [], []  # python 2 / python 3 change of zip unpacking
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [w1, 0]))

    green = (0, 255, 0)
    red = (0, 0, 255)
    kp_color = (51, 103, 236)
    # Inliers: small green dots on both sides.  Outliers: red crosses.
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            col = green
            cv.circle(vis, (x1, y1), 2, col, -1)
            cv.circle(vis, (x2, y2), 2, col, -1)
        else:
            col = red
            r = 2
            thickness = 3
            cv.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
            cv.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
            cv.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
            cv.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
    # Snapshot before the connecting lines, used as the click-highlight base.
    vis0 = vis.copy()
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv.line(vis, (x1, y1), (x2, y2), green)

    cv.imshow(win, vis)

    def onmouse(event, x, y, flags, param):
        # While the left button is held, redraw only matches whose
        # endpoints lie within r pixels of the cursor.
        cur_vis = vis
        if flags & cv.EVENT_FLAG_LBUTTON:
            cur_vis = vis0.copy()
            r = 8
            m = (anorm(np.array(p1) - (x, y)) < r) | (anorm(np.array(p2) - (x, y)) < r)
            idxs = np.where(m)[0]
            kp1s, kp2s = [], []
            for i in idxs:
                (x1, y1), (x2, y2) = p1[i], p2[i]
                col = (red, green)[status[i][0]]
                cv.line(cur_vis, (x1, y1), (x2, y2), col)
                kp1, kp2 = kp_pairs[i]
                kp1s.append(kp1)
                kp2s.append(kp2)
            # flags=4 — presumably DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
            # (size+orientation circles); confirm against cv docs.
            cur_vis = cv.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color)
            cur_vis[:,w1:] = cv.drawKeypoints(cur_vis[:,w1:], kp2s, None, flags=4, color=kp_color)

        cv.imshow(win, cur_vis)
    cv.setMouseCallback(win, onmouse)
    return vis
  119. def main():
  120. import sys, getopt
  121. opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
  122. opts = dict(opts)
  123. feature_name = opts.get('--feature', 'brisk')
  124. try:
  125. fn1, fn2 = args
  126. except:
  127. fn1 = 'box.png'
  128. fn2 = 'box_in_scene.png'
  129. img1 = cv.imread(cv.samples.findFile(fn1), cv.IMREAD_GRAYSCALE)
  130. img2 = cv.imread(cv.samples.findFile(fn2), cv.IMREAD_GRAYSCALE)
  131. detector, matcher = init_feature(feature_name)
  132. if img1 is None:
  133. print('Failed to load fn1:', fn1)
  134. sys.exit(1)
  135. if img2 is None:
  136. print('Failed to load fn2:', fn2)
  137. sys.exit(1)
  138. if detector is None:
  139. print('unknown feature:', feature_name)
  140. sys.exit(1)
  141. print('using', feature_name)
  142. kp1, desc1 = detector.detectAndCompute(img1, None)
  143. kp2, desc2 = detector.detectAndCompute(img2, None)
  144. print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))
  145. def match_and_draw(win):
  146. print('matching...')
  147. raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
  148. p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
  149. if len(p1) >= 4:
  150. H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0)
  151. print('%d / %d inliers/matched' % (np.sum(status), len(status)))
  152. else:
  153. H, status = None, None
  154. print('%d matches found, not enough for homography estimation' % len(p1))
  155. _vis = explore_match(win, img1, img2, kp_pairs, status, H)
  156. match_and_draw('find_obj')
  157. cv.waitKey()
  158. print('Done')
if __name__ == '__main__':
    # Print the usage docstring, run the demo, then tear down HighGUI windows.
    print(__doc__)
    main()
    cv.destroyAllWindows()