Goal
In this chapter, we will mix feature matching with findHomography from the calib3d module to find a known object in a complex image. First, as usual, we find the SIFT features in both images and apply the ratio test to keep the best matches.
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 10
img1 = cv.imread('box.png',0)          # queryImage
img2 = cv.imread('box_in_scene.png',0) # trainImage
# Initiate SIFT detector
sift = cv.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
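The 0.7 factor above is Lowe's ratio test, which discards ambiguous matches. As a minimal alternative sketch (an assumption, not part of the original code): in OpenCV 4.4.0 and later SIFT is exposed in the main module as cv.SIFT_create(), and a brute-force matcher can stand in for FLANN when the flann bindings are unavailable; the file names and the 0.7 ratio simply mirror the code above.

import cv2 as cv
# Assumption: OpenCV >= 4.4.0, where SIFT lives in the main module.
img1 = cv.imread('box.png',0)          # queryImage
img2 = cv.imread('box_in_scene.png',0) # trainImage
sift = cv.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# Brute-force matcher as a drop-in replacement for FLANN (no index tuning needed).
bf = cv.BFMatcher()
matches = bf.knnMatch(des1,des2,k=2)
good = [m for m,n in matches if m.distance < 0.7*n.distance]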
Now we set a condition that at least 10 matches (defined by MIN_MATCH_COUNT) must be present to find the object. Otherwise we simply show a message saying that not enough matches were found. If enough matches are found, we extract the locations of the matched keypoints in both images. They are passed to find the perspective transformation. Once we get this 3x3 transformation matrix, we use it to transform the corners of the query image to the corresponding points in the train image. Then we draw it.
if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h,w = img1.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv.perspectiveTransform(pts,M)
    img2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None
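As a quick sanity check on the estimated homography, the RANSAC mask returned by cv.findHomography can be used to count how many of the good matches are inliers. This is a minimal sketch, assuming the block above found enough matches so that mask is defined:

if matchesMask is not None:
    inliers = int(mask.sum())  # mask entries equal to 1 mark RANSAC inliers
    print( "RANSAC inliers: {}/{}".format(inliers, len(good)) )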
Finally, we draw the inliers (if the object was found successfully) or the matching keypoints (if it failed).
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img3 = cv.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
plt.imshow(img3, 'gray'),plt.show()
See the result below. The object is marked in white in the cluttered image:
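Because M maps points of the query image into the scene, the same homography can also be used to pull a rectified view of the detected object back out of the scene image. This is a hedged extra sketch, assuming the object was found so that M, h and w from the code above are defined (note that img2 already carries the white outline drawn earlier):

# With WARP_INVERSE_MAP, each output pixel (x,y) is sampled from img2 at M*(x,y),
# i.e. M is used directly as the inverse map, giving the object in the query frame.
rectified = cv.warpPerspective(img2, M, (w, h),
                               flags=cv.INTER_LINEAR | cv.WARP_INVERSE_MAP)
cv.imwrite('box_rectified.png', rectified)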