2017-03-21 5 views
1

하나의 이미지에서 한 유형의 여러 객체를 어떻게 찾을 수 있습니까? ORB feature finder와 무차별 대입기 (opencv = 3.2.0)를 사용합니다.여러 객체와 일치하는 OpenCV 기능

내 소스 코드 :

import numpy as np
import cv2
from matplotlib import pyplot as plt

# Minimum number of good (ratio-test) matches required before attempting
# a homography between query and train image.
MIN_MATCH_COUNT = 10

img1 = cv2.imread('box.png', 0)   # queryImage (loaded as grayscale)
img2 = cv2.imread('box1.png', 0)  # trainImage (loaded as grayscale)

# Initiate the ORB detector: up to 10000 features, scale factor 1.2,
# 9 pyramid levels, and a small edge threshold so features near the
# image border are still detected.
orb = cv2.ORB_create(10000, 1.2, nlevels=9, edgeThreshold=4)

# Find the keypoints and descriptors with ORB.
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# ORB produces binary descriptors, so match them with the Hamming norm
# instead of casting them to float32 and using the default L2 matcher.
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
matches = bf.knnMatch(des1, des2, k=2)

# Store all the good matches as per Lowe's ratio test.
# knnMatch may return fewer than 2 neighbours for some descriptors,
# so guard the unpacking instead of assuming pairs.
good = []
for pair in matches:
    if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
        good.append(pair[0])

# Initialized up front so drawMatches below never sees an unbound name
# (the original raised NameError on the "No Homography" path).
matchesMask = None

if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # RANSAC with a 2-pixel reprojection threshold.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 2)

    if M is None:
        print("No Homography")
    else:
        matchesMask = mask.ravel().tolist()

        # Project the query image's corners into the train image and
        # outline the detected object there.
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))

draw_params = dict(matchColor=(0, 255, 0),   # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)

img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)

plt.imshow(img3, 'gray'), plt.show()

그러나 이 코드는 쿼리 이미지의 인스턴스를 하나만 찾습니다.

쿼리 이미지

Query Image

테스트 이미지 Test Image

결과 Result

그래서 객체가 2개 있는데도 하나만 찾습니다. 제가 무엇을 잘못하고 있나요? 이 문제를 해결하려면 무엇을 사용할 수 있을까요?

+1

첫 번째 개체를 찾기

Result 2

Result 1

그다음 찾은 객체의 마스크 영역을 계산해 제외(변형)하고, 모든 객체를 얻을 때까지 반복하면 됩니다. – m3h0w

+0

@ m3h0w 감사합니다! –

+0

@m3h0w 그러니까 이렇게 하면 되는 거군요: 1. 특징을 계산하고 2. 변환을 계산하고 3. 첫 번째 객체를 찾은 다음 4. 찾은 객체의 마스크 영역을 제외하며 모든 객체를 얻을 때까지 반복 –

답변

2

내 소스는 ORB 디스크립터를 사용하여 여러 개체를 찾을

import cv2
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import MeanShift, estimate_bandwidth

# Minimum number of good (ratio-test) matches required per cluster before
# attempting a homography.
MIN_MATCH_COUNT = 10

img1 = cv2.imread('box.png', 0)   # queryImage (loaded as grayscale)
img2 = cv2.imread('box1.png', 0)  # trainImage (loaded as grayscale)

orb = cv2.ORB_create(10000, 1.2, nlevels=8, edgeThreshold=5)

# Find the keypoints and descriptors with ORB.
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# Cluster the train-image keypoints by position with MeanShift so that
# each cluster corresponds (roughly) to one object instance in the scene.
# (The original seeded the array with kp2[0].pt, appended everything, then
# sliced the seed off; a comprehension builds the same (N, 2) array.)
x = np.array([kp.pt for kp in kp2])

bandwidth = estimate_bandwidth(x, quantile=0.1, n_samples=500)

ms = MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=True)
ms.fit(x)
labels = ms.labels_
cluster_centers = ms.cluster_centers_

labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)

# Group the train-image keypoints per cluster label.
# NOTE: range(), not xrange() — xrange is Python-2-only and crashes on 3.x.
s = [None] * n_clusters_
for i in range(n_clusters_):
    d, = np.where(labels == i)
    print(len(d))
    s[i] = [kp2[xx] for xx in d]

# Keep the full descriptor set; kp2/des2 are rebound per cluster below.
des2_ = des2

# The FLANN matcher and the float32 copy of the query descriptors are
# loop-invariant, so build them once instead of once per cluster.
# NOTE(review): a KD-tree index over float32 copies of binary ORB
# descriptors is a workaround; FLANN's LSH index (or a Hamming-norm
# BFMatcher) is the canonical choice for ORB.
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
des1 = np.float32(des1)

# Run the single-object matching pipeline once per cluster.
for i in range(n_clusters_):
    kp2 = s[i]
    d, = np.where(labels == i)
    des2 = np.float32(des2_[d, ])

    matches = flann.knnMatch(des1, des2, 2)

    # Store all the good matches as per Lowe's ratio test.  knnMatch may
    # return fewer than 2 neighbours for some descriptors, so guard it.
    good = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
            good.append(pair[0])

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # RANSAC with a 2-pixel reprojection threshold.
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 2)

        if M is None:
            print("No Homography")
        else:
            matchesMask = mask.ravel().tolist()

            # Project the query image's corners through the homography and
            # outline this detected instance in the train image.
            h, w = img1.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

            draw_params = dict(matchColor=(0, 255, 0),   # draw matches in green color
                               singlePointColor=None,
                               matchesMask=matchesMask,  # draw only inliers
                               flags=2)

            img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)

            plt.imshow(img3, 'gray'), plt.show()
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None

결과 이미지

Result 3