I want to detect the contours of an image and check whether these contours are present in another image or not.
Use the perspective transform matrix to do a perspective warp on one image to align it with the other. Then take the difference between those two images, threshold it, and get the contours of the remaining regions. Then get the bounding boxes and draw them. See pyimagesearch.com/2014/05/05/… and sicara.ai/blog/2019-07-16-image-registration-deep-learning
– fmw42
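The steps in that comment, minus the warp, can be sketched in a few lines. This is a minimal, hypothetical example (the file names are placeholders) that assumes the two images are already aligned and the same size:

import cv2

# Hypothetical file names; the images are assumed pre-aligned and equal size
ref = cv2.imread("reference.png", cv2.IMREAD_GRAYSCALE)
img = cv2.imread("aligned.png", cv2.IMREAD_GRAYSCALE)

# Difference, threshold, then contours of the changed regions
diff = cv2.absdiff(img, ref)
thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

# Draw a bounding box around each region on a color copy of the reference
result = cv2.cvtColor(ref, cv2.COLOR_GRAY2BGR)
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(result, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imwrite("boxes.png", result)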
1 Answer
You can do a perspective warp to align one image to the other. Convert both to HSV and get the saturation channels, threshold each, and take the absolute difference between the two masks. Then get the contours and finally the bounding boxes of those contours, which are drawn on the first image. Here is how to do that in Python/OpenCV. Note: I do not have SIFT installed, so I will use ORB in its place.
Input 1 (reference image):
Input 2 (image to be warped/aligned):
import cv2
import numpy as np
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2):
    # im2 is the reference and im1 is to be warped to match im2
    # note: the numbering is swapped relative to the main program

    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)

    # Match features
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    # Sort matches by score (newer OpenCV versions return a tuple, so use sorted())
    matches = sorted(matches, key=lambda x: x.distance)
    # Remove the not-so-good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Draw the top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
    cv2.imwrite("pipes_matches.png", imMatches)

    # Extract the locations of the good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use the homography to warp im1 to the size of im2
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))

    return im1Reg, h
if __name__ == '__main__':

    # Read the reference image
    refFilename = "pipes1.jpg"
    print("Reading reference image : ", refFilename)
    imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)

    # Read the image to be aligned
    imFilename = "pipes2.jpg"
    print("Reading image to align : ", imFilename)
    im = cv2.imread(imFilename, cv2.IMREAD_COLOR)

    # The aligned image will be stored in imReg and the estimated homography in h
    imReg, h = alignImages(im, imReference)

    # Print the estimated homography
    print("Estimated homography : \n", h)
    # Convert both images to HSV and get the saturation channels
    refSat = cv2.cvtColor(imReference, cv2.COLOR_BGR2HSV)[:,:,1]
    imSat = cv2.cvtColor(imReg, cv2.COLOR_BGR2HSV)[:,:,1]

    # Otsu threshold
    refThresh = cv2.threshold(refSat, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
    imThresh = cv2.threshold(imSat, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
    # Apply morphology open and close to clean up both masks
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
    refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_OPEN, kernel, iterations=1)
    refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_CLOSE, kernel, iterations=1)
    imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_OPEN, kernel, iterations=1)
    imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_CLOSE, kernel, iterations=1)

    # Get the absolute difference between the two thresholded images
    # (cv2.absdiff keeps everything uint8, so no float conversion is needed)
    diff = cv2.absdiff(imThresh, refThresh)

    # Apply morphology open to remove thin lines caused by slight misalignment of the two images
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13,13))
    diff_cleaned = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel, iterations=1)
    # Get the external contours and draw their bounding boxes on the reference image
    cnts = cv2.findContours(diff_cleaned, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    result = imReference.copy()
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(result, (x, y), (x+w, y+h), (0, 0, 255), 2)
    # Show images
    cv2.imshow('reference', imReference)
    cv2.imshow('image', im)
    cv2.imshow('image_aligned', imReg)
    cv2.imshow('refThresh', refThresh)
    cv2.imshow('imThresh', imThresh)
    cv2.imshow('diff', diff)
    cv2.imshow('diff_cleaned', diff_cleaned)
    cv2.imshow('result', result)
    cv2.waitKey()

    # Save images
    cv2.imwrite('pipes2_aligned.jpg', imReg)
    cv2.imwrite('pipes_diff.png', diff_cleaned)
    cv2.imwrite('pipes_result.png', result)
Matches Image:
Image 2 (perspectively) aligned to Image 1:
Absolute difference image (cleaned):
Result:
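If SIFT is available (it is in the main OpenCV module from version 4.4 onward), the ORB matching step above could be swapped for SIFT with Lowe's ratio test. This is a sketch, not part of the original answer; the function name match_with_sift is made up for illustration:

import cv2
import numpy as np

def match_with_sift(im1Gray, im2Gray, ratio=0.75):
    # SIFT lives in the main module from OpenCV 4.4 onward
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(im1Gray, None)
    kp2, des2 = sift.detectAndCompute(im2Gray, None)

    # SIFT descriptors are float vectors, so match with the L2 norm
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    knn = matcher.knnMatch(des1, des2, k=2)

    # Lowe's ratio test keeps only the distinctive matches
    good = []
    for pair in knn:
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])

    points1 = np.float32([kp1[m.queryIdx].pt for m in good])
    points2 = np.float32([kp2[m.trainIdx].pt for m in good])
    return points1, points2

The returned point arrays can be passed to cv2.findHomography exactly as in alignImages above.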