Problem Description
I have an original page in digital form and several scanned versions of the same page. My goal is to deskew the scanned pages so that they match the original page as closely as possible. I know that I could use the Probabilistic Hough Transform as described here to fix the rotation, but the scanned papers also differ in size, since some people scaled the page to a different paper format. I think that OpenCV's findHomography() function, combined with keypoints from SIFT/SURF, is exactly what I need to solve this problem. However, I just can't get my deskew() function to work.
Most of my code stems from the following two sources: http://www.learnopencv.com/homography-examples-using-opencv-python-c/ and http://docs.opencv.org/3.1.0/d1/de0/tutorial_py_feature_homography.html.
import numpy as np
import cv2
from matplotlib import pyplot as plt

# FIXME: doesn't work
def deskew():
    im_out = cv2.warpPerspective(img1, M, (img2.shape[1], img2.shape[0]))
    plt.imshow(im_out, 'gray')
    plt.show()

# resizing images to improve speed
factor = 0.4
img1 = cv2.resize(cv2.imread("image.png", 0), None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)
img2 = cv2.resize(cv2.imread("imageSkewed.png", 0), None, fx=factor, fy=factor, interpolation=cv2.INTER_CUBIC)

surf = cv2.xfeatures2d.SURF_create()
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    deskew()
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

# show matching keypoints
draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
plt.imshow(img3, 'gray')
plt.show()
Turns out I was very close to solving my own problem. Here's the working version of my code:
import numpy as np
import cv2
from matplotlib import pyplot as plt
import math

def deskew():
    im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M), (orig_image.shape[1], orig_image.shape[0]))
    plt.imshow(im_out, 'gray')
    plt.show()

orig_image = cv2.imread(r'image.png', 0)
skewed_image = cv2.imread(r'imageSkewed.png', 0)

surf = cv2.xfeatures2d.SURF_create(400)
kp1, des1 = surf.detectAndCompute(orig_image, None)
kp2, des2 = surf.detectAndCompute(skewed_image, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
    ss = M[0, 1]
    sc = M[0, 0]
    scaleRecovered = math.sqrt(ss * ss + sc * sc)
    thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
    print("Calculated scale difference: %.2f\nCalculated rotation difference: %.2f" % (scaleRecovered, thetaRecovered))

    deskew()
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
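
The key difference from the broken version: findHomography(src_pts, dst_pts) estimates the transform from the original image to the skewed one, so mapping the skewed scan back onto the original requires the inverse, hence np.linalg.inv(M). As a side note, OpenCV can also apply that inverse for you: warpPerspective accepts a WARP_INVERSE_MAP flag that treats the given matrix as the destination-to-source mapping. A minimal, equivalent variant of deskew() under that approach (same images and M as above):

def deskew():
    # WARP_INVERSE_MAP makes warpPerspective interpret M as the map from
    # destination (original) coordinates to source (skewed) coordinates,
    # which is exactly what warping with np.linalg.inv(M) computes.
    im_out = cv2.warpPerspective(skewed_image, M,
                                 (orig_image.shape[1], orig_image.shape[0]),
                                 flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)
    plt.imshow(im_out, 'gray')
    plt.show()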
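One more note for newer OpenCV versions: SURF lives in the opencv-contrib xfeatures2d module and requires a nonfree build, whereas SIFT has been back in the main module since OpenCV 4.4. Assuming such a build, swapping the detector is a one-line change and the rest of the pipeline stays the same, since SIFT descriptors are float32 like SURF's and work with the same FLANN kd-tree settings:

# Drop-in replacement for the SURF detector (OpenCV >= 4.4, no contrib needed)
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(orig_image, None)
kp2, des2 = sift.detectAndCompute(skewed_image, None)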