• OpenCV Notes


    1、2023/9/7

    import cv2
    import numpy as np

    if __name__ == '__main__':
        print("Hello World!")
        src = cv2.imread('pcb.png', 0)      # read as grayscale
        print(src.size)     # 360960
        print(src.shape)    # (480, 752) = (h, w)
        crop = src[:128, :64]
        print(crop.shape)   # (128, 64)
        # filters
        dst_blur = cv2.blur(src, (5, 5))    # (5, 5) --- kernel size
        dst_median = cv2.medianBlur(src, 5)     # ksize = 3, 5, 7, ...
        dst_box = cv2.boxFilter(src, -1, (5, 5))    # same as blur; ddepth --- 8U, 8S, 32F, ...
        dst_bilateral = cv2.bilateralFilter(src, 5, 150, 150)   # bilateral filter
        dst_gauss = cv2.GaussianBlur(src, (5, 5), 1)    # Gaussian filter: ksize, sigma
        kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype='float32') / 16  # conv kernel
        dst_filter = cv2.filter2D(src, -1, kernel)      # convolve
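        # Added note: this kernel is the 3x3 binomial approximation of a Gaussian, so
        # dst_filter should be very close to cv2.GaussianBlur(src, (3, 3), 0); for
        # ksize=3 and sigma<=0 OpenCV falls back to the fixed separable kernel [0.25, 0.5, 0.25].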
        # edge detection
        edges = cv2.Canny(src, 100, 200)    # edges --- binary image
        edges_sch = cv2.Scharr(src, -1, 0, 1)   # Scharr
        edges_laplacian = cv2.Laplacian(src, -1)    # Laplacian --- second order
        edges_sobel_x = cv2.Sobel(dst_median, cv2.CV_32F, 1, 0, ksize=1)    # Sobel, ksize = 1, 3, 5, ...
        edges_sobel_y = cv2.Sobel(dst_median, cv2.CV_32F, 0, 1, ksize=1)
        mag, angle = cv2.cartToPolar(edges_sobel_x, edges_sobel_y, angleInDegrees=True)     # magnitude, angle
        mag = cv2.normalize(mag, None, 0.0, 1.0, norm_type=cv2.NORM_MINMAX)     # scale to 0~1
        minV, maxV, minLoc, maxLoc = cv2.minMaxLoc(mag)
        print(minV, " ", maxV)
        # HOG
        hog = cv2.HOGDescriptor()
        descriptors = hog.compute(crop, (8, 8), (0, 0))     # winStride, padding
        print(descriptors.shape)    # (3780,) 3780 = 7*15*4*9
        print(descriptors[:36])     # first block: 4 cells x 9 bins
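        # Added note: with the default HOGDescriptor parameters --- winSize (64, 128),
        # blockSize (16, 16), blockStride (8, 8), cellSize (8, 8), nbins 9 --- the
        # 64x128 window holds 7x15 block positions, each block covers 2x2 = 4 cells,
        # and every cell gets a 9-bin histogram, hence 7 * 15 * 4 * 9 = 3780 values.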
    33. """
    34. # harris
    35. dst1 = cv2.cornerHarris(src, 3, 23, 0.04)
    36. # sift
    37. sift = cv2.SIFT_create(2000)
    38. key_points1, descriptor1 = sift.detectAndCompute(src, None)
    39. # surf
    40. surf = cv2.xfeatures2d.SURF_create(20000)
    41. key_points2, descriptor2 = surf.detectAndCompute(src, None)
    42. # fast
    43. fast = cv2.FastFeatureDetector_create(threshold=50)
    44. key_points3 = fast.detect(src, None)
    45. # orb
    46. orb = cv2.ORB_create(128)
    47. key_points4, descriptors4 = orb.detectAndCompute(src, None)
    48. # match
    49. cv2.BFMatcher_create() # Brute-force
    50. cv2.FlannBasedMatcher_create() # Flann
    51. """
    52. cv2.imshow('src', dst_median)
    53. cv2.imshow('dst', mag)
    54. cv2.waitKey(0)
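    The commented-out block above only constructs the two matcher objects. As a rough sketch (my own addition, not from the original notes), brute-force matching of ORB descriptors between two images could look like the snippet below; 'pcb2.png' is a hypothetical second image, and NORM_HAMMING is used because ORB descriptors are binary:

    import cv2

    img_a = cv2.imread('pcb.png', 0)        # same image as above
    img_b = cv2.imread('pcb2.png', 0)       # hypothetical second view of the board
    orb = cv2.ORB_create(500)
    kp_a, des_a = orb.detectAndCompute(img_a, None)
    kp_b, des_b = orb.detectAndCompute(img_b, None)
    bf = cv2.BFMatcher_create(cv2.NORM_HAMMING, crossCheck=True)    # brute force, Hamming distance
    matches = sorted(bf.match(des_a, des_b), key=lambda m: m.distance)  # best matches first
    out = cv2.drawMatches(img_a, kp_a, img_b, kp_b, matches[:20], None, flags=2)
    cv2.imshow('bf_matches', out)
    cv2.waitKey(0)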

    2、2023/9/8

    import cv2
    import numpy as np

    if __name__ == '__main__':
        img1 = cv2.imread('bga_r_01.png')   # queryImage
        img2 = cv2.imread('bga_r_06.png')   # trainImage
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        # SIFT features
        sift = cv2.SIFT_create(100)
        kp1, des1 = sift.detectAndCompute(gray1, None)
        kp2, des2 = sift.detectAndCompute(gray2, None)
        print('kp1 size = ', len(kp1))
        print('kp2 size = ', len(kp2))
        # draw the keypoints on the images
        flag = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
        img1 = cv2.drawKeypoints(image=img1, keypoints=kp1, outImage=img1, color=(255, 0, 255), flags=flag)
        img2 = cv2.drawKeypoints(image=img2, keypoints=kp2, outImage=img2, color=(255, 0, 255), flags=flag)
        # show the images
        cv2.imshow('sift_keypoints1', img1)
        cv2.imshow('sift_keypoints2', img2)
        cv2.waitKey(100)
        FLANN_INDEX_KDTREE = 1      # kd-tree index (0 would be FLANN's linear search)
        indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        searchParams = dict(checks=50)
        flann = cv2.FlannBasedMatcher(indexParams, searchParams)
        matches = flann.knnMatch(des1, des2, k=2)
        print('matches size = ', len(matches))      # 101 match pairs
        # Lowe's ratio test --- accept a match if the nearest distance divided by the
        # second-nearest distance is below a threshold T
        goodMatches = []
        for m, n in matches:
            if m.distance / n.distance < 0.9:
                goodMatches.append(m)
        print('goodMatches size = ', len(goodMatches))      # 75 match pairs
        if len(goodMatches) > 10:
            # coordinates of the matched points
            src_pts = np.float32([kp1[m.queryIdx].pt for m in goodMatches]).reshape(-1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in goodMatches]).reshape(-1, 2)
            print('src_pts:', len(src_pts), src_pts[0])     # 75
            print('dst_pts:', len(dst_pts), dst_pts[0])     # 75
            # estimate the homography
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)     # reprojection error threshold = 5.0
            matchesMask = mask.ravel().tolist()     # numpy.flatten() vs numpy.ravel()
            indices = np.where(np.array(matchesMask) == 1)
            print('matchesMask:', len(matchesMask))     # 75
            print('mask:', indices[0])      # [23 25 27 31 34 35 40 41 48 50 54 55 56 58]
            # project a quadrilateral from img1 into img2 with the homography
            pts = np.float32([[217, 221], [434, 222], [432, 440], [216, 437]]).reshape(-1, 1, 2)    # (4, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)      # (4, 1, 2)
            img2 = cv2.polylines(img2, [np.int32(dst)], True, (0, 255, 0), 2, cv2.LINE_AA)      # plot
        else:
            print("Not enough matches are found - %d/%d" % (len(goodMatches), 10))
            matchesMask = None
        draw_params = dict(matchColor=(0, 255, 0), singlePointColor=None, matchesMask=matchesMask, flags=2)
        img3 = cv2.drawMatches(img1, kp1, img2, kp2, goodMatches, None, **draw_params)
        cv2.imshow('matches', img3)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
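    A natural follow-up (my own sketch, not part of the original notes) is to use the homography M to warp the query image into the train image's coordinate frame and blend the two, which makes it easy to eyeball how good the alignment is:

    # Sketch: assumes gray1, gray2 and M from the snippet above are still in scope
    h, w = gray2.shape
    warped = cv2.warpPerspective(gray1, M, (w, h))          # map img1 into img2's coordinates
    overlay = cv2.addWeighted(gray2, 0.5, warped, 0.5, 0)   # 50/50 blend to check the alignment
    cv2.imshow('registered', overlay)
    cv2.waitKey(0)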

  • Original article: https://blog.csdn.net/Goodness2020/article/details/132742636