这里是博主自己使用不同的方法对相似度的测试
最后还是觉得最后一个方法的准确度比较高,它有效地降低了图片尺寸不一致导致的不准确性
"""Image-similarity comparison via ORB feature matching.

Three earlier approaches (OpenCV SSIM, skimage SSIM, perceptual hash) are
kept below as commented-out reference code; ORB matching proved the most
robust to images of different sizes.
"""
import cv2
import imagehash as imagehash
import numpy as np
from PIL import Image
from skimage import io, transform
from skimage.metrics import structural_similarity as ssim

# --- Experiment 1: SSIM via OpenCV (abandoned) ---------------------------
# image1 = cv2.imread(r'D:\BusinessProject\desktop-program\test_test\IMG\1.jpg')
# image2 = cv2.imread(r'D:\BusinessProject\desktop-program\test_test\IMG\2.jpg')
# image1 = cv2.resize(image1, (400, 400))
# image2 = cv2.resize(image2, (400, 400))
# gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
# gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
# NOTE(review): cv2 has no compareSSIM function (it lives in
# skimage.metrics / the old scikit-image compare_ssim) — this never ran.
# ssim_score = cv2.compareSSIM(gray1, gray2)
# print("SSIM Score:", ssim_score)

# --- Experiment 2: SSIM via skimage (abandoned) --------------------------
# image1 = io.imread(r'D:\BusinessProject\desktop-program\test_test\IMG\1.jpg')
# image2 = io.imread(r'D:\BusinessProject\desktop-program\test_test\IMG\2.jpg')
# # Window size must not exceed the smaller image side.
# image1_height, image1_width = image1.shape[:2]
# image2_height, image2_width = image2.shape[:2]
# win_size = min(image1_height, image1_width, image2_height, image2_width)
# similarity = ssim(
#     image1,
#     image2,
#     win_size=min(win_size, 7),  # cap at 7, the ssim default window
#     multichannel=True,
# )
# print("image similarity:", similarity)

# --- Experiment 3: perceptual hash (abandoned) ---------------------------
# image1 = Image.open(r'D:\BusinessProject\desktop-program\test_test\IMG\1.jpg')
# image2 = Image.open(r'D:\BusinessProject\desktop-program\test_test\IMG\4(1).jpg')
# phash1 = imagehash.phash(image1)
# phash2 = imagehash.phash(image2)
# hamming_distance = phash1 - phash2
# NOTE(review): len(phash1.hash) is the number of ROWS of the 8x8 hash
# matrix (8), not the bit count; the distance should be normalized by
# phash1.hash.size (64) for a true 0-1 score.
# print("similarity score: ", 1 - (hamming_distance / len(phash1.hash)))


def orb_similarity(path1, path2):
    """Return an ORB feature-match similarity score (0-100) for two images.

    Both images are loaded in grayscale, ORB keypoints/descriptors are
    extracted, and descriptors are matched with a brute-force Hamming
    matcher with cross-checking. The score is the fraction of image 1's
    keypoints that found a mutual-best match, as a percentage. Because ORB
    works on local features, the score is insensitive to differing image
    sizes.

    Args:
        path1: filesystem path of the reference image.
        path2: filesystem path of the image to compare against.

    Returns:
        float similarity score in [0, 100].

    Raises:
        FileNotFoundError: if either image cannot be read.
        ValueError: if either image yields no ORB descriptors.
    """
    image1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    image2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    # cv2.imread signals failure by returning None instead of raising.
    if image1 is None:
        raise FileNotFoundError(f"cannot read image: {path1}")
    if image2 is None:
        raise FileNotFoundError(f"cannot read image: {path2}")

    orb = cv2.ORB_create()
    keypoints1, descriptors1 = orb.detectAndCompute(image1, None)
    keypoints2, descriptors2 = orb.detectAndCompute(image2, None)
    # detectAndCompute returns None descriptors for featureless images;
    # BFMatcher.match would crash on None, and an empty keypoints1 would
    # divide by zero below.
    if descriptors1 is None or descriptors2 is None or not keypoints1:
        raise ValueError("no ORB features detected in at least one image")

    # NORM_HAMMING is the correct metric for ORB's binary descriptors;
    # crossCheck keeps only mutually-best matches.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(descriptors1, descriptors2),
                     key=lambda m: m.distance)
    return len(matches) / len(keypoints1) * 100


if __name__ == "__main__":
    similarity_score = orb_similarity(
        r'D:\BusinessProject\desktop-program\test_test\IMG\1.jpg',
        r'D:\BusinessProject\desktop-program\test_test\IMG\4(1).jpg',
    )
    print(similarity_score)