opencv/samples/cpp/generic_descriptor_match.cpp at 2.4 · opencv/opencv
samples\cpp\generic_descriptor_match.cpp 多种描述符匹配算法,提取图像特征点
#include "opencv2/opencv_modules.hpp"
#include <cstdio>

// This sample requires OpenCV's nonfree module (SURF). Fail gracefully when it is absent.
#ifndef HAVE_OPENCV_NONFREE
int main(int, char**)
{
    printf("The sample requires nonfree module that is not available in your OpenCV distribution.\n");
    return -1;
}
#else

# include "opencv2/opencv_modules.hpp"
# include "opencv2/calib3d/calib3d.hpp"
# include "opencv2/features2d/features2d.hpp"
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/imgproc/imgproc.hpp"
# include "opencv2/nonfree/nonfree.hpp"
# include <algorithm>

using namespace cv;

// Print usage information for the sample.
static void help()
{
    printf("使用SURF描述子匹配两幅图像的关键点\n");
    printf("格式: \n./generic_descriptor_match <image1> <image2> <algorithm> <XML参数文件>\n");
    printf("例如: ./generic_descriptor_match ../c/scene_l.bmp ../c/scene_r.bmp FERN fern_params.xml\n");
}

// Draws both images side by side, marks all keypoints with red circles and
// connects matched pairs (img2 query -> img1 train) with green lines.
Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1,
                        const Mat& img2, const vector<KeyPoint>& features2,
                        const vector<DMatch>& desc_idx);

int main(int argc, char** argv)
{
    // Exactly four arguments are required; otherwise show usage and quit.
    if (argc != 5)
    {
        help();
        return 0;
    }

    std::string img1_name = std::string(argv[1]);
    std::string img2_name = std::string(argv[2]);
    std::string alg_name = std::string(argv[3]);        // matcher algorithm name, e.g. FERN
    std::string params_filename = std::string(argv[4]); // XML file with the matcher parameters

    // Create the generic descriptor matcher from the algorithm name and parameter file.
    Ptr<GenericDescriptorMatcher> descriptorMatcher = GenericDescriptorMatcher::create(alg_name, params_filename);
    if( descriptorMatcher == 0 )
    {
        printf ("无法创建描述子匹配器\n");
        return 0;
    }

    // Load both input images as grayscale.
    Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE);
    Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE);

    // FIX: the original sample never checked the load result and crashed inside
    // SURF when either path was wrong.
    if (img1.empty() || img2.empty())
    {
        printf("Could not load image %s or %s\n", img1_name.c_str(), img2_name.c_str());
        return -1;
    }

    // Detect keypoints with SURF (hessian threshold 5000).
    SURF surf_extractor(5.0e3);
    vector<KeyPoint> keypoints1, keypoints2;

    surf_extractor(img1, Mat(), keypoints1);
    printf("从第一幅图像中提取到 %d 个关键点\n", (int)keypoints1.size());

    surf_extractor(img2, Mat(), keypoints2);
    printf("从第二幅图像中提取到 %d 个关键点\n", (int)keypoints2.size());

    // Match keypoints of img2 (query set) against keypoints of img1 (train set).
    vector<DMatch> matches2to1;
    printf("正在寻找最近邻匹配... \n");
    descriptorMatcher->match( img2, keypoints2, img1, keypoints1, matches2to1 );
    printf("匹配完成\n");

    // Visualize and display the correspondences until a key is pressed.
    Mat img_corr = DrawCorrespondences(img1, keypoints1, img2, keypoints2, matches2to1);
    imshow("correspondences", img_corr);
    waitKey(0);
}

Mat DrawCorrespondences(const Mat& img1, const vector<KeyPoint>& features1,
                        const Mat& img2, const vector<KeyPoint>& features2,
                        const vector<DMatch>& desc_idx)
{
    // Canvas wide enough for both images side by side, black background.
    Mat img_corr(Size(img1.cols + img2.cols, MAX(img1.rows, img2.rows)), CV_8UC3);
    img_corr = Scalar::all(0);

    // img1 on the left, converted to color so colored markers are visible.
    Mat part = img_corr(Rect(0, 0, img1.cols, img1.rows));
    cvtColor(img1, part, COLOR_GRAY2RGB);

    // img2 on the right.
    part = img_corr(Rect(img1.cols, 0, img2.cols, img2.rows));
    cvtColor(img2, part, COLOR_GRAY2RGB);

    // Red circles on every keypoint of img1.
    for (size_t i = 0; i < features1.size(); i++)
    {
        circle(img_corr, features1[i].pt, 3, Scalar(0, 0, 255), -1);
    }

    // Red circles on img2's keypoints plus green lines to their matches in img1.
    // FIX: the original indexed desc_idx[i] for every features2[i] and used
    // trainIdx unchecked — a partial or invalid match set read out of bounds.
    size_t count = std::min(features2.size(), desc_idx.size());
    for (size_t i = 0; i < count; i++)
    {
        // Shift img2 coordinates to the right half of the canvas.
        Point pt(cvRound(features2[i].pt.x + img1.cols), cvRound(features2[i].pt.y));
        circle(img_corr, pt, 3, Scalar(0, 0, 255), -1);
        int train = desc_idx[i].trainIdx;
        if (train >= 0 && train < (int)features1.size())
            line(img_corr, features1[train].pt, pt, Scalar(0, 255, 0));
    }
    return img_corr;
}
#endif // HAVE_OPENCV_NONFREE
这个示例文件演示了如何使用OpenCV中的通用描述子匹配器(Generic Descriptor Matcher)对两幅图像进行特征点匹配,主要功能包括:
- 使用SURF算法从两幅图像提取关键点
- 使用指定的匹配算法(如FERN)匹配两幅图像之间的关键点
- 可视化显示匹配结果:将两幅图像并排显示并用线条连接匹配的特征点
我尝试编译运行,发现了多种问题。
首先,老版本 OpenCV 的 CMakeLists 不支持最新版的 CMake 4+。
于是我尝试使用旧版Cmake3.10+
发现在编译时,我的 C++ 编译工具链版本又过高。
得出结论:如果没有配套的虚拟环境,难以复现老版本 OpenCV 的 sample。
遂放弃旧版,学习新版本
import cv2
import numpy as np

# Load the two demo images as grayscale; abort with a message if either is missing.
img1 = cv2.imread('img1.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('img2.jpg', cv2.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('请确保 demo 目录下有 img1.jpg 和 img2.jpg 两张图片!')
    exit(1)


# SIFT keypoint matching: detect + describe with SIFT, brute-force L2 matching,
# draw the 30 best matches. Returns the visualization image, or None on failure.
def sift_match(img1, img2):
    if not hasattr(cv2, 'SIFT_create'):
        print('当前 OpenCV 不支持 SIFT')
        return None
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # FIX: detectAndCompute returns des=None when no keypoints are found;
    # BFMatcher.match would raise on None, so bail out instead.
    if des1 is None or des2 is None:
        return None
    matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = matcher.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    img_matches = cv2.drawMatches(img1, kp1, img2, kp2, matches[:30], None, flags=2)
    return img_matches


# Harris corners + ORB descriptors: detect corners with cornerHarris, then
# compute ORB descriptors at those locations and brute-force Hamming match.
def harris_orb_match(img1, img2):
    orb = cv2.ORB_create()

    # Run Harris corner detection and convert the responses above a relative
    # threshold into cv2.KeyPoint objects (note: argwhere gives (row, col),
    # KeyPoint wants (x, y), hence the index swap).
    def get_harris_keypoints(img):
        harris = cv2.cornerHarris(np.float32(img), 2, 3, 0.04)
        harris = cv2.dilate(harris, None)
        thresh = 0.01 * harris.max()
        keypoints = np.argwhere(harris > thresh)
        keypoints = [cv2.KeyPoint(float(pt[1]), float(pt[0]), 3) for pt in keypoints]
        return keypoints

    kp1 = get_harris_keypoints(img1)
    kp2 = get_harris_keypoints(img2)
    # ORB computes descriptors only; it may drop keypoints and returns des=None
    # when none survive.
    kp1, des1 = orb.compute(img1, kp1)
    kp2, des2 = orb.compute(img2, kp2)
    # FIX: guard against None descriptors before matching (same crash as above).
    if des1 is None or des2 is None:
        return None
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    img_matches = cv2.drawMatches(img1, kp1, img2, kp2, matches[:30], None, flags=2)
    return img_matches


# ORB keypoint matching: detect + describe with ORB, brute-force Hamming matching.
def orb_match(img1, img2):
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # FIX: guard against None descriptors before matching.
    if des1 is None or des2 is None:
        return None
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    img_matches = cv2.drawMatches(img1, kp1, img2, kp2, matches[:30], None, flags=2)
    return img_matches


# Show each matching result; skip any method that produced no result.
# FIX: the original only None-checked the SIFT result; the other two were
# passed to imshow unchecked.
sift_img = sift_match(img1, img2)
if sift_img is not None:
    cv2.imshow('SIFT Feature Matching', sift_img)
harris_img = harris_orb_match(img1, img2)
if harris_img is not None:
    cv2.imshow('Harris+ORB Feature Matching', harris_img)
orb_img = orb_match(img1, img2)
if orb_img is not None:
    cv2.imshow('ORB Feature Matching', orb_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
img1
img2
results
Harris+ORB
SIFT
ORB
SIFT算法
ORB算法
Harris角点 +ORB