OPENCV(6) —— 角点检测
2024-08-31 08:08:52
图像特征的类型通常指边界、角点(兴趣点)、斑点(兴趣区域)。角点就是图像的一个局部特征,应用广泛。harris角点检测是一种直接基于灰度图像的角点提取算法,稳定性高,尤其对L型角点检测精度高,但由于采用了高斯滤波,运算速度相对较慢,角点信息有丢失和位置偏移的现象,而且角点提取有聚簇现象。(下面的示例代码使用的则是 SURF 特征检测,而非 Harris。)
- Use the FeatureDetector interface in order to find interest points. Specifically:
- Use the SurfFeatureDetector and its function detect to perform the detection process
- Use the function drawKeypoints to draw the detected keypoints
// NOTE: the original blog export fused several #include directives onto one
// line, which is ill-formed C++ (a preprocessor directive must end at the
// newline). One directive per line below.
// stdafx.h is the Visual Studio precompiled-header stub; drop it on other toolchains.
#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

// Forward declaration: prints this sample's usage string.
void readme();

/** @function main */
int main( int argc, char** argv )
{
/*
if( argc != 3 )
{ readme(); return -1; } */ Mat img_1 = imread( "zhang.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "guo.jpg", CV_LOAD_IMAGE_GRAYSCALE ); if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; } //-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400; SurfFeatureDetector detector( minHessian ); std::vector<KeyPoint> keypoints_1, keypoints_2; detector.detect( img_1, keypoints_1 ); // 特征点向量
detector.detect( img_2, keypoints_2 ); //-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2; drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT ); //-- Show detected (drawn) keypoints
imshow("Keypoints 1", img_keypoints_1 );
imshow("Keypoints 2", img_keypoints_2 ); waitKey(0); return 0;
} /** @function readme */
void readme()
{
    // Print the intended command-line usage of this sample.
    std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl;
}
检测keypoints点的检测器是SURF,获取描述子也是用到SURF来描述,而用到的匹配器是FlannBased,最后通过findHomography求出单应性矩阵;本例随后把该矩阵手工作用于目标角点(效果等同于perspectiveTransform)来获得最终的目标位置。
findHomography 函数是求两幅图像的单应性矩阵,它是一个3*3的矩阵
#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/calib3d/calib3d.hpp>  // findHomography (forward slashes: portable, '\' is Windows-only)

using namespace cv;

void readme();

/**
 * @function main
 * @brief Detects SURF keypoints in two images, matches their descriptors with
 *        FLANN, keeps only close matches, and locates the object from img_1
 *        inside img_2 via a RANSAC-estimated homography.
 * @return 0 on success, -1 on load/matching failure.
 */
int main( int argc, char** argv )
{
    /*
    if( argc != 3 )
    { return -1; }*/

    // Load both inputs as single-channel grayscale; paths are hard-coded.
    Mat img_1 = imread( "test1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_2 = imread( "test2.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    if( !img_1.data || !img_2.data )
    { return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect( img_1, keypoints_1 );
    detector.detect( img_2, keypoints_2 );   // keypoint sets are fixed from here on

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;       // one SURF descriptor per keypoint
    Mat descriptors_1, descriptors_2;
    extractor.compute( img_1, keypoints_1, descriptors_1 );
    extractor.compute( img_2, keypoints_2, descriptors_2 );

    /*
    //-- Step 3 (alternative): Matching descriptor vectors with a brute force matcher
    BruteForceMatcher< L2<float> > matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_1, descriptors_2, matches );
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
    imshow("Matches", img_matches );
    */

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_1, descriptors_2, matches );

    //-- Quick calculation of max and min distances between keypoints
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Keep only "good" matches (distance < 2*min_dist). The 0.02 floor keeps
    //-- the set non-empty when min_dist is ~0 (near-identical descriptors);
    //-- without it 2*min_dist would be 0 and every match would be rejected.
    //-- PS.- radiusMatch can also be used here.
    double good_thresh = 2*min_dist;
    if( good_thresh < 0.02 ) good_thresh = 0.02;
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance < good_thresh )
        {
            good_matches.push_back( matches[i] );   // filter at the match source
        }
    }

    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    //-- Show detected matches
    imshow( "Good Matches", img_matches );

    //-- Localize the object from img_1 in img_2
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for( size_t i = 0; i < good_matches.size(); i++ )  // size_t: avoid signed/unsigned mismatch
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
    }

    //-- findHomography needs at least 4 point pairs; bail out gracefully
    //-- instead of crashing / producing a garbage matrix.
    if( good_matches.size() < 4 )
    {
        std::cout << " --(!) Not enough good matches to estimate a homography " << std::endl;
        waitKey(0);
        return -1;
    }

    //-- 3x3 homography mapping object points onto scene points, robust via RANSAC
    Mat H = findHomography( obj, scene, CV_RANSAC );
    if( H.empty() )
    { return -1; }   // RANSAC can fail even with >= 4 pairs

    //-- Get the corners from the image_1 ( the object to be "detected" )
    Point2f obj_corners[4] = { cvPoint(0,0), cvPoint( img_1.cols, 0 ),
                               cvPoint( img_1.cols, img_1.rows ), cvPoint( 0, img_1.rows ) };
    Point scene_corners[4];

    //-- Map these corners in the scene ( image_2 ): manual perspective transform
    //-- (equivalent to perspectiveTransform) with homogeneous normalization by Z.
    for( int i = 0; i < 4; i++ )
    {
        double x = obj_corners[i].x;
        double y = obj_corners[i].y;
        double Z = 1./( H.at<double>(2,0)*x + H.at<double>(2,1)*y + H.at<double>(2,2) );
        double X = ( H.at<double>(0,0)*x + H.at<double>(0,1)*y + H.at<double>(0,2) )*Z;
        double Y = ( H.at<double>(1,0)*x + H.at<double>(1,1)*y + H.at<double>(1,2) )*Z;
        //-- Shift X by img_1.cols because img_matches shows the two images side by side.
        scene_corners[i] = cvPoint( cvRound(X) + img_1.cols, cvRound(Y) );
    }

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0], scene_corners[1], Scalar( 0, 255, 0), 2 );
    line( img_matches, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 2 );
    line( img_matches, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 2 );
    line( img_matches, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 2 );

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(0);
    return 0;
}

/**
 * @function readme
 */
void readme()
{
    // Print the intended command-line usage of this sample.
    std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;
}
利用findHomography函数由匹配的关键点求出相应的变换矩阵;本例没有直接调用perspectiveTransform,而是把单应性矩阵手工逐点作用于目标角点,效果与用perspectiveTransform映射点群相同。
转自:http://blog.csdn.net/yang_xian521/article/details/6901762
最新文章
- 让OMCS支持更多的视频采集设备
- hdu-2063-二分图最大匹配
- json数组转普通数组 普通数组转json数组
- [转]CAP原理与最终一致性 强一致性 透析
- Oracle sqlplus设置显示格式命令详解
- JavaScript Lib Interface (JavaScript系统定义的接口一览表)
- POJ 3274 Gold Balanced Lineup
- Setup Factory 打包.netframework 2.0
- UVa 1607 (二分) Gates
- Java-泛型编程-使用通配符? extends 和 ? super
- innerHTML/outerHTML; innerText/outerText; textContent
- Android上使用OpenGLES2.0显示YUV数据
- Objective-C的hook方案(一): Method Swizzling
- css3特效详解
- 关于ES6
- [数]昨天欠下的一道立体几何题HDU-4741
- 基于设备树的TQ2440的中断(1)
- bzoj5470 / P4578 [FJOI2018]所罗门王的宝藏
- iptable 大量需要封杀的ip地址便捷方法
- Object-C使用类静态方法创建对象时容易内存泄露