openCV2马拉松第18圈——坐标变换
- 仿射变换
- 坐标映射
- 利用坐标映射做一些效果,例如以下
上面是原图,下面是利用坐标映射后的结果(原文此处为两张插图,图片链接已失效)。
我们仅仅要知道3个相应点,就能知道这个矩阵。OpenCV提供了这样一个计算的函数
getAffineTransform
Image1 中的点 1、2、3 对应到了 Image2 中的点 1、2、3,这样我们就能得到仿射矩阵。于是 Image1 中的全部点都能通过这个仿射矩阵映射到 Image2 中。
仿射变换后
坐标映射
在做图像增强时,我们改变的是图像的值域:f(x) 是输入,g(x) 是输出,h 是我们的方法(比如对比度增强、直方图均衡化),即
g(x) = h(f(x))
可是在坐标变换中,改变的是定义域:
g(x) = f(h(x))
(示意图,图片链接已失效)
如果我们有源图像f和坐标映射函数h,我们要怎么计算输出图像g呢?
大部分人都会这样
procedure forwardWarp(f, h, out g):
    For every pixel x in f
        1. Compute the destination location x' = h(x).
        2. Copy the pixel f(x) to g(x').
我一开始也是这样想的:遍历源图像,对每一个点 P,应用变换函数 h 计算其在输出图像中的坐标 P',然后把像素值复制过去。但更好的做法是反向映射:
procedure inverseWarp(f, h, out g):
    For every pixel x' in g
        1. Compute the source location x = h^(-1)(x').
        2. Resample f(x) at location x and copy to g(x').
遍历输出图像的点,映射回源图像,再去取点。
这里的映射函数 h^(-1) 就是原来 h 的逆变换(对仿射变换来说就是逆矩阵)。
当然了,也会碰到非整数的情况,接下来还要作一些别的处理。就不细述。
另一种简单的坐标映射,就是人工指定了输出图像的哪个点相应到源图像的哪个点。在OpenCV里叫remapping
一眼就能看出。我们的映射函数是
-
C++: void warpAffine(InputArray src,
OutputArray dst, InputArray M, Size dsize, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, const Scalar& borderValue=Scalar())
-
- src – 输入图像
- dst – 输出图像。有dsize的大小(由于仿射会把图片变大变小),type和src一样
- M – 仿射矩阵.
- dsize – 输出图像的大小.
- flags – 插值方法标志;WARP_INVERSE_MAP 意味着 M 是从目标图像到源图像的逆变换。
- borderMode – 边界像素的处理方式,默认是 BORDER_CONSTANT。
- borderValue – 边界填充值,默认是 0。
获得我们的变换矩阵M
/// Set the 3 pairs of corresponding points between source and destination
srcTri[0] = Point2f( 0, 0 );
srcTri[1] = Point2f( src.cols - 1, 0 );
srcTri[2] = Point2f( 0, src.rows - 1 );

dstTri[0] = Point2f( src.cols*0.0, src.rows*0.33 );
dstTri[1] = Point2f( src.cols*0.85, src.rows*0.25 );
dstTri[2] = Point2f( src.cols*0.15, src.rows*0.7 );

/// Compute the 2x3 affine matrix from the point pairs
warp_mat = getAffineTransform( srcTri, dstTri );
附上官方的sample
#include "opencv2/imgproc.hpp"
#include <iostream>
#include "opencv2/highgui.hpp"

using namespace cv;
using namespace std;

/// Global variables (const char*: a string literal may not bind to char* in C++11+)
const char* source_window = "Source image";
const char* warp_window = "Warp";
const char* warp_rotate_window = "Warp + Rotate";

/** @function main
 *  Loads an image, applies an affine warp defined by 3 point pairs,
 *  then rotates the warped result about its center, and shows all three images.
 */
int main( int argc, char** argv )
{
    if( argc < 2 )
    {
        cout << "Usage: " << argv[0] << " <image>" << endl;
        return -1;
    }

    Point2f srcTri[3];
    Point2f dstTri[3];

    Mat rot_mat( 2, 3, CV_32FC1 );
    Mat warp_mat( 2, 3, CV_32FC1 );
    Mat src, warp_dst, warp_rotate_dst;

    /// Load the image and verify it was actually read
    src = imread( argv[1], 1 );
    if( src.empty() )
    {
        cout << "Could not open image: " << argv[1] << endl;
        return -1;
    }

    /// Set the dst image to the same type and size as src
    warp_dst = Mat::zeros( src.rows, src.cols, src.type() );

    /// Set 3 pairs of corresponding points to define the affine transform
    srcTri[0] = Point2f( 0, 0 );
    srcTri[1] = Point2f( src.cols - 1, 0 );
    srcTri[2] = Point2f( 0, src.rows - 1 );

    dstTri[0] = Point2f( src.cols*0.0f, src.rows*0.33f );
    dstTri[1] = Point2f( src.cols*0.85f, src.rows*0.25f );
    dstTri[2] = Point2f( src.cols*0.15f, src.rows*0.7f );

    /// Get the 2x3 affine matrix from the point pairs
    warp_mat = getAffineTransform( srcTri, dstTri );

    /// Apply the affine transform just found to the src image
    warpAffine( src, warp_dst, warp_mat, warp_dst.size() );

    /** Rotating the image after the warp */
    /// Rotation matrix about the image center: -50 degrees, scaled to 0.6x
    Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
    double angle = -50.0;
    double scale = 0.6;
    rot_mat = getRotationMatrix2D( center, angle, scale );

    /// Rotate the warped image
    warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );

    /// Show what you got (WINDOW_AUTOSIZE: C++ constant matching the C++ headers)
    namedWindow( source_window, WINDOW_AUTOSIZE );
    imshow( source_window, src );
    namedWindow( warp_window, WINDOW_AUTOSIZE );
    imshow( warp_window, warp_dst );
    namedWindow( warp_rotate_window, WINDOW_AUTOSIZE );
    imshow( warp_rotate_window, warp_rotate_dst );

    /// Wait until the user exits the program
    waitKey(0);
    return 0;
}
怎样实现这种变换呢
// Horizontal flip: each output pixel (i, j) samples the source at (cols - i, j).
// NOTE(review): src.cols - i sends i == 0 just outside the image (handled by the
// border mode); a strict mirror would use src.cols - 1 - i — confirm intent.
for( int j = 0; j < src.rows; j++ ) {
    for( int i = 0; i < src.cols; i++ ) {
        map_x.at<float>(j,i) = src.cols - i;
        map_y.at<float>(j,i) = j;
    }
}
然后再调用
remap( src, dst, map_x, map_y, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) );
remap很好懂,INTER_LINEAR是线性插值
以下是官方的sample,比較简单就不解释啦
-
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>

using namespace cv;

/// Global variables
Mat src, dst;
Mat map_x, map_y;
const char* remap_window = "Remap demo";    // const: string literals may not bind to char*
int ind = 0;                                // which of the 4 mappings to apply next

/// Function Headers
void update_map( void );

/**
 * @function main
 * @brief Cycles through 4 remap effects on the input image, one per second.
 */
int main( int argc, char** argv )
{
  if( argc < 2 )
  {
    printf( "Usage: %s <image>\n", argv[0] );
    return -1;
  }

  /// Load the image and verify it was actually read
  src = imread( argv[1], 1 );
  if( src.empty() )
  {
    printf( "Could not open image: %s\n", argv[1] );
    return -1;
  }

  /// Create dst, map_x and map_y with the same size as src
  dst.create( src.size(), src.type() );
  map_x.create( src.size(), CV_32FC1 );
  map_y.create( src.size(), CV_32FC1 );

  /// Create window
  namedWindow( remap_window, WINDOW_AUTOSIZE );

  /// Loop: switch mapping every second; press ESC to exit
  while( true )
  {
    int c = waitKey( 1000 );
    if( (char)c == 27 )
      { break; }

    /// Update map_x & map_y, then apply remap
    /// (INTER_LINEAR: the C++ constant, consistent with the rest of the post)
    update_map();
    remap( src, dst, map_x, map_y, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) );

    /// Display results
    imshow( remap_window, dst );
  }
  return 0;
}

/**
 * @function update_map
 * @brief Fill the map_x and map_y matrices with 4 types of mappings:
 *        0 - zoom into the central half, 1 - vertical flip,
 *        2 - horizontal flip, 3 - both flips.
 */
void update_map( void )
{
  ind = ind % 4;

  for( int j = 0; j < src.rows; j++ )
  {
    for( int i = 0; i < src.cols; i++ )
    {
      switch( ind )
      {
        case 0:
          // Stretch the central half of the source across the whole window;
          // everything outside samples pixel (0,0)
          if( i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75 )
          {
            map_x.at<float>(j,i) = 2*( i - src.cols*0.25 ) + 0.5 ;
            map_y.at<float>(j,i) = 2*( j - src.rows*0.25 ) + 0.5 ;
          }
          else
          {
            map_x.at<float>(j,i) = 0 ;
            map_y.at<float>(j,i) = 0 ;
          }
          break;
        case 1: // flip upside down
          map_x.at<float>(j,i) = i ;
          map_y.at<float>(j,i) = src.rows - j ;
          break;
        case 2: // mirror left-right
          map_x.at<float>(j,i) = src.cols - i ;
          map_y.at<float>(j,i) = j ;
          break;
        case 3: // both flips
          map_x.at<float>(j,i) = src.cols - i ;
          map_y.at<float>(j,i) = src.rows - j ;
          break;
      } // end of switch
    }
  }
  ind++;
}
荷枪实弹remap还有很多其它的功能,比方能帮我们实现图像的缩放,K是缩放系数
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

#define K 1.1   // scaling factor

using namespace cv;

// Global variables
Mat src, dst;
Mat map_x, map_y;
const char* remap_window = "Remap demo";    // const: string literals may not bind to char*

void update_map( void );

int main( int argc, char** argv )
{
  if( argc < 2 ) return -1;

  src = imread( argv[1], 1 );
  if( src.empty() ) return -1;    // bail out if the image could not be read

  // Truncate K*size once, explicitly, so dst and both maps are guaranteed
  // the same size (K*rows is a double; Mat::create takes int)
  const int rows = (int)( K * src.rows );
  const int cols = (int)( K * src.cols );
  dst.create( rows, cols, src.type() );
  map_x.create( rows, cols, CV_32FC1 );
  map_y.create( rows, cols, CV_32FC1 );

  namedWindow( remap_window, WINDOW_AUTOSIZE );
  update_map();
  remap( src, dst, map_x, map_y, INTER_LANCZOS4, BORDER_CONSTANT, Scalar(0, 0, 0) );
  imshow( remap_window, dst );
  waitKey(0);
  return 0;
}

// Each destination pixel samples the source at 1/K of its own coordinates,
// which enlarges the image by K in both axes.
void update_map( void )
{
  for( int j = 0; j < (int)(K*src.rows); j++ ) {
    for( int i = 0; i < (int)(K*src.cols); i++ ) {
      map_x.at<float>(j,i) = i/K;
      map_y.at<float>(j,i) = j/K;
    }
  }
}
举一反三我们一开始就见到了这图片,那么怎么实现呢?
我在 y 轴上其实没有缩放,仅仅在 x 轴进行了缩放,分别分成了三段。
我的程序首先要用户在图片上点两次,分别是要放大的矩形的左上角和右下角。然后就以左上角的 x 和右下角的 x 为界,分为 3 段进行映射。
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

using namespace cv;
using namespace std;

#define K 2                 // number of points the user must click
#define MARGIN_RATIO 0.1    // fraction of the width kept (compressed) on each side
                            // (renamed from `threshold`: that macro collides with cv::threshold)

// Global variables
Mat src, src_copy, dst;
Mat map_x, map_y;
const char* window = "Scale demo";    // const: string literals may not bind to char*
int samplePointNum = 0;               // how many clicks recorded so far
Point myPoints[K];                    // the clicked coordinates
bool flag = false;                    // set once the map has been computed

// Build a piecewise-linear x mapping and run remap. The OUTPUT x axis is split
// into 3 segments at leftX/rightX; the y axis is left untouched:
//   [0, leftX]      -> [0, recLeftX]          (side, compressed)
//   [leftX, rightX] -> [recLeftX, recRightX]  (clicked rectangle, stretched)
//   [rightX, cols]  -> [recRightX, cols]      (side, compressed)
void update_map( void )
{
  int leftX = (int)( MARGIN_RATIO * src.cols );
  int rightX = src.cols - leftX;
  int recLeftX = myPoints[0].x;   // left edge of the clicked rectangle
  int recRightX = myPoints[1].x;  // right edge of the clicked rectangle

  for( int j = 0; j < src.rows; j++ ) {
    for( int i = 0; i < src.cols; i++ ) {
      if( i > leftX && i < rightX ) {
        // Cast to float before dividing: pure integer division would
        // quantize the map and produce visible banding
        map_x.at<float>(j,i) = recLeftX + (i - leftX) * (float)(recRightX - recLeftX)/(rightX - leftX);
        map_y.at<float>(j,i) = j;
      } else if( i <= leftX ) {
        map_x.at<float>(j,i) = i * (float)recLeftX/leftX;
        map_y.at<float>(j,i) = j;
      } else {
        map_x.at<float>(j,i) = recRightX + (i - rightX) * (float)(src.cols - recRightX)/(src.cols - rightX);
        map_y.at<float>(j,i) = j;
      }
    }
  }
  flag = true;
  // Remap from the untouched copy: `src` has the click markers drawn on it
  remap( src_copy, dst, map_x, map_y, INTER_LANCZOS4, BORDER_CONSTANT, Scalar(0, 0, 0) );
  imshow( window, dst );
  imwrite( "./result.jpg", dst );
  return;
}

// Mouse callback: record K left-clicks, mark each with a small square, and
// once K points are collected compute the map and show the remapped image.
static void onMouse( int event, int x, int y, int, void* )
{
  if( samplePointNum == K ) {
    if( !flag )
      update_map();
    return;
  }
  if( event != EVENT_LBUTTONDOWN )
    return;
  rectangle( src, Point(x-3,y-3), Point(x+3,y+3), Scalar(255,0,0), 1 );
  myPoints[samplePointNum++] = Point(x,y);
  imshow( window, src );
  return;
}

int main( int argc, char** argv )
{
  if( argc < 2 ) return -1;

  src = imread( argv[1], 1 );
  if( src.empty() ) return -1;    // bail out if the image could not be read
  src_copy = src.clone();         // clean copy used as the remap source

  dst.create( src.rows, src.cols, src.type() );
  map_x.create( src.rows, src.cols, CV_32FC1 );
  map_y.create( src.rows, src.cols, CV_32FC1 );

  namedWindow( window, WINDOW_AUTOSIZE );
  setMouseCallback( window, onMouse, 0 );
  imshow( window, src );
  waitKey(0);
  return 0;
}
最新文章
- 轻量级C#编辑器RoslynPad
- EC笔记,第二部分:6.若不想使用编译器默认生成的函数,就该明确拒绝
- bzoj 2738 矩阵乘法
- jquery中是否加()的问题
- Java常见异常总结
- canvas学习总结五:线段的端点与连接点
- Java对正则表达式的支持(二)
- [Postman]定制Postman(4)
- 设计模式—装饰模式的C++实现
- 秦皇岛CCPC的失败总结
- OFART: OpenFlow-Switch Adaptive Random Testing
- ANT task之Junit、JunitReport
- SQL语句(十七)综合练习_分组查询_内嵌查询_视图使用
- SSM(Spring5.x+Mybatis3)框架搭建【解决日志问题】(Github源码)
- 【android】通过leakCanary找出程序内存泄露点
- 如何使用JFinal开发javaweb
- 牛客网NOIP赛前集训营-提高组(第四场)B区间
- [MySQL] Innodb參数优化
- 4 TensorFlow入门之dropout解决overfitting问题
- BeatSaber节奏光剑双手柄MR教程