features2d\features2d.hpp(1172):run time error 339

[android]ndk 生成 error.opencv2/core/core.hpp: 没有这样的文件或目录
注意事项: 本文中文内容可能为机器翻译,如要查看英文原文请点击上面连接.
我要使用 OpenCV 私有模块在 android 系统中的问题。我读此教程
但是,运行后 ndk 生成,它显示以下错误...
guru@guru-Aspire-5738:~/Android/OpenCVWorkspace/sift_opencv_android/jni$ ~/Android/android-ndk-r9/ndk-build
Install        : libopencv_java.so => libs/armeabi-v7a/libopencv_java.so
Install        : libnonfree.so => libs/armeabi-v7a/libnonfree.so
Compile++ thumb
  : test_sift <= test_sift.cpp
/home/guru/Android/OpenCVWorkspace/sift_opencv_android/jni/test_sift.cpp:2:33: fatal error: opencv2/core/core.hpp: No such file or directory
compilation terminated.
make: ***[/home/guru/Android/OpenCVWorkspace/sift_opencv_android/obj/local/armeabi-v7a/objs/test_sift/test_sift.o] Error 1
这里是我的代码...
#include &iostream&
#include &opencv2/core/core.hpp&
#include &opencv2/highgui/highgui.hpp&
#include &opencv2/nonfree/features2d.hpp&
#include &opencv2/nonfree/nonfree.hpp&
int main( int argc, char** argv )
if( argc != 3)
cout &&" Usage: sift input_image output_image" &&
return -1;
//cv::initModule_nonfree();
//cout &&"initModule_nonfree() called" &&
image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if(! image.data )
"Could not open or find the image" && std::
return -1;
vector&KeyPoint&
// Create a SIFT keypoint detector.
SiftFeatureD
detector.detect(image, keypoints);
cout && "Detected " && (int) keypoints.size() && " keypoints" &&
// Compute feature description.
pute(image,keypoints, descriptors);
cout && "Computed feature."&&
// Store description to "descriptors.des".
fs.open("descriptors.des", FileStorage::WRITE);
cout && "Opened file to store the features."&&
fs && "descriptors" &&
cout && "Finished writing file."&&
fs.release();
cout && "Released file."&&
// Show keypoints in the output image.
Mat outputI
Scalar keypointColor = Scalar(0, 0, 255);
drawKeypoints(image, keypoints, outputImg, keypointColor, DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
cout && "Drew keypoints in output image file."&&
namedWindow("Output image", CV_WINDOW_NORMAL );
imshow("Output image", outputImg);
waitKey(0);
cout && "Generate the output image."&&
imwrite(argv[2], outputImg);
cout && "Done."&&
我 Android.mk 是...
LOCAL_PATH := $(call my-dir)

# Prebuilt nonfree (SIFT/SURF) shared library.
include $(CLEAR_VARS)
LOCAL_MODULE    := sift_prebuilt
LOCAL_SRC_FILES := libnonfree.so
include $(PREBUILT_SHARED_LIBRARY)

# Prebuilt OpenCV Java/native bindings library.
include $(CLEAR_VARS)
LOCAL_MODULE    := opencv_java_prebuilt
LOCAL_SRC_FILES := libopencv_java.so
include $(PREBUILT_SHARED_LIBRARY)

# Test executable. CLEAR_VARS is required here so the variables below do not
# leak from/into the prebuilt modules above.
include $(CLEAR_VARS)
# NOTE: the variable name is LOCAL_C_INCLUDES (with a trailing S).
# "LOCAL_C_INCLUDE" is silently ignored by ndk-build, which is why
# opencv2/core/core.hpp was reported as "No such file or directory".
LOCAL_C_INCLUDES       := /home/guru/Android/OpenCV-2.4.6-android-sdk/sdk/native/jni/include
LOCAL_MODULE           := test_sift
LOCAL_LDLIBS           += -llog -ldl
LOCAL_SHARED_LIBRARIES := sift_prebuilt opencv_java_prebuilt
LOCAL_SRC_FILES        := test_sift.cpp
include $(BUILD_EXECUTABLE)
解决方法 1:
问题不在源代码里,而在 Android.mk:你写的是
LOCAL_C_INCLUDE := /home/guru/Android/OpenCV-2.4.6-android-sdk/sdk/native/jni/include
正确的变量名是 LOCAL_C_INCLUDES(注意结尾的 S)。写错的变量会被 ndk-build 静默忽略,头文件搜索路径因此没有生效,所以编译器才报 "opencv2/core/core.hpp: No such file or directory"。把变量名改为 LOCAL_C_INCLUDES 即可。>> features2d.hpp
features2d.hpp ( 文件浏览 )
/*M///////////////////////////////////////////////////////////////////////////////////////
IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
// Copyright (C) , Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
* Redistribution's of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistribution's in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name of the copyright holders may not be used to endorse or promote products
derived from this software without specific prior written permission.
// This software is provided by the copyright holders and contributors &as is& and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitu
// loss of use, data, or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__
#define __OPENCV_NONFREE_FEATURES_2D_HPP__
#include &opencv2/features2d/features2d.hpp&
#ifdef __cplusplus
namespace cv
SIFT implementation.
The class implements SIFT algorithm by D. Lowe.
class CV_EXPORTS_W SIFT : public Feature2D
CV_WRAP explicit SIFT( int nfeatures=0, int nOctaveLayers=3,
double contrastThreshold=0.04, double edgeThreshold=10,
double sigma=1.6);
//! returns the descriptor size in floats (128)
CV_WRAP int descriptorSize()
//! returns the descriptor type
CV_WRAP int descriptorType()
//! finds the keypoints using SIFT algorithm
void operator()(InputArray img, InputArray mask,
vector&KeyPoint&& keypoints)
//! finds the keypoints and computes descriptors for them using SIFT algorithm.
//! Optionally it can compute descriptors for the user-provided keypoints
void operator()(InputArray img, InputArray mask,
vector&KeyPoint&& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints=false)
AlgorithmInfo* info()
void buildGaussianPyramid( const Mat& base, vector&Mat&& pyr, int nOctaves )
void buildDoGPyramid( const vector&Mat&& pyr, vector&Mat&& dogpyr )
void findScaleSpaceExtrema( const vector&Mat&& gauss_pyr, const vector&Mat&& dog_pyr,
vector&KeyPoint&& keypoints )
protected:
void detectImpl( const Mat& image, vector&KeyPoint&& keypoints, const Mat& mask=Mat() )
void computeImpl( const Mat& image, vector&KeyPoint&& keypoints, Mat& descriptors )
CV_PROP_RW
CV_PROP_RW int nOctaveL
CV_PROP_RW double contrastT
CV_PROP_RW double edgeT
CV_PROP_RW
typedef SIFT SiftFeatureD
typedef SIFT SiftDescriptorE
SURF implementation.
The class implements SURF algorithm by H. Bay et al.
class CV_EXPORTS_W SURF : public Feature2D
//! the default constructor
CV_WRAP SURF();
//! the full constructor taking all the necessary parameters
explicit CV_WRAP SURF(double hessianThreshold,
int nOctaves=4, int nOctaveLayers=2,
bool extended=true, bool upright=false);
//! returns the descriptor size in float's (64 or 128)
CV_WRAP int descriptorSize()
//! returns the descriptor type
CV_WRAP int descriptorType()
//! finds the keypoints using fast hessian detector used in SURF
void operator()(InputArray img, InputArray mask,
CV_OUT vector&KeyPoint&& keypoints)
//! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
void operator()(InputArray img, InputArray mask,
CV_OUT vector&KeyPoint&& keypoints,
OutputArray descriptors,
bool useProvidedKeypoints=false)
AlgorithmInfo* info()
CV_PROP_RW double hessianT
CV_PROP_RW int nO
CV_PROP_RW int nOctaveL
CV_PROP_RW
CV_PROP_RW
protected:
void detectImpl( const Mat& image, vector&KeyPoint&& keypoints, const Mat& mask=Mat() )
void computeImpl( const Mat& image, vector&KeyPoint&& keypoints, Mat& descriptors )
typedef SURF SurfFeatureD
typedef SURF SurfDescriptorE
} /* namespace cv */
#endif /* __cplusplus */
/* End of file. */
展开> <收缩
下载源码到电脑,阅读使用更方便
还剩0行未阅读,继续阅读 ▼
Sponsored links
源码文件列表
温馨提示: 点击源码文件名可预览文件内容哦 ^_^
avcodec-55.dll16.43 MB08-01-14 11:06
avdevice-55.dll1.13 MB08-01-14 11:06
avfilter-4.dll1.84 MB08-01-14 11:06
avformat-55.dll4.99 MB08-01-14 11:06
avutil-52.dll399.00 kB08-01-14 11:06
7.95 kB17-01-14 22:30
171.36 kB19-12-13 11:06
2.99 kB19-12-13 11:06
2.22 kB19-12-13 11:06
10.40 kB19-12-13 11:06
3.91 kB19-12-13 11:06
3.86 kB19-12-13 11:06
6.05 kB19-12-13 11:06
4.71 kB19-12-13 11:06
5.96 kB19-12-13 11:06
2.11 kB19-12-13 11:06
1.82 kB19-12-13 11:06
3.24 kB19-12-13 11:06
3.70 kB19-12-13 11:06
55.26 kB19-12-13 11:06
975.00 B19-12-13 11:06
6.87 kB19-12-13 11:06
4.61 kB19-12-13 11:06
3.30 kB19-12-13 11:06
81.98 kB19-12-13 11:06
16.89 kB19-12-13 11:06
2.76 kB19-12-13 11:06
1.55 kB19-12-13 11:06
1.79 kB19-12-13 11:06
4.13 kB19-12-13 11:06
82.00 B19-12-13 11:06
4.31 kB19-12-13 11:06
2.06 kB19-12-13 11:06
251.00 B19-12-13 11:06
11.82 kB19-12-13 11:06
8.16 kB19-12-13 11:06
2.03 kB19-12-13 11:06
2.26 kB19-12-13 11:06
7.55 kB19-12-13 11:06
2.79 kB19-12-13 11:06
9.54 kB19-12-13 11:06
8.75 kB19-12-13 11:06
14.47 kB30-12-13 18:09
4.35 kB19-12-13 11:06
2.64 kB19-12-13 11:06
6.39 kB11-01-14 22:14
4.65 kB19-12-13 11:06
5.18 kB19-12-13 11:06
128.00 B19-12-13 11:06
4.54 kB19-12-13 11:06
2.35 kB19-12-13 11:06
21.18 kB19-12-13 11:06
2.80 kB19-12-13 11:06
7.66 kB19-12-13 11:06
1.69 kB19-12-13 11:06
1.52 kB19-12-13 11:06
17.56 kB19-12-13 11:06
1.94 kB19-12-13 11:06
9.02 kB19-12-13 11:06
2.00 kB19-12-13 11:06
4.61 kB19-12-13 11:06
1.92 kB19-12-13 11:06
13.46 kB19-12-13 11:06
1.19 kB19-12-13 11:06
14.07 kB19-12-13 11:06
31.11 kB19-12-13 11:06
7.08 kB19-12-13 11:06
10.17 kB19-12-13 11:06
26.08 kB19-12-13 11:06
1.37 kB19-12-13 11:06
3.86 kB19-12-13 11:06
1.98 kB19-12-13 11:06
9.85 kB19-12-13 11:06
1.90 kB19-12-13 11:06
1.98 kB19-12-13 11:06
3.53 kB19-12-13 11:06
1.25 kB19-12-13 11:06
5.20 kB19-12-13 11:06
2.40 kB19-12-13 11:06
4.54 kB19-12-13 11:06
1.82 kB19-12-13 11:06
2.82 kB19-12-13 11:06
1.64 kB19-12-13 11:06
11.35 kB19-12-13 11:06
1.65 kB19-12-13 11:06
11.97 kB19-12-13 11:06
2.07 kB19-12-13 11:06
3.36 kB13-11-13 17:19
2.35 kB13-11-13 17:19
2.78 kB13-11-13 17:19
2.29 kB13-11-13 17:19
2.13 kB13-11-13 17:19
2.41 kB13-11-13 17:19
2.37 kB13-11-13 17:19
2.21 kB13-11-13 17:19
110.00 B13-11-13 17:19
2.25 kB13-11-13 17:19
2.14 kB13-11-13 17:19
36.75 kB13-11-13 17:19
37.57 kB13-11-13 17:19
2.97 kB13-11-13 17:19
6.93 kB13-11-13 17:19
12.51 kB13-11-13 17:19
23.40 kB13-11-13 17:19
181.29 kB13-11-13 17:19
76.66 kB13-11-13 17:19
7.67 kB13-11-13 17:19
2.15 kB13-11-13 17:19
9.24 kB13-11-13 17:19
18.27 kB13-11-13 17:19
33.33 kB13-11-13 17:19
78.67 kB28-11-13 11:16
9.13 kB13-11-13 17:19
9.69 kB13-11-13 17:19
128.67 kB04-12-13 11:09
56.12 kB13-11-13 17:19
2.94 kB08-12-13 11:13
20.26 kB13-11-13 17:19
62.38 kB13-11-13 17:19
5.92 kB13-11-13 17:19
5.82 kB13-11-13 17:19
7.32 kB13-11-13 17:19
20.12 kB13-11-13 17:19
5.85 kB13-11-13 17:19
1.75 kB13-11-13 17:19
4.57 kB13-11-13 17:19
24.66 kB13-11-13 17:19
251.00 B13-11-13 17:19
4.48 kB13-11-13 17:19
17.02 kB13-11-13 17:19
8.67 kB13-11-13 17:19
1.99 kB13-11-13 17:19
3.23 kB13-11-13 17:19
7.16 kB13-11-13 17:19
4.01 kB13-11-13 17:19
21.05 kB13-11-13 17:19
10.56 kB13-11-13 17:19
19.22 kB13-11-13 17:19
19.70 kB13-11-13 17:19
33.72 kB13-11-13 17:19
3.61 kB13-11-13 17:19
3.63 kB29-12-13 20:50
15.19 kB13-11-13 17:19
17.54 kB13-11-13 17:19
3.21 kB13-11-13 17:19
5.88 kB13-11-13 17:19
5.93 kB13-11-13 17:19
2.83 kB13-11-13 17:19
3.09 kB13-11-13 17:19
3.61 kB13-11-13 17:19
14.60 kB13-11-13 17:19
2.78 kB13-11-13 17:19
5.64 kB13-11-13 17:19
5.61 kB13-11-13 17:19
2.38 kB13-11-13 17:19
8.05 kB13-11-13 17:19
24.32 kB13-11-13 17:19
14.90 kB13-11-13 17:19
4.07 kB13-11-13 17:19
4.51 kB13-11-13 17:19
217.21 kB13-11-13 17:19
14.94 kB13-11-13 17:19
22.76 kB13-11-13 17:19
17.39 kB13-11-13 17:19
8.37 kB13-11-13 17:19
5.07 kB13-11-13 17:19
3.09 kB13-11-13 17:19
4.86 kB13-11-13 17:19
9.51 kB13-11-13 17:19
3.08 kB13-11-13 17:19
31.35 kB13-11-13 17:19
4.65 kB13-11-13 17:19
11.41 kB13-11-13 17:19
9.76 kB13-11-13 17:19
8.69 kB13-11-13 17:19
33.08 kB13-11-13 17:19
2.68 kB13-11-13 17:19
3.21 kB13-11-13 17:19
4.34 kB13-11-13 17:19
7.34 kB13-11-13 17:19
7.60 kB13-11-13 17:19
49.32 kB13-11-13 17:19
12.91 kB13-11-13 17:19
4.97 kB13-11-13 17:19
2.92 kB13-11-13 17:19
4.86 kB13-11-13 17:19
2.15 kB13-11-13 17:19
107.80 kB13-11-13 17:19
2.14 kB13-11-13 17:19
2.80 kB13-11-13 17:19
5.22 kB13-11-13 17:19
8.95 kB13-11-13 17:19
25.70 kB13-11-13 17:19
2.32 kB13-11-13 17:19
54.97 kB13-11-13 17:19
29.62 kB13-11-13 17:19
16.05 kB13-11-13 17:19
32.14 kB13-11-13 17:19
29.39 kB13-11-13 17:19
133.51 kB13-11-13 17:19
3.99 kB13-11-13 17:19
76.73 kB28-11-13 11:16
5.68 kB13-11-13 17:19
5.28 kB13-11-13 17:19
2.30 kB13-11-13 17:19
5.81 kB13-11-13 17:19
37.41 kB13-11-13 17:19
17.06 kB08-12-13 11:13
92.86 kB08-12-13 11:13
2.69 kB13-11-13 17:19
701.00 B09-12-13 18:35
3.90 kB13-11-13 17:19
2.58 kB13-11-13 17:19
2.88 kB13-11-13 17:19
4.86 kB13-11-13 17:19
2.71 kB13-11-13 17:19
4.10 kB13-11-13 17:19
6.15 kB13-11-13 17:19
7.27 kB13-11-13 17:19
8.59 kB13-11-13 17:19
5.43 kB13-11-13 17:19
3.83 kB13-11-13 17:19
15.17 kB13-11-13 17:19
21.07 kB13-11-13 17:19
7.42 kB13-11-13 17:19
5.26 kB13-11-13 17:19
3.30 kB13-11-13 17:19
3.91 kB13-11-13 17:19
4.49 kB13-11-13 17:19
14.44 kB13-11-13 17:19
19.41 kB13-11-13 17:19
812.83 kB13-11-13 17:19
24.00 kB14-11-13 11:16
10.63 kB04-12-13 11:09
18.00 kB13-11-13 17:19
2.36 kB13-11-13 17:19
3.78 kB13-11-13 17:19
3.50 kB13-11-13 17:19
5.34 kB13-11-13 17:19
3.09 kB13-11-13 17:19
5.34 kB13-11-13 17:19
6.69 kB13-11-13 17:19
2.62 kB13-11-13 17:19
3.57 kB13-11-13 17:19
4.06 kB13-11-13 17:19
6.33 kB13-11-13 17:19
2.23 kB13-11-13 17:19
avcodec-55.def7.08 kB19-12-13 11:06
avcodec.lib213.26 kB19-12-13 11:06
avdevice-55.def103.00 B19-12-13 11:06
avdevice.lib4.47 kB19-12-13 11:06
avfilter-4.def2.39 kB19-12-13 11:06
avfilter.lib66.06 kB19-12-13 11:06
avformat-55.def3.21 kB19-12-13 11:06
avformat.lib111.11 kB19-12-13 11:06
avutil-52.def9.24 kB19-12-13 11:06
avutil.lib309.17 kB19-12-13 11:06
libavcodec.dll.a184.51 kB19-12-13 11:06
libavdevice.dll.a3.38 kB19-12-13 11:06
libavfilter.dll.a56.60 kB19-12-13 11:06
libavformat.dll.a95.01 kB19-12-13 11:06
libavutil.dll.a267.06 kB19-12-13 11:06
libpostproc.dll.a6.46 kB19-12-13 11:06
libswresample.dll.a69.55 kB19-12-13 11:06
libswscale.dll.a23.45 kB19-12-13 11:06
OpenCVConfig.cmake12.96 kB09-12-13 18:16
OpenCVModules-debug.cmake13.41 kB09-12-13 18:16
OpenCVModules-release.cmake13.58 kB09-12-13 18:16
OpenCVModules.cmake4.67 kB09-12-13 18:16
opencv_calib3d247.lib193.10 kB09-12-13 18:19
opencv_calib3d247d.lib193.76 kB09-12-13 18:21
opencv_contrib247.lib358.89 kB09-12-13 18:20
opencv_contrib247d.lib360.00 kB09-12-13 18:22
opencv_core247.lib455.31 kB09-12-13 18:17
opencv_core247d.lib456.77 kB09-12-13 18:20
opencv_features2d247.lib322.71 kB09-12-13 18:18
opencv_features2d247d.lib323.51 kB09-12-13 18:21
opencv_flann247.lib104.70 kB09-12-13 18:17
opencv_flann247d.lib105.07 kB09-12-13 18:20
opencv_gpu247.lib444.05 kB09-12-13 18:19
opencv_gpu247d.lib445.36 kB09-12-13 18:21
opencv_highgui247.lib138.85 kB09-12-13 18:18
opencv_highgui247d.lib139.34 kB09-12-13 18:21
opencv_imgproc247.lib187.50 kB09-12-13 18:18
opencv_imgproc247d.lib188.10 kB09-12-13 18:20
opencv_legacy247.lib461.72 kB09-12-13 18:19
opencv_legacy247d.lib463.20 kB09-12-13 18:21
opencv_ml247.lib233.58 kB09-12-13 18:17
opencv_ml247d.lib234.27 kB09-12-13 18:20
opencv_nonfree247.lib341.69 kB09-12-13 18:19
opencv_nonfree247d.lib342.81 kB09-12-13 18:21
opencv_objdetect247.lib181.26 kB09-12-13 18:18
opencv_objdetect247d.lib181.80 kB09-12-13 18:21
opencv_ocl247.lib538.91 kB09-12-13 18:19
opencv_ocl247d.lib540.40 kB09-12-13 18:21
opencv_photo247.lib88.80 kB09-12-13 18:18
opencv_photo247d.lib89.14 kB09-12-13 18:20
opencv_stitching247.lib551.96 kB09-12-13 18:20
opencv_stitching247d.lib553.43 kB09-12-13 18:22
opencv_superres247.lib360.13 kB09-12-13 18:19
opencv_superres247d.lib361.28 kB09-12-13 18:21
opencv_ts247.lib7.08 MB09-12-13 18:19
opencv_ts247d.lib9.55 MB09-12-13 18:21
opencv_video247.lib112.34 kB09-12-13 18:18
opencv_video247d.lib112.73 kB09-12-13 18:20
opencv_videostab247.lib369.68 kB09-12-13 18:20
opencv_videostab247d.lib370.83 kB09-12-13 18:21
postproc-52.def205.00 B19-12-13 11:06
postproc.lib8.03 kB19-12-13 11:06
swresample-0.def3.70 kB19-12-13 11:06
swresample.lib74.80 kB19-12-13 11:06
swscale-2.def801.00 B19-12-13 11:06
swscale.lib27.80 kB19-12-13 11:06
postproc-52.dll118.50 kB08-01-14 11:06
swresample-0.dll265.00 kB08-01-14 11:06
swscale-2.dll363.00 kB08-01-14 11:06
&detail&0.00 B17-01-14 23:09
&device&0.00 B17-01-14 23:09
&detail&0.00 B17-01-14 23:09
&calib3d&0.00 B17-01-14 23:08
&contrib&0.00 B17-01-14 23:08
&core&0.00 B17-01-14 23:08
&features2d&0.00 B17-01-14 23:08
&flann&0.00 B17-01-14 23:08
&gpu&0.00 B17-01-14 23:08
&highgui&0.00 B17-01-14 23:09
&imgproc&0.00 B17-01-14 23:09
&legacy&0.00 B17-01-14 23:09
&ml&0.00 B17-01-14 23:09
&nonfree&0.00 B17-01-14 23:09
&objdetect&0.00 B17-01-14 23:09
&ocl&0.00 B17-01-14 23:09
&photo&0.00 B17-01-14 23:09
&stitching&0.00 B17-01-14 23:09
&superres&0.00 B17-01-14 23:09
&ts&0.00 B17-01-14 23:09
&video&0.00 B17-01-14 23:09
&videostab&0.00 B17-01-14 23:09
&libavcodec&0.00 B17-01-14 23:08
&libavdevice&0.00 B17-01-14 23:08
&libavfilter&0.00 B17-01-14 23:08
&libavformat&0.00 B17-01-14 23:08
&libavutil&0.00 B17-01-14 23:08
&libpostproc&0.00 B17-01-14 23:08
&libswresample&0.00 B17-01-14 23:08
&libswscale&0.00 B17-01-14 23:08
&opencv&0.00 B17-01-14 23:08
&opencv2&0.00 B17-01-14 23:09
&include&0.00 B17-01-14 23:08
&lib&0.00 B17-01-14 23:08
&capture&0.00 B17-01-14 23:11
Sponsored links
评价成功,多谢!
下载capture.rar
CodeForge积分(原CF币)全新升级,功能更强大,使用更便捷,不仅可以用来下载海量源代码马上还可兑换精美小礼品了
您的积分不足
支付宝优惠套餐快速获取 30 积分
10积分 / ¥100
30积分 / ¥200原价 ¥300 元
100积分 / ¥500原价 ¥1000 元
订单支付完成后,积分将自动加入到您的账号。以下是优惠期的人民币价格,优惠期过后将恢复美元价格。
支付宝支付宝付款
微信钱包微信付款
更多付款方式:、
您本次下载所消耗的积分将转交上传作者。
同一源码,30天内重复下载,只扣除一次积分。
鲁ICP备号-3 runtime:Elapsed:30.678ms 27.69
登录 CodeForge
还没有CodeForge账号?
Switch to the English version?
^_^"呃 ...
Sorry!这位大神很神秘,未开通博客呢,请浏览一下其他的吧[blog]基于SURF特征的图像与视频拼接技术的研究和实现(一)-爱编程
[blog]基于SURF特征的图像与视频拼接技术的研究和实现(一)
基于SURF特征的图像与视频拼接技术的研究和实现(一)
& & &一直有计划研究实时图像拼接,但是直到最近拜读西电2013年张亚娟的《基于SURF特征的图像与视频拼接技术的研究和实现》,条理清晰、内容完整、实现的技术具有市场价值。因此定下决心以这篇论文为基础脉络,结合实际情况,进行&基于SURF特征的图像与视频拼接技术的研究和实现&。
& & & 一、基于opencv的surf实现
& & & 3.0以后,surf被分到了"opencv_contrib-master"中去,操作起来不习惯,这里仍然选择一直在使用的opencv2.48,其surf的调用方式为:
//&raw_surf.cpp&:&本例是对opencv-2.48相关例子的实现//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"using&namespace&using&namespace&int&main(&int&argc,&char**&argv&){&&&&&&&&&Mat&img_1&=&imread(&"img_opencv_1.png",&0&);&&&&Mat&img_2&=&imread(&"img_opencv_2.png",&0&);&&&&if(&!img_1.data&||&!img_2.data&)&&&&{&std::cout&&&"&--(!)&Error&reading&images&"&&&&std::&return&-1;&}&&&&//--&Step&1:&Detect&the&keypoints&using&SURF&Detector&&&&int&minHessian&=&10000;&&&&SurfFeatureDetector&detector(&minHessian&);&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;&&&&detector.detect(&img_1,&keypoints_1&);&&&&detector.detect(&img_2,&keypoints_2&);&&&&//--&Draw&keypoints&&&&Mat&img_keypoints_1;&Mat&img_keypoints_2;&&&&drawKeypoints(&img_1,&keypoints_1,&img_keypoints_1,&Scalar::all(-1),&DrawMatchesFlags::DEFAULT&);&&&&drawKeypoints(&img_2,&keypoints_2,&img_keypoints_2,&Scalar::all(-1),&DrawMatchesFlags::DEFAULT&);&&&&//--&Step&2:&Calculate&descriptors&(feature&vectors)&&&&SurfDescriptorExtractor&&&&&Mat&descriptors_1,&descriptors_2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&Matching&descriptor&vectors&with&a&brute&force&matcher&&&&BFMatcher&matcher(NORM_L2);&&&&std::vector&&DMatch&&&&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);&&&&//--&Draw&matches&&&&Mat&img_&&&&drawMatches(&img_1,&keypoints_1,&img_2,&keypoints_2,&matches,&img_matches&);&&&&//--&Show&detected&(drawn)&keypoints&&&&imshow("Keypoints&1",&img_keypoints_1&);&&&&imshow("Keypoints&2",&img_keypoints_2&);&&&&//--&Show&detected&matches&&&&imshow("Matches",&img_matches&);&&&&waitKey(0);&&&&return&0;}
这里采用的是surffeaturedector的方法进行点的寻找,而后采用BFMatcher的方法进行数据比对。但这种方法错误的比较多,提供了FLANN的方法进行比对:
//&raw_surf.cpp&:&本例是对opencv-2.48相关例子的实现//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"using&namespace&using&namespace&int&main(&int&argc,&char**&argv&){&&&&&&&&&Mat&img_1&=&imread(&"img_opencv_1.png",&0&);&&&&Mat&img_2&=&imread(&"img_opencv_2.png",&0&);&&&&if(&!img_1.data&||&!img_2.data&)&&&&{&std::cout&&&"&--(!)&Error&reading&images&"&&&&std::&return&-1;&}&&&&//--&Step&1:&Detect&the&keypoints&using&SURF&Detector&&&&int&minHessian&=&400;&&&&SurfFeatureDetector&detector(&minHessian&);&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;&&&&detector.detect(&img_1,&keypoints_1&);&&&&detector.detect(&img_2,&keypoints_2&);&&&&//--&Draw&keypoints&&&&Mat&img_keypoints_1;&Mat&img_keypoints_2;&&&&drawKeypoints(&img_1,&keypoints_1,&img_keypoints_1,&Scalar::all(-1),&DrawMatchesFlags::DEFAULT&);&&&&drawKeypoints(&img_2,&keypoints_2,&img_keypoints_2,&Scalar::all(-1),&DrawMatchesFlags::DEFAULT&);&&&&//--&Step&2:&Calculate&descriptors&(feature&vectors)&&&&SurfDescriptorExtractor&&&&&Mat&descriptors_1,&descriptors_2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&Matching&descriptor&vectors&using&FLANN&matcher&&&&FlannBasedMatcher&&&&&std::vector&&DMatch&&&&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);&&&&double&max_dist&=&0;&double&min_dist&=&100;&&&&//--&Quick&calculation&of&max&and&min&distances&between&keypoints&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&double&dist&=&matches[i].&&&&if(&dist&&&min_dist&)&min_dist&=&&&&&if(&dist&&&max_dist&)&max_dist&=&&&&&}&&&&printf("--&Max&dist&:&%f&\n",&max_dist&);&&&&printf("--&Min&dist&:&%f&\n",&min_dist&);&&&&//--&Draw&only&"good"&matches&(i.e.&whose&distance&is&less&than&2*min_dist,&&&&//--&or&a&small&arbitary&value&(&0.02&)&in&the&event&that&min_dist&is&very&&&&//--&small)&&&&//--&PS.-&radiusMatch&can&also&be&used
&here.&&&&std::vector&&DMatch&&&good_&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&if(&matches[i].distance&&=&max(2*min_dist,&0.02)&)&&&&{&good_matches.push_back(&matches[i]);&}&&&&}&&&&//--&Draw&only&"good"&matches&&&&Mat&img_&&&&drawMatches(&img_1,&keypoints_1,&img_2,&keypoints_2,&&&&&&&&good_matches,&img_matches,&Scalar::all(-1),&Scalar::all(-1),&&&&&&&&vector&char&(),&DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS&);&&&&//--&Show&detected&matches&&&&imshow(&"Good&Matches",&img_matches&);&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)&&&&{&printf(&"--&Good&Match&[%d]&Keypoint&1:&%d&&--&Keypoint&2:&%d&&\n",&i,&good_matches[i].queryIdx,&good_matches[i].trainIdx&);&}&&&&waitKey(0);&&&&return&0;}
可以发现,除了错误一例,其他都是正确的。
继续来做,计算出单应矩阵
//&raw_surf.cpp&:&本例是对opencv-2.48相关例子的实现//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"#include&"opencv2/calib3d/calib3d.hpp"using&namespace&using&namespace&int&main(&int&argc,&char**&argv&){&&&&&&&&&Mat&img_1&=&imread(&"img_opencv_1.png",&0&);&&&&Mat&img_2&=&imread(&"img_opencv_2.png",&0&);&&&&if(&!img_1.data&||&!img_2.data&)&&&&{&std::cout&&&"&--(!)&Error&reading&images&"&&&&std::&return&-1;&}&&&&//--&Step&1:&Detect&the&keypoints&using&SURF&Detector&&&&int&minHessian&=&400;&&&&SurfFeatureDetector&detector(&minHessian&);&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;&&&&detector.detect(&img_1,&keypoints_1&);&&&&detector.detect(&img_2,&keypoints_2&);&&&&//--&Draw&keypoints&&&&Mat&img_keypoints_1;&Mat&img_keypoints_2;&&&&drawKeypoints(&img_1,&keypoints_1,&img_keypoints_1,&Scalar::all(-1),&DrawMatchesFlags::DEFAULT&);&&&&drawKeypoints(&img_2,&keypoints_2,&img_keypoints_2,&Scalar::all(-1),&DrawMatchesFlags::DEFAULT&);&&&&//--&Step&2:&Calculate&descriptors&(feature&vectors)&&&&SurfDescriptorExtractor&&&&&Mat&descriptors_1,&descriptors_2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&Matching&descriptor&vectors&using&FLANN&matcher&&&&FlannBasedMatcher&&&&&std::vector&&DMatch&&&&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);&&&&double&max_dist&=&0;&double&min_dist&=&100;&&&&//--&Quick&calculation&of&max&and&min&distances&between&keypoints&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&double&dist&=&matches[i].&&&&if(&dist&&&min_dist&)&min_dist&=&&&&&if(&dist&&&max_dist&)&max_dist&=&&&&&}&&&&printf("--&Max&dist&:&%f&\n",&max_dist&);&&&&printf("--&Min&dist&:&%f&\n",&min_dist&);&&&&//--&Draw&only&"good"&matches&(i.e.&whose&distance&is&less&than&2*min_dist,&&&&//--&or&a&small&arbitary&value&(&0.02&)&in&the&event&that&min_dist&is&very&&&&//--&small)&&&&
//--&PS.-&radiusMatch&can&also&be&used&here.&&&&std::vector&&DMatch&&&good_&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&if(&matches[i].distance&&=&/*max(2*min_dist,&0.02)*/3*min_dist&)&&&&{&good_matches.push_back(&matches[i]);&}&&&&}&&&&//--&Draw&only&"good"&matches&&&&Mat&img_&&&&drawMatches(&img_1,&keypoints_1,&img_2,&keypoints_2,&&&&&&&&good_matches,&img_matches,&Scalar::all(-1),&Scalar::all(-1),&&&&&&&&vector&char&(),&DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS&);&&&&//--&Localize&the&object&from&img_1&in&img_2&&&&std::vector&Point2f&&&&&&std::vector&Point2f&&&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)&&&&{&&&&&&&&&&&&obj.push_back(&keypoints_1[&good_matches[i].queryIdx&].pt&);&&&&&&&&scene.push_back(&keypoints_2[&good_matches[i].trainIdx&].pt&);&&&&&&&&printf(&"--&Good&Match&[%d]&Keypoint&1:&%d&&--&Keypoint&2:&%d&&\n",&i,&good_matches[i].queryIdx,&good_matches[i].trainIdx&);&&&&&}&&&&//直接调用ransac&&&&Mat&H&=&findHomography(&obj,&scene,&CV_RANSAC&);&&&&//--&Get&the&corners&from&the&image_1&(&the&object&to&be&"detected"&)&&&&std::vector&Point2f&&obj_corners(4);&&&&obj_corners[0]&=&Point(0,0);&obj_corners[1]&=&Point(&img_1.cols,&0&);&&&&obj_corners[2]&=&Point(&img_1.cols,&img_1.rows&);&obj_corners[3]&=&Point(&0,&img_1.rows&);&&&&std::vector&Point2f&&scene_corners(4);&&&&perspectiveTransform(&obj_corners,&scene_corners,&H);&&&&//--&Draw&lines&between&the&corners&(the&mapped&object&in&the&scene&-&image_2&)&&&&Point2f&offset(&(float)img_1.cols,&0);&&&&line(&img_matches,&scene_corners[0]&+&offset,&scene_corners[1]&+&offset,&Scalar(0,&255,&0),&4&);&&&&line(&img_matches,&scene_corners[1]&+&offset,&scene_corners[2]&+&offset,&Scalar(&0,&255,&0),&4&);&&&&line(&img_matches,&scene_corners[2]&+&offset,&scene_corners[3]&+&offset,&Scalar(&0,&255,&0),&4&);&&&&line(&img_matches,&scene_corners[3]&+&offset,&scene_corners[0]&+&offset,&Scalar(&0,&255,&0),&4&);&&&&//--&Show&detected&matches&&&&imshow(&"Good&Matches&&&Object&detection",&img_matches&);&&&&waitKey(0);&&&&ret
urn&0;}
简化后和注释后的版本
//&raw_surf.cpp&:&本例是对opencv-2.48相关例子的实现
#include&"stdafx.h"
#include&&iostream&
#include&"opencv2/core/core.hpp"
#include&"opencv2/features2d/features2d.hpp"
#include&"opencv2/highgui/highgui.hpp"
#include&"opencv2/nonfree/features2d.hpp"
#include&"opencv2/calib3d/calib3d.hpp"
using&namespace&
using&namespace&
int&main(&int&argc,&char**&argv&)
&&&&Mat&img_1&=&imread(&"img_opencv_1.png",&0&);
&&&&Mat&img_2&=&imread(&"img_opencv_2.png",&0&);
&&&&if(&!img_1.data&||&!img_2.data&)
&&&&{&std::cout&&&"&--(!)&Error&reading&images&"&&&&std::&return&-1;&}
&&&&//--&Step&1:&使用SURF识别出特征点
&&&&int&minHessian&=&400;
&&&&SurfFeatureDetector&detector(&minHessian&);
&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;
&&&&detector.detect(&img_1,&keypoints_1&);
&&&&detector.detect(&img_2,&keypoints_2&);
&&&&//--&Step&2:&描述SURF特征
&&&&SurfDescriptorExtractor&
&&&&Mat&descriptors_1,&descriptors_2;
&&&&pute(&img_1,&keypoints_1,&descriptors_1&);
&&&&pute(&img_2,&keypoints_2,&descriptors_2&);
&&&&//--&Step&3:&匹配
&&&&FlannBasedMatcher&//BFMatcher为强制匹配
&&&&std::vector&&DMatch&&&
&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);
&&&&//取最大最小距离
&&&&double&max_dist&=&0;&double&min_dist&=&100;
&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)
&&&&&&&&double&dist&=&matches[i].
&&&&&&&&if(&dist&&&min_dist&)&min_dist&=&
&&&&&&&&if(&dist&&&max_dist&)&max_dist&=&
&&&&std::vector&&DMatch&&&good_
&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)
&&&&&&&&if(&matches[i].distance&&=&3*min_dist&)//这里的阈值选择了3倍的min_dist
&&&&&&&&&&&&{&
&&&&&&&&&&&&&&&&good_matches.push_back(&matches[i]);&
&&&&&&&&&&&&&}
&&&&//画出"good&match"
&&&&Mat&img_
&&&&drawMatches(&img_1,&keypoints_1,&img_2,&keypoints_2,
&&&&&&&&good_matches,&img_matches,&Scalar::all(-1),&Scalar::all(-1),
&&&&&&&&vector&char&(),&DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS&);
&&&&//--&Localize&the&object&from&img_1&in&img_2
&&&&std::vector&Point2f&&
&&&&std::vector&Point2f&&
&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)
&&&&&&&&obj.push_back(&keypoints_1[&good_matches[i].queryIdx&].pt&);
&&&&&&&&scene.push_back(&keypoints_2[&good_matches[i].trainIdx&].pt&);
&&&&//直接调用ransac,计算单应矩阵
&&&&Mat&H&=&findHomography(&obj,&scene,&CV_RANSAC&);
&&&&//--&Get&the&corners&from&the&image_1&(&the&object&to&be&"detected"&)
&&&&std::vector&Point2f&&obj_corners(4);
&&&&obj_corners[0]&=&Point(0,0);&
&&&&obj_corners[1]&=&Point(&img_1.cols,&0&);
&&&&obj_corners[2]&=&Point(&img_1.cols,&img_1.rows&);&
&&&&obj_corners[3]&=&Point(&0,&img_1.rows&);
&&&&std::vector&Point2f&&scene_corners(4);
&&&&perspectiveTransform(&obj_corners,&scene_corners,&H);
&&&&//--&Draw&lines&between&the&corners&(the&mapped&object&in&the&scene&-&image_2&)
&&&&Point2f&offset(&(float)img_1.cols,&0);
&&&&line(&img_matches,&scene_corners[0]&+&offset,&scene_corners[1]&+&offset,&Scalar(0,&255,&0),&4&);
&&&&line(&img_matches,&scene_corners[1]&+&offset,&scene_corners[2]&+&offset,&Scalar(&0,&255,&0),&4&);
&&&&line(&img_matches,&scene_corners[2]&+&offset,&scene_corners[3]&+&offset,&Scalar(&0,&255,&0),&4&);
&&&&line(&img_matches,&scene_corners[3]&+&offset,&scene_corners[0]&+&offset,&Scalar(&0,&255,&0),&4&);
&&&&//--&Show&detected&matches
&&&&imshow(&"Good&Matches&&&Object&detection",&img_matches&);
&&&&waitKey(0);
&&&&return&0;
这里有两点需要注意,一个是除了FlannBasedMatcher之外,还有一种mathcer叫做BFMatcher,后者为强制匹配.
此外,计算所谓 "good match" 的时候采用了 "distance <= 3*min_dist" 的方法。我认为这里和论文中指出的"误差阈值设为 3"是一致的,如果理解错误请指出,感谢!
同时测试了航拍图片和连铸图片,航拍图片是自然图片,特征丰富;
连铸图片由于表面干扰大于原始纹理,无法得到单应矩阵
& & & & 最后,添加计算RANSAC内点外点的相关代码,这里以3作为分界线
& &&& &//&raw_surf.cpp&:&本例是对opencv-2.48相关例子的实现
//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/imgproc/imgproc.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"#include&"opencv2/calib3d/calib3d.hpp"using&namespace&using&namespace&//获得两个pointf之间的距离float&fDistance(Point2f&p1,Point2f&p2){&&&&float&ftmp&=&(p1.x-p2.x)*(p1.x-p2.x)&+&(p1.y-p2.y)*(p1.y-p2.y);&&&&ftmp&=&sqrt((float)ftmp);&&&&return&}int&main(&int&argc,&char**&argv&){&&&&Mat&img_1&=&imread(&"img_opencv_1.png",&0&);&&&&Mat&img_2&=&imread(&"img_opencv_2.png",&0&);&&&&////添加于连铸图像&&&&//img_1&=&img_1(Rect(20,0,img_1.cols-40,img_1.rows));&&&&//img_2&=&img_2(Rect(20,0,img_1.cols-40,img_1.rows));&//&&&&cv::Canny(img_1,img_1,100,200);&//&&&&cv::Canny(img_2,img_2,100,200);&&&&if(&!img_1.data&||&!img_2.data&)&&&&{&std::cout&&&"&--(!)&Error&reading&images&"&&&&std::&return&-1;&}&&&&//--&Step&1:&使用SURF识别出特征点&&&&int&minHessian&=&400;&&&&SurfFeatureDetector&detector(&minHessian&);&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;&&&&detector.detect(&img_1,&keypoints_1&);&&&&detector.detect(&img_2,&keypoints_2&);&&&&//--&Step&2:&描述SURF特征&&&&SurfDescriptorExtractor&&&&&Mat&descriptors_1,&descriptors_2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&匹配&&&&FlannBasedMatcher&//BFMatcher为强制匹配&&&&std::vector&&DMatch&&&&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);&&&&//取最大最小距离&&&&double&max_dist&=&0;&double&min_dist&=&100;&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&double&dist&=&matches[i].&&&&&&&&if(&dist&&&min_dist&)&min_dist&=&&&&&&&&&if(&dist&&&max_dist&)&max_dist&=&&&&&}&&&&std::vector&&DMatch&&&good_&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&if(&matches[i].distance&&=&3*min_dist&)//这里的阈值选择了3倍的min_dist&&&&&&&&&&&&{&&&&&&&&&&&&&&&&&good_matches.push_back(&matches[i]);&&&&&&&&&&&&&&}&&&&}&&&&//画出"good&match"&&&&Mat&img_&&&&drawMatches(&img_1,&keypoints_1
,&img_2,&keypoints_2,&&&&&&&&good_matches,&img_matches,&Scalar::all(-1),&Scalar::all(-1),&&&&&&&&vector&char&(),&DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS&);&&&&//--&Localize&the&object&from&img_1&in&img_2&&&&std::vector&Point2f&&&&&&std::vector&Point2f&&&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)&&&&{&&&&&&&&&&&&obj.push_back(&keypoints_1[&good_matches[i].queryIdx&].pt&);&&&&&&&&scene.push_back(&keypoints_2[&good_matches[i].trainIdx&].pt&);&&&&}&&&&//直接调用ransac,计算单应矩阵&&&&Mat&H&=&findHomography(&obj,&scene,&CV_RANSAC&);&&&&//--&Get&the&corners&from&the&image_1&(&the&object&to&be&"detected"&)&&&&std::vector&Point2f&&obj_corners(4);&&&&obj_corners[0]&=&Point(0,0);&&&&&obj_corners[1]&=&Point(&img_1.cols,&0&);&&&&obj_corners[2]&=&Point(&img_1.cols,&img_1.rows&);&&&&&obj_corners[3]&=&Point(&0,&img_1.rows&);&&&&std::vector&Point2f&&scene_corners(4);&&&&perspectiveTransform(&obj_corners,&scene_corners,&H);&&&&//计算内点外点&&&&std::vector&Point2f&&scene_test(obj.size());&&&&perspectiveTransform(obj,scene_test,H);&&&&for&(int&i=0;i&scene_test.size();i++)&&&&{&&&&&&&printf("%d&is&%f&\n",i+1,fDistance(scene[i],scene_test[i]));&&&&}&&&&&&&&//--&Draw&lines&between&the&corners&(the&mapped&object&in&the&scene&-&image_2&)&&&&Point2f&offset(&(float)img_1.cols,&0);&&&&line(&img_matches,&scene_corners[0]&+&offset,&scene_corners[1]&+&offset,&Scalar(0,&255,&0),&4&);&&&&line(&img_matches,&scene_corners[1]&+&offset,&scene_corners[2]&+&offset,&Scalar(&0,&255,&0),&4&);&&&&line(&img_matches,&scene_corners[2]&+&offset,&scene_corners[3]&+&offset,&Scalar(&0,&255,&0),&4&);&&&&line(&img_matches,&scene_corners[3]&+&offset,&scene_corners[0]&+&offset,&Scalar(&0,&255,&0),&4&);&&&&//--&Show&detected&matches&&&&imshow(&"Good&Matches&&&Object&detection",&img_matches&);&&&&waitKey(0);&&&&return&0;}
& & & & 结果显示
& & & &其中,有误差的点就很明显了。
& & & &小结一下,这里实现了使用opencv得到两幅图像之间的单应矩阵的方法。不是所有的图像都能够获得单应矩阵的,必须是两幅本身就有关系的图片才可以;而且最好是自然图像,像生产线上的这种图像,其拼接就需要采用其他方法。
二、拼接和融合
由于之前已经计算出了"单应矩阵",所以这里直接利用这个矩阵就好。需要注意的一点是理清楚"帧"和拼接图像之间的关系。一般来说,我们采用的是"柱面坐标"或平面坐标。书中采用的是若干图像在水平方向上基本上一字排开,是平面坐标。那么,如果按照文中的"帧到拼接图像"的方法,我们认为图像拼接的顺序就是由左到右,一幅一幅地计算误差,而后进行叠加。
& & & & &为了方便说明算法,采用了《学习opencv》中提供的教堂图像
其结果就是经过surf匹配,而将右边的图像形变成为适合叠加的状态。
基于此,进行图像对准
//&raw_surf.cpp&:&本例是对opencv-2.48相关例子的实现//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/imgproc/imgproc.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"#include&"opencv2/calib3d/calib3d.hpp"using&namespace&using&namespace&int&main(&int&argc,&char**&argv&){&&&&&&&&Mat&img_1&;&&&&Mat&img_2&;&&&&Mat&img_raw_1&=&imread("c1.bmp");&&&&Mat&img_raw_2&=&imread("c3.bmp");&&&&cvtColor(img_raw_1,img_1,CV_BGR2GRAY);&&&&cvtColor(img_raw_2,img_2,CV_BGR2GRAY);&&&&//--&Step&1:&使用SURF识别出特征点&&&&int&minHessian&=&400;&&&&SurfFeatureDetector&detector(&minHessian&);&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;&&&&detector.detect(&img_1,&keypoints_1&);&&&&detector.detect(&img_2,&keypoints_2&);&&&&//--&Step&2:&描述SURF特征&&&&SurfDescriptorExtractor&&&&&Mat&descriptors_1,&descriptors_2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&匹配&&&&FlannBasedMatcher&//BFMatcher为强制匹配&&&&std::vector&&DMatch&&&&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);&&&&//取最大最小距离&&&&double&max_dist&=&0;&double&min_dist&=&100;&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&double&dist&=&matches[i].&&&&&&&&if(&dist&&&min_dist&)&min_dist&=&&&&&&&&&if(&dist&&&max_dist&)&max_dist&=&&&&&}&&&&std::vector&&DMatch&&&good_&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&if(&matches[i].distance&&=&3*min_dist&)//这里的阈值选择了3倍的min_dist&&&&&&&&{&&&&&&&&&&&&&good_matches.push_back(&matches[i]);&&&&&&&&&}&&&&}&&&&//--&Localize&the&object&from&img_1&in&img_2&&&&std::vector&Point2f&&&&&&std::vector&Point2f&&&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)&&&&{&&&&&&&&&&&&//这里采用&帧向拼接图像中添加的方法&,因此左边的是scene,右边的是obj&&&&&&&&scene.push_back(&keypoints_1[&good_matches[i].queryIdx&].pt&);&&&&&&&&obj.push_back(&keypoints_2[&good_matches[i].trainIdx&].pt&);&&&&}&&&&//直接调用ransac,计算单应矩阵&&&&Mat&H&=&findHomography(&obj,&scene,&CV_
RANSAC&);&&&&//图像对准&&&&Mat&&&&&warpPerspective(img_raw_2,result,H,Size(2*img_2.cols,img_2.rows));&&&&Mat&half(result,cv::Rect(0,0,img_2.cols,img_2.rows));&&&&img_raw_1.copyTo(half);&&&&imshow("result",result);&&&&waitKey(0);&&&&return&0;}
依据论文中提到的3种方法进行融合
//&raw_surf.cpp&:&本例是对opencv-2.48相关例子的实现//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/imgproc/imgproc.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"#include&"opencv2/calib3d/calib3d.hpp"using&namespace&using&namespace&int&main(&int&argc,&char**&argv&){&&&&&&&&Mat&img_1&;&&&&Mat&img_2&;&&&&Mat&img_raw_1&=&imread("c1.bmp");&&&&Mat&img_raw_2&=&imread("c3.bmp");&&&&cvtColor(img_raw_1,img_1,CV_BGR2GRAY);&&&&cvtColor(img_raw_2,img_2,CV_BGR2GRAY);&&&&//--&Step&1:&使用SURF识别出特征点&&&&int&minHessian&=&400;&&&&SurfFeatureDetector&detector(&minHessian&);&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;&&&&detector.detect(&img_1,&keypoints_1&);&&&&detector.detect(&img_2,&keypoints_2&);&&&&//--&Step&2:&描述SURF特征&&&&SurfDescriptorExtractor&&&&&Mat&descriptors_1,&descriptors_2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&匹配&&&&FlannBasedMatcher&//BFMatcher为强制匹配&&&&std::vector&&DMatch&&&&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);&&&&//取最大最小距离&&&&double&max_dist&=&0;&double&min_dist&=&100;&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&double&dist&=&matches[i].&&&&&&&&if(&dist&&&min_dist&)&min_dist&=&&&&&&&&&if(&dist&&&max_dist&)&max_dist&=&&&&&}&&&&std::vector&&DMatch&&&good_&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&if(&matches[i].distance&&=&3*min_dist&)//这里的阈值选择了3倍的min_dist&&&&&&&&{&&&&&&&&&&&&&good_matches.push_back(&matches[i]);&&&&&&&&&}&&&&}&&&&//--&Localize&the&object&from&img_1&in&img_2&&&&std::vector&Point2f&&&&&&std::vector&Point2f&&&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)&&&&{&&&&&&&&&&&&//这里采用&帧向拼接图像中添加的方法&,因此左边的是scene,右边的是obj&&&&&&&&scene.push_back(&keypoints_1[&good_matches[i].queryIdx&].pt&);&&&&&&&&obj.push_back(&keypoints_2[&good_matches[i].trainIdx&].pt&);&&&&}&&&&//直接调用ransac,计算单应矩阵&&&&Mat&H&=&findHomography(&obj,&scene,&CV_
RANSAC&);&&&&//图像对准&&&&Mat&&&&&Mat&&//保存的是新帧经过单应矩阵变换以后的图像&&&&warpPerspective(img_raw_2,result,H,Size(2*img_2.cols,img_2.rows));&&&&result.copyTo(resultback);&&&&Mat&half(result,cv::Rect(0,0,img_2.cols,img_2.rows));&&&&img_raw_1.copyTo(half);&&&&imshow("ajust",result);&&&&//渐入渐出融合&&&&Mat&result_linerblend&=&result.clone();&&&&&double&dblend&=&0.0;&&&&&int&ioffset&=img_2.cols-100;&&&&&for&(int&i&=&0;i&100;i++)&&&&&{&&&&&&&&&&&&&&&&&&&&&&&result_linerblend.col(ioffset+i)&=&result.col(ioffset+i)*(1-dblend)&+&resultback.col(ioffset+i)*&&&&&&&&&dblend&=&dblend&+0.01;&&&&}&&&&imshow("result_linerblend",result_linerblend);&&&&//最大值法融合&&&&Mat&result_maxvalue&=&result.clone();&&&&for&(int&i&=&0;i&img_2.i++)&&&&{&&&&&&&&&&&&&for&(int&j=0;j&100;j++)&&&&&&&&{&&&&&&&&&&&&int&iresult=&result.at&Vec3b&(i,ioffset+j)[0]+&result.at&Vec3b&(i,ioffset+j)[1]+&result.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&int&iresultback&=&resultback.at&Vec3b&(i,ioffset+j)[0]+&resultback.at&Vec3b&(i,ioffset+j)[1]+&resultback.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&if&(iresultback&&iresult)&&&&&&&&&&&&{&&&&&&&&&&&&&&&&result_maxvalue.at&Vec3b&(i,ioffset+j)&=&resultback.at&Vec3b&(i,ioffset+j);&&&&&&&&&&&&}&&&&&&&&}&&&&}&&&&imshow("result_maxvalue",result_maxvalue);&&&&//带阈值的加权平滑处理&&&&Mat&result_advance&=&result.clone();&&&&for&(int&i&=&0;i&img_2.i++)&&&&{&&&&&&&&&&for&(int&j&=&0;j&33;j++)&&&&&&&&{&&&&&&&&&&&&&&&int&iimg1=&result.at&Vec3b&(i,ioffset+j)[0]+&result.at&Vec3b&(i,ioffset+j)[1]+&result.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&//int&iimg2=&resultback.at&Vec3b&(i,ioffset+j)[0]+&resultback.at&Vec3b&(i,ioffset+j)[1]+&resultback.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&int&ilinerblend&=&result_linerblend.at&Vec3b&(i,ioffset+j)[0]+&result_linerblend.at&Vec3b&(i,ioffset+j)[1]+&result_linerblend.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&if&(abs(iimg1&-&ilinerblend)&3)&&&&&&&&&&&&{&&&&&&&&&&&&&&&&result_advance.at&Vec3b&(i,ioffset+j)&=&result_linerblend.at&Vec3b&(i,ioffset+j);&&&&&&&&&&&&}&&&&&&&&}&&&&}&&&&for&(int&i&=&
0;i&img_2.i++)&&&&{&&&&&&&&&&for&(int&j&=&33;j&66;j++)&&&&&&&&{&&&&&&&&&&&&&&&int&iimg1=&result.at&Vec3b&(i,ioffset+j)[0]+&result.at&Vec3b&(i,ioffset+j)[1]+&result.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&int&iimg2=&resultback.at&Vec3b&(i,ioffset+j)[0]+&resultback.at&Vec3b&(i,ioffset+j)[1]+&resultback.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&int&ilinerblend&=&result_linerblend.at&Vec3b&(i,ioffset+j)[0]+&result_linerblend.at&Vec3b&(i,ioffset+j)[1]+&result_linerblend.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&if&(abs(max(iimg1,iimg2)&-&ilinerblend)&3)&&&&&&&&&&&&{&&&&&&&&&&&&&&&&result_advance.at&Vec3b&(i,ioffset+j)&=&result_linerblend.at&Vec3b&(i,ioffset+j);&&&&&&&&&&&&}&&&&&&&&&&&&else&if&(iimg2&iimg1)&&&&&&&&&&&&{&&&&&&&&&&&&&&&&result_advance.at&Vec3b&(i,ioffset+j)&=&resultback.at&Vec3b&(i,ioffset+j);&&&&&&&&&&&&}&&&&&&&&}&&&&}&&&&for&(int&i&=&0;i&img_2.i++)&&&&{&&&&&&&&&&for&(int&j&=&66;j&100;j++)&&&&&&&&{&&&&&&&&&&&&&&&//int&iimg1=&result.at&Vec3b&(i,ioffset+j)[0]+&result.at&Vec3b&(i,ioffset+j)[1]+&result.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&int&iimg2=&resultback.at&Vec3b&(i,ioffset+j)[0]+&resultback.at&Vec3b&(i,ioffset+j)[1]+&resultback.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&int&ilinerblend&=&result_linerblend.at&Vec3b&(i,ioffset+j)[0]+&result_linerblend.at&Vec3b&(i,ioffset+j)[1]+&result_linerblend.at&Vec3b&(i,ioffset+j)[2];&&&&&&&&&&&&if&(abs(iimg2&-&ilinerblend)&3)&&&&&&&&&&&&{&&&&&&&&&&&&&&&&result_advance.at&Vec3b&(i,ioffset+j)&=&result_linerblend.at&Vec3b&(i,ioffset+j);&&&&&&&&&&&&}&&&&&&&&&&&&else&&&&&&&&&&&&{&&&&&&&&&&&&&&&&result_advance.at&Vec3b&(i,ioffset+j)&=&resultback.at&Vec3b&(i,ioffset+j);&&&&&&&&&&&&}&&&&&&&&}&&&&}&&&&imshow("result_advance",result_advance);&&&&waitKey(0);&&&&return&0;}
目前看来,maxvalue是最好的融合方法,但是和论文中提到的一样,此类图片不能很好地体现融合算法的特点,为此我也拍摄了和论文中类似的图片。发现想拍摄质量较好的图片,还是需要一定的硬件和技巧的。因此,软件和硬件,在使用的过程中应该结合起来。
此外,使用文中图片,效果如下
换一组图片,可以发现不同的结果
相比较而言,还是linerblend能够保持不错的质量,而具体到底采取哪种拼接的方式,必须根据实际情况来选择。
三、多图连续融合拼接
& & & & 前面处理的是2图的例子,至少将这种情况推广到3图,这样才能够得到统一处理的经验。
& & & & 连续图像处理,不仅仅是在已经处理好的图像上面再添加一幅图,其中比较关键的一点就是如何来处理已经拼接好的图像。
那么,m02(也就是 `H.at<char>(0,2)`)就是水平位移。但是在实际使用中,始终无法正确取得这个值——原因在于 `findHomography` 返回的 H 是 CV_64F(double)矩阵,按 `char`/字节去读只能得到第一个 double 的某个字节,正确的读法应是 `H.at<double>(0,2)`。
Mat&outImage&=H.clone();&&&&uchar*&outData=outImage.ptr&uchar&(0);&&&&int&itemp&=&outData[2];&&&&&//获得偏移&&&&line(result_linerblend,Point(result_linerblend.cols-itemp,0),Point(result_linerblend.cols-itemp,img_2.rows),Scalar(255,255,255),2);&&&&imshow("result_linerblend",result_linerblend);
只好采取编写专门代码的方法进行处理
//获取已经处理图像的边界&&&&Mat&matmask&=&result_linerblend.clone();&&&&int&idaterow0&=&0;int&idaterowend&=&0;//标识了最上面和最小面第一个不为0的树,这里采用的是宽度减去的算法&&&&for(int&j=matmask.cols-1;j&=0;j--)&&&&{&&&&&&&&&&&&&&&&&&if&(matmask.at&Vec3b&(0,j)[0]&0)&&&&&&&&{&&&&&&&&&&&&idaterow0&=&j;&&&&&&&&&&&&break;&&&&&&&&}&&&&}&&&&&for(int&j=matmask.cols-1;j&=0;j--)&&&&{&&&&&&&&&&&&&&&&&&&&if&(matmask.at&Vec3b&(matmask.rows-1,j)[0]&0)&&&&&&&&{&&&&&&&&&&&&idaterowend&=&j;&&&&&&&&&&&&break;&&&&&&&&}&&&&}&&&&&&&&line(matmask,Point(min(idaterow0,idaterowend),0),Point(min(idaterow0,idaterowend),img_2.rows),Scalar(255,255,255),2);&&&&imshow("result_linerblend",matmask);
效果良好稳定.目前的实现是将白线以左的区域切割下来进行拼接。
基于此,编写3图拼接,效果如下。目前的图像质量,在差值上面可能还需要增强,下一步处理
//&blend_series.cpp&:&多图拼接//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/imgproc/imgproc.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"#include&"opencv2/calib3d/calib3d.hpp"using&namespace&using&namespace&int&main(&int&argc,&char**&argv&){&&&&Mat&img_1&;&&&&Mat&img_2&;&&&&Mat&img_raw_1&=&imread("Univ3.jpg");&&&&Mat&img_raw_2&=&imread("Univ2.jpg");&&&&cvtColor(img_raw_1,img_1,CV_BGR2GRAY);&&&&cvtColor(img_raw_2,img_2,CV_BGR2GRAY);&&&&//--&Step&1:&使用SURF识别出特征点&&&&int&minHessian&=&400;&&&&SurfFeatureDetector&detector(&minHessian&);&&&&std::vector&KeyPoint&&keypoints_1,&keypoints_2;&&&&detector.detect(&img_1,&keypoints_1&);&&&&detector.detect(&img_2,&keypoints_2&);&&&&//--&Step&2:&描述SURF特征&&&&SurfDescriptorExtractor&&&&&Mat&descriptors_1,&descriptors_2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&匹配&&&&FlannBasedMatcher&//BFMatcher为强制匹配&&&&std::vector&&DMatch&&&&&&&matcher.match(&descriptors_1,&descriptors_2,&matches&);&&&&//取最大最小距离&&&&double&max_dist&=&0;&double&min_dist&=&100;&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&double&dist&=&matches[i].&&&&&&&&if(&dist&&&min_dist&)&min_dist&=&&&&&&&&&if(&dist&&&max_dist&)&max_dist&=&&&&&}&&&&std::vector&&DMatch&&&good_&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&if(&matches[i].distance&&=&3*min_dist&)//这里的阈值选择了3倍的min_dist&&&&&&&&{&&&&&&&&&&&&&good_matches.push_back(&matches[i]);&&&&&&&&&}&&&&}&&&&//--&Localize&the&object&from&img_1&in&img_2&&&&std::vector&Point2f&&&&&&std::vector&Point2f&&&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)&&&&{&&&&&&&&&&&&//这里采用&帧向拼接图像中添加的方法&,因此左边的是scene,右边的是obj&&&&&&&&scene.push_back(&keypoints_1[&good_matches[i].queryIdx&].pt&);&&&&&&&&obj.push_back(&keypoints_2[&good_matches[i].trainIdx&].pt&);&&&&}&&&&//直接调用ransac,计算单应矩阵&&&&Mat&H&=&findHomography(&obj,&scene,&CV_RANSAC&);&&&
&//图像对准&&&&Mat&&&&&Mat&&//保存的是新帧经过单应矩阵变换以后的图像&&&&warpPerspective(img_raw_2,result,H,Size(2*img_2.cols,img_2.rows));&&&&result.copyTo(resultback);&&&&Mat&half(result,cv::Rect(0,0,img_2.cols,img_2.rows));&&&&img_raw_1.copyTo(half);&&&&//imshow("ajust",result);&&&&//渐入渐出融合&&&&Mat&result_linerblend&=&result.clone();&&&&double&dblend&=&0.0;&&&&int&ioffset&=img_2.cols-100;&&&&for&(int&i&=&0;i&100;i++)&&&&{&&&&&&&&&&&&&&&&&&&&&&result_linerblend.col(ioffset+i)&=&result.col(ioffset+i)*(1-dblend)&+&resultback.col(ioffset+i)*&&&&&&&&dblend&=&dblend&+0.01;&&&&}&&&&//获取已经处理图像的边界&&&&Mat&matmask&=&result_linerblend.clone();&&&&int&idaterow0&=&0;int&idaterowend&=&0;//标识了最上面和最小面第一个不为0的树,这里采用的是宽度减去的算法&&&&for(int&j=matmask.cols-1;j&=0;j--)&&&&{&&&&&&&&&&&&&&&&&&if&(matmask.at&Vec3b&(0,j)[0]&0)&&&&&&&&{&&&&&&&&&&&&idaterow0&=&j;&&&&&&&&&&&&break;&&&&&&&&}&&&&}&&&&&for(int&j=matmask.cols-1;j&=0;j--)&&&&{&&&&&&&&&&&&&&&&&&&&if&(matmask.at&Vec3b&(matmask.rows-1,j)[0]&0)&&&&&&&&{&&&&&&&&&&&&idaterowend&=&j;&&&&&&&&&&&&break;&&&&&&&&}&&&&}&&&&&&&&line(matmask,Point(min(idaterow0,idaterowend),0),Point(min(idaterow0,idaterowend),img_2.rows),Scalar(255,255,255),2);&&&&imshow("result_linerblend",matmask);&&&&/////////////////---------------对结果图像继续处理---------------------------------/////////////////&&&&img_raw_1&=&result_linerblend(Rect(0,0,min(idaterow0,idaterowend),img_2.rows));&&&&img_raw_2&=&imread("Univ1.jpg");&&&&cvtColor(img_raw_1,img_1,CV_BGR2GRAY);&&&&cvtColor(img_raw_2,img_2,CV_BGR2GRAY);&&&&////--&Step&1:&使用SURF识别出特征点&&&&//&&&&SurfFeatureDetector&detector2(&minHessian&);&&&&keypoints_1.clear();&&&&keypoints_2.clear();&&&&detector2.detect(&img_1,&keypoints_1&);&&&&detector2.detect(&img_2,&keypoints_2&);&&&&//--&Step&2:&描述SURF特征&&&&SurfDescriptorExtractor&extractor2;&&&&pute(&img_1,&keypoints_1,&descriptors_1&);&&&&pute(&img_2,&keypoints_2,&descriptors_2&);&&&&//--&Step&3:&匹配&&&&FlannBasedMatcher&matcher2;//BFMatcher为强制匹配&&&&matcher2.match(&descriptors_1,&descriptors_2,&matches&);&&&&/
/取最大最小距离&&&&&max_dist&=&0;&&min_dist&=&100;&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&double&dist&=&matches[i].&&&&&&&&if(&dist&&&min_dist&)&min_dist&=&&&&&&&&&if(&dist&&&max_dist&)&max_dist&=&&&&&}&&&&good_matches.clear();&&&&for(&int&i&=&0;&i&&&descriptors_1.&i++&)&&&&{&&&&&&&&&if(&matches[i].distance&&=&3*min_dist&)//这里的阈值选择了3倍的min_dist&&&&&&&&{&&&&&&&&&&&&&good_matches.push_back(&matches[i]);&&&&&&&&&}&&&&}&&&&//--&Localize&the&object&from&img_1&in&img_2&&&&obj.clear();&&&&scene.clear();&&&&for(&int&i&=&0;&i&&&(int)good_matches.size();&i++&)&&&&{&&&&&&&&&&&&//这里采用&帧向拼接图像中添加的方法&,因此左边的是scene,右边的是obj&&&&&&&&scene.push_back(&keypoints_1[&good_matches[i].queryIdx&].pt&);&&&&&&&&obj.push_back(&keypoints_2[&good_matches[i].trainIdx&].pt&);&&&&}&&&&//直接调用ransac,计算单应矩阵&&&&&H&=&findHomography(&obj,&scene,&CV_RANSAC&);&&&&//图像对准&&&&warpPerspective(img_raw_2,result,H,Size(img_1.cols+img_2.cols,img_2.rows));&&&&result.copyTo(resultback);&&&&Mat&half2(result,cv::Rect(0,0,img_1.cols,img_1.rows));&&&&img_raw_1.copyTo(half2);&&&&imshow("ajust",result);&&&&//渐入渐出融合&&&&result_linerblend&=&result.clone();&&&&&dblend&=&0.0;&&&&&ioffset&=img_1.cols-100;&&&&for&(int&i&=&0;i&100;i++)&&&&{&&&&&&&&&&&&&&&&&&&&&&result_linerblend.col(ioffset+i)&=&result.col(ioffset+i)*(1-dblend)&+&resultback.col(ioffset+i)*&&&&&&&&dblend&=&dblend&+0.01;&&&&}&&&&imshow("result_linerblend",result_linerblend);&&&&waitKey(0);&&&&return&0;}
复制粘贴,实现5图拼接。这个时候发现,3图往往是一个极限值(这也可能就是为什么opencv里面的例子提供的是3图),当第四图出现的时候,其单应效果非常差
为什么会出现这种情况,反思后认识到,论文中采用的是平面坐标,也就是所有的图片都是基本位于一个平面上的,这一点特别通过她后面的那个罗技摄像头的部署能够看出来。但是在现实中,更常见的情况是人站在中间,360度地拍摄,这个时候需要采用柱面坐标系,也就是一开始对于图像要进行相关处理,也就是所谓的柱状投影。
可以得到这样的效果,这个效果是否正确还有待商榷,但是基于此的确可以更进一步地做东西了。
//&column_transoform.cpp&:&桶装投影//#include&"stdafx.h"#include&&iostream&#include&"opencv2/core/core.hpp"#include&"opencv2/imgproc/imgproc.hpp"#include&"opencv2/features2d/features2d.hpp"#include&"opencv2/highgui/highgui.hpp"#include&"opencv2/nonfree/features2d.hpp"#include&"opencv2/calib3d/calib3d.hpp"using&namespace&using&namespace&#define&&PI&3.14159&int&main(&int&argc,&char**&argv&){&&&&Mat&img_1&=&imread(&"Univ1.jpg");&&&&Mat&img_result&=&img_1.clone();&&&&for(int&i=0;i&img_result.i++)&&&&{&&&&&&&&for(int&j=0;j&img_result.j++)&&&&&&&&{&&&&&&&&&&&&&&&&&img_result.at&Vec3b&(i,j)=0;&&&&&&&&}&&&&}&&&&&&&&int&W&=&img_1.&&&&int&H&=&img_1.&&&&float&r&=&W/(2*tan(PI/6));&&&&float&k&=&0;&&&&float&fx=0;&&&&float&fy=0;&&&&for(int&i=0;i&img_1.i++)&&&&{&&&&&&&&for(int&j=0;j&img_1.j++)&&&&&&&&{&&&&&&&&&&&&&&&&&k&=&sqrt((float)(r*r+(W/2-j)*(W/2-j)));&&&&&&&&&&&&fx&=&r*sin(PI/6)+r*sin(atan((j&-W/2&)/r));&&&&&&&&&&&&fy&=&H/2&+r*(i-H/2)/k;&&&&&&&&&&&&int&ix&=&(int)&&&&&&&&&&&&int&iy&=&(int)&&&&&&&&&&&&if&(ix&W&&ix&=0&&iy&H&&iy&=0)&&&&&&&&&&&&{&&&&&&&&&&&&&&&&img_result.at&Vec3b&(iy,ix)=&img_1.at&Vec3b&(i,j);&&&&&&&&&&&&&&&&&&&&&&&&&&&&&}&&&&&&&&&&&&&&&&&&&&}&&&&}&&&&&&&&imshow(&"桶状投影",&img_1&);&&&&imshow("img_result",img_result);&&&&waitKey(0);&&&&return&0;}
效果依然是不佳,看来在这个地方,不仅仅是做一个桶形变换那么简单,一定有定量的参数在里面,也可能是我的变换写错了。这个下一步研究。
【未完待续】
版权所有 爱编程 (C) Copyright 2012. . All Rights Reserved.
闽ICP备号-3
微信扫一扫关注爱编程,每天为您推送一篇经典技术文章。

我要回帖

更多关于 runtime error 的文章

 

随机推荐