-1

也許是我的英語不好,問題沒有表達清楚。我是 OpenCV 的新手,想知道拼接器(Stitcher)在合併圖像時所使用的重疊區域在哪裏,如下圖所示的全景圖拼接算法。

image

merged image

+1

請提供更多詳情。 – Piglet

+0

查看[Wiki:autostitch](https://en.wikipedia.org/wiki/AutoStitch)它是如何工作的。當您運行拼接算法時,您將獲得每個圖像的變換矩陣,以便將其轉換爲多邊形,並將它們的相交/重疊計算爲幾何問題。 – Spektre

回答

0

如果您知道拍攝圖像的先後順序,可以按照此代碼將圖像依次拼接。如果順序未知,解決方案會變得更加複雜。此外,此代碼是針對相同大小的圖像設計的;如果相機發生了移動,可能會產生一些錯誤的結果,請自行加入必要的檢查以確保正確性。您可以參考這篇文章「http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/」,以更好地理解其中被調用了兩次的拼接函數。

#include <stdio.h> 
#include <iostream> 
#include "opencv2/core/core.hpp" 
#include "opencv2/features2d/features2d.hpp" 
#include "opencv2/highgui/highgui.hpp" 
#include "opencv2/nonfree/nonfree.hpp" 
#include "opencv2/calib3d/calib3d.hpp" 
#include "opencv2/imgproc/imgproc.hpp" 
using namespace cv; 
void stitching(cv::Mat&,cv::Mat& ,cv::Mat&); 
int main() 
{   
Mat image1= imread("image1.jpg"); 
Mat image2= imread("image2.jpg"); 
Mat image3= imread("image3.jpg"); 
Mat gray_image1; 
Mat gray_image2; 
Mat gray_image3; 
Mat result1,result2; 
// Convert to Grayscale 
cvtColor(image1, gray_image1, CV_RGB2GRAY); 
cvtColor(image2, gray_image2, CV_RGB2GRAY); 
cvtColor(image3, gray_image3, CV_RGB2GRAY); 

stitching(gray_image1,gray_image2,result1); 
stitching(result1,gray_image3,result2); 
cv::imshow("stitched image"result2); 
cv::WaitKey(0); 


} 


    // Stitches im1 onto im2 and writes the composite into stitch_im.
    //
    // Pipeline: SURF keypoints/descriptors on both images -> FLANN
    // matching -> distance-based match filtering -> RANSAC homography
    // mapping im1 into im2's coordinate frame -> warp im1 onto a wide
    // canvas and paste im2 into its left part.
    //
    // Assumes im2 is the left image of the pair (canvas origin), as in
    // the referenced tutorial. stitch_im is left untouched if too few
    // good matches are found.
    void stitching(cv::Mat& im1, cv::Mat& im2, cv::Mat& stitch_im)
    {
        // SURF Hessian threshold: lower values yield more (weaker) keypoints.
        int minHessian = 400;

        SurfFeatureDetector detector(minHessian);

        std::vector<KeyPoint> keypoints_object, keypoints_scene;
        detector.detect(im1, keypoints_object);
        detector.detect(im2, keypoints_scene);

        // Compute SURF descriptors at the detected keypoints.
        SurfDescriptorExtractor extractor;
        Mat descriptors_object, descriptors_scene;
        extractor.compute(im1, keypoints_object, descriptors_object);
        extractor.compute(im2, keypoints_scene, descriptors_scene);

        // Approximate nearest-neighbour matching of descriptors.
        FlannBasedMatcher matcher;
        std::vector<DMatch> matches;
        matcher.match(descriptors_object, descriptors_scene, matches);

        // Find the smallest match distance to use as a quality baseline.
        double max_dist = 0; double min_dist = 100;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            double dist = matches[i].distance;
            if (dist < min_dist) min_dist = dist;
            if (dist > max_dist) max_dist = dist;
        }

        // Keep only matches close to the best one (heuristic from the
        // OpenCV feature-matching tutorial).
        std::vector<DMatch> good_matches;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            if (matches[i].distance < 3 * min_dist)
            {
                good_matches.push_back(matches[i]);
            }
        }

        // findHomography requires at least 4 point correspondences;
        // bail out instead of asserting inside OpenCV.
        if (good_matches.size() < 4)
        {
            std::cout << "stitching: not enough good matches for a homography" << std::endl;
            return;
        }

        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        for (size_t i = 0; i < good_matches.size(); i++)
        {
            obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
            scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
        }

        // H maps im1 points into im2's frame; RANSAC rejects outlier matches.
        Mat H = findHomography(obj, scene, CV_RANSAC);

        // Warp im1 onto a canvas wide enough to hold both images...
        warpPerspective(im1, stitch_im, H, cv::Size(im1.cols + im2.cols, im1.rows));
        // ...then paste im2 into the left part of the canvas. The original
        // code omitted this copy, so the output contained only warped im1.
        cv::Mat half(stitch_im, cv::Rect(0, 0, im2.cols, im2.rows));
        im2.copyTo(half);
    }