我已經遇到過類似的麻煩,試圖在OpenCV上顯示圖像上2D DFT的大小。我終於完成了一個工作實現。我不認爲它是偉大的代碼,它只能在256×256 8位(灰度)單通道圖像上測試 - lenna.png(它可能需要進行調整以適當填充其他圖像尺寸)。輸入圖像與重建圖像之間的均方誤差爲零,因此它的工作方式應該如此。 OpenCV v2.1是必需的。
文件:mstrInclude.h
#ifndef MSTR_INCLUDE_H   // renamed: _MASTER_INCLUDE_ (leading underscore + capital) is reserved to the implementation
#define MSTR_INCLUDE_H
// Standard library
#include <stdio.h>
#include <string>
#include <sstream>
#include <iostream>
#include <cmath>
using namespace std;  // NOTE(review): a using-directive in a header leaks into every includer; kept because main.cpp relies on it
// OpenCV 2.1
#include "cv.h"
#include "highgui.h"
using namespace cv;
// Input image: tested only with a 256x256 8-bit single-channel grayscale image;
// other sizes may need explicit padding to the optimal DFT size.
#define LOAD_IMAGE "lenna.png"
#endif // MSTR_INCLUDE_H
文件:main.cpp
#include "mstrInclude.h"
// Multiply every pixel by (-1)^(i+j) so that, after the DFT, the
// zero-frequency (DC) term sits at the center of the spectrum instead
// of at the top-left corner.  Applying the same transform again undoes it.
// Both imgIn and imgOut must be CV_64FC1 and the same size.
void translateImg(Mat& imgIn, Mat& imgOut)
{
    for (int i = 0; i < imgIn.rows; i++)
    {
        for (int j = 0; j < imgIn.cols; j++)
        {
            // ((i + j) & 1) is the parity of i+j: same +1/-1 factor as
            // pow(-1.0, i+j) without a transcendental call per pixel.
            const double sign = ((i + j) & 1) ? -1.0 : 1.0;
            imgOut.at<double>(i, j) = imgIn.at<double>(i, j) * sign;
        }
    }
}
// Log-compress a magnitude image for display:
//   imgOut(r,c) = scaleFactor * log(1 + imgIn(r,c))
// The +1 keeps the argument of log() strictly positive.
// Both Mats must be CV_64FC1 and the same size.
void scaleImg(Mat& imgIn, Mat& imgOut, float scaleFactor)
{
    const double k = (double)scaleFactor;
    for (int r = 0; r < imgIn.rows; ++r)
    {
        for (int c = 0; c < imgIn.cols; ++c)
        {
            const double v = imgIn.at<double>(r, c);
            imgOut.at<double>(r, c) = k * log(1.0 + v);
        }
    }
}
// Dump the top-left rows x cols corner of a CV_64FC1 matrix to stdout,
// one matrix row per output line.  Pass -1 (or anything >= the matrix
// extent) for rows/cols to print the whole dimension.
void consoleOut(cv::Mat outMat, int rows = 5, int cols = 5)
{
    // Clamp the requested window to the actual matrix size.
    const int maxR = (rows == -1 || rows >= outMat.rows) ? outMat.rows : rows;
    const int maxC = (cols == -1 || cols >= outMat.cols) ? outMat.cols : cols;
    for (int r = 0; r < maxR; ++r)
    {
        for (int c = 0; c < maxC; ++c)
        {
            cout << outMat.at<double>(r, c) << " ";
        }
        cout << endl;
    }
}
// Mean-squared error between two same-size CV_8UC1 (8-bit grayscale) images.
// Returns 0.0 for an empty image instead of producing 0/0 (NaN).
double calcMSE(Mat& imgOrig, Mat& imgReconst)
{
    const long total = (long)imgOrig.rows * imgOrig.cols;
    if (total == 0)
        return 0.0;  // empty input: define MSE as 0 rather than dividing by zero
    double MSE = 0.0;
    for (int i = 0; i < imgOrig.rows; i++)
    {
        for (int j = 0; j < imgOrig.cols; j++)
        {
            // Both images are read as 8-bit single-channel pixels.
            const int valOrig    = imgOrig.at<unsigned char>(i, j);
            const int valReconst = imgReconst.at<unsigned char>(i, j);
            const double diff = (double)(valOrig - valReconst);
            MSE += diff * diff;  // diff*diff: exact and far cheaper than pow(x, 2.0)
        }
    }
    return MSE / total;
}
// Render an integer as its decimal string representation.
string convertInt(int number)
{
    ostringstream out;
    out << number;
    return out.str();
}
int main(unsigned int argc, char* const argv[])
{
int dftH, dftW;
cv::Mat imgIn;
imgIn = cv::imread(LOAD_IMAGE, 0); //grayscale
cv::imshow("Original Image", imgIn);
waitKey();
dftH = cv::getOptimalDFTSize(imgIn.rows);
dftW = cv::getOptimalDFTSize(imgIn.cols);
Mat imgMod;
Mat imgPrecFFT(dftH, dftW, CV_64FC1, Scalar::all(0));
imgIn.convertTo(imgMod, CV_64FC1);
imgPrecFFT = imgMod(cv::Range::all(), cv::Range::all()).clone();
// translate image
std::vector<Mat> imgsTrans;
imgsTrans.push_back(Mat_<double>(imgIn.size(), CV_64FC1));
imgsTrans.push_back(Mat_<double>(imgIn.size(), CV_64FC1));
imgsTrans[1].setTo(Scalar::all(0), Mat());
translateImg(imgPrecFFT, imgsTrans[0]);
Mat imgPrecTransFFT(imgIn.size(), CV_64FC2, Scalar::all(0));
cv::merge(imgsTrans, imgPrecTransFFT);
// dft
cv::Mat imgFFT;
dft(imgPrecTransFFT, imgFFT, DFT_COMPLEX_OUTPUT);
cv::Mat imgDispFFT;
// calculate magnitude
Mat imgMagnitude(imgIn.size(), CV_64FC1);
std::vector<Mat> chans;
cv::split(imgFFT, chans);
cv::magnitude(chans[0], chans[1], imgMagnitude);
// scale magnitude image
Mat imgMagnitudeScaled(imgIn.size(), CV_64FC1);
scaleImg(imgMagnitude, imgMagnitudeScaled, 10.0);
// display magnitude image
cv::Mat imgDisp;
cv::convertScaleAbs(imgMagnitudeScaled, imgDisp);
imshow("Magnitude Output", imgDisp);
waitKey();
// inverse dft
cv::split(imgFFT, chans);
chans[1].zeros(imgIn.size(), CV_64FC1);
cv::merge(chans, imgFFT);
cv::Mat invFFT;
cv::idft(imgFFT, invFFT, DFT_REAL_OUTPUT + DFT_SCALE);
// translate image back to original location
cv::split(invFFT, imgsTrans);
Mat imgAfterTrans(imgIn.size(), CV_64FC1);
translateImg(imgsTrans[0], imgAfterTrans);
imgAfterTrans.convertTo(imgDisp, CV_8UC1);
imshow("After Inverse Output", imgDisp);
waitKey();
// calculate and output mean-squared error between input/output images
double MSE = calcMSE(imgIn, imgDisp);
cout<<endl<<"MSE: "<<MSE<<endl;
waitKey();
return 0;
}
當然,這不是輸入圖像的相位。由於你的圖像只有黑白兩色 - 當它應該有一個灰度值範圍時 - 我的猜測是你在轉換時出現了一些錯誤。 – 2010-06-13 23:13:30