2016-03-15 69 views
1

我目前正在做一個使用Lacatan香蕉的項目,我想知道如何進一步把前景從背景中分割出來。

我已經使用侵蝕,擴張和閾值分割圖像。問題在於它仍然沒有被恰當地分割。

這是我的代碼:

cv::Mat imggray, imgthresh, fg, bgt, bg; 
    cv::cvtColor(src, imggray, CV_BGR2GRAY); // Convert the BGR source image to single-channel grayscale 
    cv::threshold(imggray, imgthresh, 0, 255, CV_THRESH_BINARY_INV | CV_THRESH_OTSU); // Inverted binary image; Otsu picks the threshold automatically 
    cv::erode(imgthresh, fg, cv::Mat(), cv::Point(-1, -1), 1); // Erode once to shrink the blobs -> treated as "sure foreground" 
    cv::dilate(imgthresh, bgt, cv::Mat(), cv::Point(-1, -1), 4); // Dilate 4x to grow the blobs; what remains black is "sure background" 
    cv::threshold(bgt, bg, 1, 128, cv::THRESH_BINARY); //we get the background by setting the threshold to 1 
    cv::Mat markers = cv::Mat::zeros(src.size(), CV_32SC1); // 32-bit marker image. NOTE(review): cv::add below takes two 8U inputs and reallocates `markers` to 8U, so this CV_32SC1 init has no effect — confirm intent (watershed markers?) 
    cv::add(fg, bg, markers); // Combine foreground (255) and background (128) labels into one marker image 
    cv::Mat mask = cv::Mat::zeros(markers.size(), CV_8UC1); 
    markers.convertTo(mask, CV_8UC1); // Ensure the marker image is 8-bit single channel 
    cv::Mat mthresh; 
    cv::threshold(mask, mthresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); // Re-threshold the mask to suppress noise 
    // cv::erode(mthresh,mthresh,cv::Mat(), cv::Point(-1,-1),2); 
    cv::Mat result; 
    cv::bitwise_and(src, src, result, mthresh); // Copy only the masked pixels, extracting the banana from the background 
    for (int x = 0; x < result.rows; x++) { // Repaint the black (unmasked) background as white 
     for (int y = 0; y < result.cols; y++) { 
      if (result.at<Vec3b>(x, y) == Vec3b(0, 0, 0)){ 
       result.at<Vec3b>(x, y)[0] = 255; 
       result.at<Vec3b>(x, y)[1] = 255; 
       result.at<Vec3b>(x, y)[2] = 255; 
      } 
     } 
    } 

這是我的結果:

+0

這裏是結果圖像[鏈接](http://imgur.com/KG619PS)我還不很熟悉的分割如何工作的,請多多包涵:) –

+0

還我應該使用輪廓或任何其他技術呢? –

+0

這個圖像看起來像是一個比較簡單的例子。您是否嘗試過簡單的二值化閾值,並在結合Otsu濾波之前先調整一下閾值參數? – SpamBot

回答

0

由於背景接近灰色,可以嘗試使用色調(Hue)通道和飽和度(Saturation)通道來代替灰度圖像。
你可以很容易地得到它們。

// Convert to HSV and combine the hue and saturation channels into one
// 8-bit image, so the (gray-ish) background separates from the banana.
cv::Mat hsv;
cv::cvtColor(src, hsv, CV_BGR2HSV);

std::vector<cv::Mat> channels;
cv::split(hsv, channels); // BUG FIX: original split `src` (B/G/R), not the HSV image

cv::Mat hue = channels[0];        // H: "color direction", 0..179 in OpenCV
cv::Mat saturation = channels[1]; // S: "color intensity", 0..255

// If you want to combine those channels, use this code.
cv::Mat hs = cv::Mat::zeros(src.size(), CV_8U);
for (int r = 0; r < src.rows; r++) {
    for (int c = 0; c < src.cols; c++) {
        int hp = hue.at<uchar>(r, c);        // BUG FIX: original read undeclared `h`
        int sp = saturation.at<uchar>(r, c); // BUG FIX: original read undeclared `s`
        // BUG FIX: average the pixel values (hp+sp)>>1 — the original
        // wrote (h+s)>>1, which adds the Mats and does not compile.
        hs.at<uchar>(r, c) = static_cast<uchar>((hp + sp) >> 1);
    }
}
+0

我可以問爲什麼在hsv?我問的很多人也說hsv色彩空間比較容易,但他們甚至沒告訴我理由,傷心。 –

+0

@HarveyC https://en.wikipedia.org/wiki/HSL_and_HSV看看這裏。總之,色調意味着「色彩方向」,飽和意味着「色彩強度」。香蕉多爲黃色,因此色相在香蕉區大多相似。 –

0

adaptiveThreshold()應該比固定閾值的threshold()效果更好,因為它不依賴絕對的灰度水平,而是檢查每個像素周圍小鄰域內顏色的變化。

嘗試用自適應替換您的閾值。

0

使用頂帽,而不僅僅是侵蝕/擴張。它將同時處理背景變化。

然後在你的情況下,簡單的閾值應該是足夠好有一個準確的分割。否則,你可以將它與分水嶺耦合。

(我會盡快與大家分享一些圖片)。

0

謝謝你們,我嘗試套用您的建議,並能想出這個

enter image description here

但是你可以看到背景中仍然殘留一些區域,有什麼辦法可以進一步「清理」它們嗎?我嘗試過進一步做閾值處理,但仍有殘留。我用的代碼在下面;如果變量命名和編碼風格有點混亂,我提前道歉——沒有時間好好整理它們。

#include <stdio.h> 
#include <iostream> 
#include <opencv2\core.hpp> 
#include <opencv2\opencv.hpp> 
#include <opencv2\highgui.hpp> 

using namespace cv; 
using namespace std; 

Mat COLOR_MAX(Scalar(65, 255, 255)); 
Mat COLOR_MIN(Scalar(15, 45, 45)); 


int main(int argc, char** argv){ 

Mat src,hsv_img,mask,gray_img,initial_thresh; 
Mat second_thresh,add_res,and_thresh,xor_thresh; 
Mat result_thresh,rr_thresh,final_thresh; 
// Load source Image 
src = imread("sample11.jpg"); 
imshow("Original Image", src); 
cvtColor(src,hsv_img,CV_BGR2HSV); 
imshow("HSV Image",hsv_img); 

//imwrite("HSV Image.jpg", hsv_img); 

inRange(hsv_img,COLOR_MIN,COLOR_MAX, mask); 
imshow("Mask Image",mask); 

cvtColor(src,gray_img,CV_BGR2GRAY); 
adaptiveThreshold(gray_img, initial_thresh, 255,ADAPTIVE_THRESH_GAUSSIAN_C,CV_THRESH_BINARY_INV,257,2); 
imshow("AdaptiveThresh Image", initial_thresh); 

add(mask,initial_thresh,add_res); 
erode(add_res, add_res, Mat(), Point(-1, -1), 1); 
dilate(add_res, add_res, Mat(), Point(-1, -1), 5); 
imshow("Bitwise Res",add_res); 

threshold(gray_img,second_thresh,170,255,CV_THRESH_BINARY_INV | CV_THRESH_OTSU); 
imshow("TreshImge", second_thresh); 

bitwise_and(add_res,second_thresh,and_thresh); 
imshow("andthresh",and_thresh); 

bitwise_xor(add_res, second_thresh, xor_thresh); 
imshow("xorthresh",xor_thresh); 

bitwise_or(and_thresh,xor_thresh,result_thresh); 
imshow("Result image", result_thresh); 

bitwise_and(add_res,result_thresh,final_thresh); 
imshow("Final Thresh",final_thresh); 
erode(final_thresh, final_thresh, Mat(), Point(-1,-1),5); 

bitwise_and(src,src,rr_thresh,final_thresh); 
imshow("Segmented Image", rr_thresh); 
imwrite("Segmented Image.jpg", rr_thresh); 

waitKey(0); 
return 1; 
}