Learning OpenCV Lecture 4 (Transforming Images with Morphological Operations)
生活随笔
收集整理的這篇文章主要介紹了
Learning OpenCV Lecture 4 (Transforming Images with Morphological Operations)
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
In this chapter, we will cover:
- Eroding and dilating images using morphological filters
- Opening and closing images using morphological filters
- Detecting edges and corners using morphological filters
- Segmenting images using watersheds 分水嶺算法
- Extracting foreground objects with the GrabCut algorithm
results:
(result screenshot omitted in this re-post)
Detecting edges and corners using morphological filters morphoFeatures.h #if !defined MORPHOF #define MORPHOF#include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp>class MorphoFeatures { private:// threshold to produce binary imageint threshold;// structuring elements used in corner detectioncv::Mat cross;cv::Mat diamond;cv::Mat square;cv::Mat x;public:MorphoFeatures() : threshold(-1),cross(5, 5, CV_8U, cv::Scalar(0)),diamond(5, 5, CV_8U, cv::Scalar(0)),square(5, 5, CV_8U, cv::Scalar(0)),x(5, 5, CV_8U, cv::Scalar(0)) {// Creating the cross-shaped structuring elementfor (int i = 0; i < 5; i++) {cross.at<uchar>(2, i) = 1;cross.at<uchar>(i, 2) = 1;}// Creating the diamond-shaped structuring elementdiamond.at<uchar>(0, 0) = 0;diamond.at<uchar>(0, 1) = 0;diamond.at<uchar>(1, 0) = 0;diamond.at<uchar>(4, 4) = 0;diamond.at<uchar>(3, 4) = 0;diamond.at<uchar>(4, 3) = 0;diamond.at<uchar>(4, 0) = 0;diamond.at<uchar>(4, 1) = 0;diamond.at<uchar>(3, 0) = 0;diamond.at<uchar>(0, 4) = 0;diamond.at<uchar>(0, 3) = 0;diamond.at<uchar>(1, 4) = 0;// Creating the x-shaped structuring elementfor (int i = 0; i < 5; i++) {x.at<uchar>(i, i) = 1;x.at<uchar>(4 - i, i) = 1;}}void setThreshold(int t) {if (t > 0)threshold = t;}int getThreshold() const {return threshold;}cv::Mat getEdges(const cv::Mat &image) {// Get the gradient imagecv::Mat result;cv::morphologyEx(image, result, cv::MORPH_GRADIENT, cv::Mat());// Apply threshold to obtain a binary imageapplyThreshold(result);return result;}void applyThreshold(cv::Mat &result) {// Apply threshold on resultif (threshold > 0) {cv::threshold(result, result, threshold, 255, cv::THRESH_BINARY_INV);}}cv::Mat getCorners(const cv::Mat &image) {cv::Mat result;// Dilate with a crosscv::dilate(image, result, cross);// Erode with a diamondcv::erode(result, result, diamond);cv::Mat result2;// Dilate with a xcv::dilate(image, result2, x);// Erode with a squarecv::erode(result2, result2, square);// Corners 
are obtained by differencing// the two closed imagescv::absdiff(result2, result, result);// Apply threshold to obtain a binary imageapplyThreshold(result);return result;}void drawOnImage(const cv::Mat &binary, cv::Mat &image) {cv::Mat_<uchar>::const_iterator it = binary.begin<uchar>();cv::Mat_<uchar>::const_iterator itend = binary.end<uchar>();// for each pixelfor (int i = 0; it != itend; ++it, ++i) {if (!*it) {cv::circle(image,cv::Point(i%image.step, i/image.step),5, cv::Scalar(255, 0, 0));}}}};#endifmorph.cpp
#include <iostream>#include "morphoFeatures.h"int main() {cv::Mat image = cv::imread( "../building.jpg");cv::cvtColor(image, image, CV_BGR2GRAY);// Create the morphological features instanceMorphoFeatures morpho;morpho.setThreshold(40);// Get the edgescv::Mat edges;edges = morpho.getEdges(image);cv::namedWindow( "Edges Image", CV_WINDOW_AUTOSIZE);cv::imshow( "Edges Image", edges);// Get the cornerscv::Mat corners;corners = morpho.getCorners(image);// Display the corner on the imagemorpho.drawOnImage(corners, image);cv::namedWindow( "Corners on Image", CV_WINDOW_AUTOSIZE);cv::imshow( "Corners on Image", image);cv::waitKey(0);return 0;}results:
(result screenshot omitted in this re-post)
Segmenting images using watersheds watershedSegment.h #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp>class WatershedSegmenter { private:cv::Mat markers;public:void setMarkers(const cv::Mat &markerImage) {// Convert to image of intsmarkerImage.convertTo(markers, CV_32S);}cv::Mat process(const cv::Mat &image) {// Apply watershedcv::watershed(image, markers);return markers;}// Return result in the form of an imagecv::Mat getSegmentation() {cv::Mat tmp;// all segment with label higher than 255// will be assigned value 255markers.convertTo(tmp, CV_8U);return tmp;}// Return watershed in the form of an imagecv::Mat getWatersheds() {cv::Mat tmp;// Each pixel p is transform into// 255p + 255 befor conversionmarkers.convertTo(tmp, CV_8U, 255, 255);return tmp;} };// Read input imagecv::Mat image = cv::imread( "../group.jpg");if (!image.data) {return 0;}// Display the imagecv::namedWindow( "Original Image");cv::imshow( "Original Image", image);// Get the binary imagecv::Mat binary;binary = cv::imread( "../binary.bmp", 0);// Display the binary imagecv::namedWindow( "Binary Image");cv::imshow( "Binary Image", binary);// Eliminate noise and smaller objectscv::Mat fg;cv::erode(binary, fg, cv::Mat(), cv::Point(-1, -1), 6);// Display the foreground imagecv::namedWindow( "Foreground Image");cv::imshow( "Foreground Image", fg);
results:
// Identify image pixels without objectscv::Mat bg;cv::dilate(binary, bg, cv::Mat(), cv::Point(-1, -1), 6);cv::threshold(bg, bg, 1, 128, cv::THRESH_BINARY_INV);// Display the backgroud imagecv::namedWindow( "Background Image");cv::imshow( "Background Image", bg);results:
// Show markers imagecv::Mat markers(binary.size(), CV_8U, cv::Scalar(0));markers = fg + bg;cv::namedWindow( "Markers");cv::imshow( "Markers", markers);// Create watershed segmentation objectWatershedSegmenter segmenter;// Set markers and processsegmenter.setMarkers(markers);segmenter.process(image);// Display segmentation resultcv::namedWindow( "Segmentation");cv::imshow( "Segmentation", segmenter.getSegmentation());// Display watershedscv::namedWindow( "Watershed");cv::imshow( "Watershed", segmenter.getWatersheds());
// Open another image------------------------------------image = cv::imread( "../tower.jpg");// Identify background pixelscv::Mat imageMask(image.size(), CV_8U, cv::Scalar(0));cv::rectangle(imageMask, cv::Point(5, 5), cv::Point(image.cols - 5, image.rows - 5), cv::Scalar(255), 3);// Identify forground pixels (in the middle of the image)cv::rectangle(imageMask, cv::Point(image.cols / 2 - 10, image.rows / 2 - 10),cv::Point(image.cols / 2 + 10, image.rows / 2 + 10), cv::Scalar(1), 10);// Set markers and processsegmenter.setMarkers(imageMask);segmenter.process(image);// Display the image with markerscv::rectangle(image, cv::Point(5, 5), cv::Point(image.cols - 5, image.rows - 5), cv::Scalar(255, 255, 255), 3);cv::rectangle(image, cv::Point(image.cols / 2 - 10, image.rows / 2 - 10),cv::Point(image.cols / 2 + 10, image.rows / 2 + 10), cv::Scalar(1, 1, 1), 10);cv::namedWindow( "Image with marker");cv::imshow( "Image with marker", image);// Display watershedscv::namedWindow( "Watersheds of foreground object");cv::imshow( "Watersheds of foreground object", segmenter.getWatersheds());
results:
(result screenshot omitted in this re-post)
Extracting foreground objects with the GrabCut algorithm // Open another imageimage = cv::imread( "../tower.jpg");// define bounding rectangecv::Rect rectangle(50, 70, image.cols - 150, image.rows - 180);cv::Mat result; // segmentation result (4 possible values)cv::Mat bgModel, fgModel; // the models (internally used)// GrabCut segmentationcv::grabCut(image, // input imageresult, // segmentation resultrectangle, // rectangle containing foregroundbgModel, fgModel, // models1, //number of iterationscv::GC_INIT_WITH_RECT// use rectangle);// Get the pixles marked as likely foregroundcv::compare(result, cv::GC_PR_FGD, result, cv::CMP_EQ);// Generate output imagecv::Mat foreground(image.size(), CV_8UC3, cv::Scalar(255, 255, 255));image.copyTo(foreground, result); // bg pixels not copied// draw rectangle on original imagecv::rectangle(image, rectangle, cv::Scalar(255,255,255),1);cv::namedWindow( "Image");cv::imshow( "Image",image);// display resultcv::namedWindow( "Segmented Image");cv::imshow( "Segmented Image",foreground);// Open another imageimage= cv::imread("../group.jpg");// define bounding rectanglecv::Rect rectangle2(10,100,380,180);cv::Mat bkgModel,fgrModel; // the models (internally used)// GrabCut segmentationcv::grabCut(image, // input imageresult, // segmentation resultrectangle2,bkgModel,fgrModel,5,cv::GC_INIT_WITH_RECT);// Get the pixels marked as likely foreground// cv::compare(result,cv::GC_PR_FGD,result,cv::CMP_EQ);result= result&1;foreground.create(image.size(),CV_8UC3);foreground.setTo(cv::Scalar(255,255,255));image.copyTo(foreground,result); // bg pixels not copied// draw rectangle on original imagecv::rectangle(image, rectangle2, cv::Scalar(255,255,255),1);cv::namedWindow( "Image 2");cv::imshow( "Image 2",image);// display resultcv::namedWindow( "Foreground objects");cv::imshow( "Foreground objects",foreground);
轉載于:https://www.cnblogs.com/starlitnext/p/3861398.html
《新程序員》:云原生和全面數字化實踐50位技術專家共同創作,文字、視頻、音頻交互閱讀總結
以上是生活随笔為你收集整理的Learning OpenCV Lecture 4 (Transforming Images with Morphological Operations)的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 润记牌亚麻籽油750毫升价格?
- 下一篇: 清心汤什么时间煮种最好?