以相关组为基础的3D物体识别
這次我們要解釋如何以pcl_recognition模塊來進行3D物體識別。具體來說,本文說明了如何使用相關組(Correspondence Grouping)算法,把由3D描述子匹配得到的「模型—場景」點對點對應關係聚類成若干簇。每一個聚類代表場景中一個可能的模型實例;同時,相關組算法還會為每個實例輸出標識其6DOF位姿估計的變換矩陣。
代碼
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/correspondence.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/features/shot_omp.h>
#include <pcl/features/board.h>
#include <pcl/filters/uniform_sampling.h>
#include <pcl/recognition/cg/hough_3d.h>
#include <pcl/recognition/cg/geometric_consistency.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/kdtree/impl/kdtree_flann.hpp>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>

typedef pcl::PointXYZRGBA PointType;
typedef pcl::Normal NormalType;
typedef pcl::ReferenceFrame RFType;
typedef pcl::SHOT352 DescriptorType;

std::string model_filename_;
std::string scene_filename_;

// Algorithm params (overridable from the command line, see parseCommandLine)
bool show_keypoints_ (false);
bool show_correspondences_ (false);
bool use_cloud_resolution_ (false);
bool use_hough_ (true);
float model_ss_ (0.01f);   // model uniform-sampling radius
float scene_ss_ (0.03f);   // scene uniform-sampling radius
float rf_rad_ (0.015f);    // local reference frame estimation radius
float descr_rad_ (0.02f);  // SHOT descriptor support radius
float cg_size_ (0.01f);    // clustering bin size
float cg_thresh_ (5.0f);   // clustering threshold

// Print a short explanation of every command-line option.
void
showHelp (char *filename)
{
  std::cout << std::endl;
  std::cout << "***************************************************************************" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "* Correspondence Grouping Tutorial - Usage Guide *" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "***************************************************************************" << std::endl << std::endl;
  std::cout << "Usage: " << filename << " model_filename.pcd scene_filename.pcd [Options]" << std::endl << std::endl;
  std::cout << "Options:" << std::endl;
  std::cout << " -h: Show this help." << std::endl;
  std::cout << " -k: Show used keypoints." << std::endl;
  std::cout << " -c: Show used correspondences." << std::endl;
  std::cout << " -r: Compute the model cloud resolution and multiply" << std::endl;
  std::cout << " each radius given by that value." << std::endl;
  std::cout << " --algorithm (Hough|GC): Clustering algorithm used (default Hough)." << std::endl;
  std::cout << " --model_ss val: Model uniform sampling radius (default 0.01)" << std::endl;
  std::cout << " --scene_ss val: Scene uniform sampling radius (default 0.03)" << std::endl;
  std::cout << " --rf_rad val: Reference frame radius (default 0.015)" << std::endl;
  std::cout << " --descr_rad val: Descriptor radius (default 0.02)" << std::endl;
  std::cout << " --cg_size val: Cluster size (default 0.01)" << std::endl;
  std::cout << " --cg_thresh val: Clustering threshold (default 5)" << std::endl << std::endl;
}

// Parse command-line switches and parameters into the globals above.
// Exits the program on -h or when the two .pcd filenames are missing.
void
parseCommandLine (int argc, char *argv[])
{
  // Show help
  if (pcl::console::find_switch (argc, argv, "-h"))
  {
    showHelp (argv[0]);
    exit (0);
  }

  // Model & scene filenames
  std::vector<int> filenames;
  filenames = pcl::console::parse_file_extension_argument (argc, argv, ".pcd");
  if (filenames.size () != 2)
  {
    std::cout << "Filenames missing.\n";
    showHelp (argv[0]);
    exit (-1);
  }
  model_filename_ = argv[filenames[0]];
  scene_filename_ = argv[filenames[1]];

  // Program behavior
  if (pcl::console::find_switch (argc, argv, "-k"))
  {
    show_keypoints_ = true;
  }
  if (pcl::console::find_switch (argc, argv, "-c"))
  {
    show_correspondences_ = true;
  }
  if (pcl::console::find_switch (argc, argv, "-r"))
  {
    use_cloud_resolution_ = true;
  }

  std::string used_algorithm;
  if (pcl::console::parse_argument (argc, argv, "--algorithm", used_algorithm) != -1)
  {
    if (used_algorithm.compare ("Hough") == 0)
    {
      use_hough_ = true;
    }
    else if (used_algorithm.compare ("GC") == 0)
    {
      use_hough_ = false;
    }
    else
    {
      std::cout << "Wrong algorithm name.\n";
      showHelp (argv[0]);
      exit (-1);
    }
  }

  // General parameters
  pcl::console::parse_argument (argc, argv, "--model_ss", model_ss_);
  pcl::console::parse_argument (argc, argv, "--scene_ss", scene_ss_);
  pcl::console::parse_argument (argc, argv, "--rf_rad", rf_rad_);
  pcl::console::parse_argument (argc, argv, "--descr_rad", descr_rad_);
  pcl::console::parse_argument (argc, argv, "--cg_size", cg_size_);
  pcl::console::parse_argument (argc, argv, "--cg_thresh", cg_thresh_);
}

// Estimate the spatial resolution of a cloud as the mean distance between
// each point and its nearest neighbor. Returns 0.0 when no finite points
// with a neighbor are found.
double
computeCloudResolution (const pcl::PointCloud<PointType>::ConstPtr &cloud)
{
  double res = 0.0;
  int n_points = 0;
  int nres;
  std::vector<int> indices (2);
  std::vector<float> sqr_distances (2);
  pcl::search::KdTree<PointType> tree;
  tree.setInputCloud (cloud);

  for (size_t i = 0; i < cloud->size (); ++i)
  {
    if (! pcl_isfinite ((*cloud)[i].x))
    {
      continue;
    }
    // Considering the second neighbor since the first is the point itself.
    nres = tree.nearestKSearch (i, 2, indices, sqr_distances);
    if (nres == 2)
    {
      res += sqrt (sqr_distances[1]);
      ++n_points;
    }
  }
  if (n_points != 0)
  {
    res /= n_points;
  }
  return res;
}

int
main (int argc, char *argv[])
{
  parseCommandLine (argc, argv);

  pcl::PointCloud<PointType>::Ptr model (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<PointType>::Ptr model_keypoints (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<PointType>::Ptr scene (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<PointType>::Ptr scene_keypoints (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<NormalType>::Ptr model_normals (new pcl::PointCloud<NormalType> ());
  pcl::PointCloud<NormalType>::Ptr scene_normals (new pcl::PointCloud<NormalType> ());
  pcl::PointCloud<DescriptorType>::Ptr model_descriptors (new pcl::PointCloud<DescriptorType> ());
  pcl::PointCloud<DescriptorType>::Ptr scene_descriptors (new pcl::PointCloud<DescriptorType> ());

  //
  //  Load clouds
  //
  if (pcl::io::loadPCDFile (model_filename_, *model) < 0)
  {
    std::cout << "Error loading model cloud." << std::endl;
    showHelp (argv[0]);
    return (-1);
  }
  if (pcl::io::loadPCDFile (scene_filename_, *scene) < 0)
  {
    std::cout << "Error loading scene cloud." << std::endl;
    showHelp (argv[0]);
    return (-1);
  }

  //
  //  Set up resolution invariance: scale every radius by the model resolution
  //
  if (use_cloud_resolution_)
  {
    float resolution = static_cast<float> (computeCloudResolution (model));
    if (resolution != 0.0f)
    {
      model_ss_ *= resolution;
      scene_ss_ *= resolution;
      rf_rad_ *= resolution;
      descr_rad_ *= resolution;
      cg_size_ *= resolution;
    }

    std::cout << "Model resolution: " << resolution << std::endl;
    std::cout << "Model sampling size: " << model_ss_ << std::endl;
    std::cout << "Scene sampling size: " << scene_ss_ << std::endl;
    std::cout << "LRF support radius: " << rf_rad_ << std::endl;
    std::cout << "SHOT descriptor radius: " << descr_rad_ << std::endl;
    std::cout << "Clustering bin size: " << cg_size_ << std::endl << std::endl;
  }

  //
  //  Compute Normals (10 nearest neighbors)
  //
  pcl::NormalEstimationOMP<PointType, NormalType> norm_est;
  norm_est.setKSearch (10);
  norm_est.setInputCloud (model);
  norm_est.compute (*model_normals);

  norm_est.setInputCloud (scene);
  norm_est.compute (*scene_normals);

  //
  //  Downsample Clouds to Extract keypoints
  //
  pcl::UniformSampling<PointType> uniform_sampling;
  uniform_sampling.setInputCloud (model);
  uniform_sampling.setRadiusSearch (model_ss_);
  uniform_sampling.filter (*model_keypoints);
  std::cout << "Model total points: " << model->size () << "; Selected Keypoints: " << model_keypoints->size () << std::endl;

  uniform_sampling.setInputCloud (scene);
  uniform_sampling.setRadiusSearch (scene_ss_);
  uniform_sampling.filter (*scene_keypoints);
  std::cout << "Scene total points: " << scene->size () << "; Selected Keypoints: " << scene_keypoints->size () << std::endl;

  //
  //  Compute Descriptor for keypoints (SHOT)
  //
  pcl::SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;
  descr_est.setRadiusSearch (descr_rad_);

  descr_est.setInputCloud (model_keypoints);
  descr_est.setInputNormals (model_normals);
  descr_est.setSearchSurface (model);
  descr_est.compute (*model_descriptors);

  descr_est.setInputCloud (scene_keypoints);
  descr_est.setInputNormals (scene_normals);
  descr_est.setSearchSurface (scene);
  descr_est.compute (*scene_descriptors);

  //
  //  Find Model-Scene Correspondences with KdTree
  //
  pcl::CorrespondencesPtr model_scene_corrs (new pcl::Correspondences ());

  pcl::KdTreeFLANN<DescriptorType> match_search;
  match_search.setInputCloud (model_descriptors);

  // For each scene keypoint descriptor, find nearest neighbor into the model
  // keypoints descriptor cloud and add it to the correspondences vector.
  for (size_t i = 0; i < scene_descriptors->size (); ++i)
  {
    std::vector<int> neigh_indices (1);
    std::vector<float> neigh_sqr_dists (1);
    if (!pcl_isfinite (scene_descriptors->at (i).descriptor[0])) // skipping NaNs
    {
      continue;
    }
    int found_neighs = match_search.nearestKSearch (scene_descriptors->at (i), 1, neigh_indices, neigh_sqr_dists);
    // add match only if the squared descriptor distance is less than 0.25
    // (SHOT descriptor distances are between 0 and 1 by design)
    if (found_neighs == 1 && neigh_sqr_dists[0] < 0.25f)
    {
      pcl::Correspondence corr (neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
      model_scene_corrs->push_back (corr);
    }
  }
  std::cout << "Correspondences found: " << model_scene_corrs->size () << std::endl;

  //
  //  Actual Clustering
  //
  std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
  std::vector<pcl::Correspondences> clustered_corrs;

  if (use_hough_) // Using Hough3D
  {
    //
    //  Compute (Keypoints) Reference Frames only for Hough
    //
    pcl::PointCloud<RFType>::Ptr model_rf (new pcl::PointCloud<RFType> ());
    pcl::PointCloud<RFType>::Ptr scene_rf (new pcl::PointCloud<RFType> ());

    pcl::BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
    rf_est.setFindHoles (true);
    rf_est.setRadiusSearch (rf_rad_);

    rf_est.setInputCloud (model_keypoints);
    rf_est.setInputNormals (model_normals);
    rf_est.setSearchSurface (model);
    rf_est.compute (*model_rf);

    rf_est.setInputCloud (scene_keypoints);
    rf_est.setInputNormals (scene_normals);
    rf_est.setSearchSurface (scene);
    rf_est.compute (*scene_rf);

    //  Clustering
    pcl::Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
    clusterer.setHoughBinSize (cg_size_);
    clusterer.setHoughThreshold (cg_thresh_);
    clusterer.setUseInterpolation (true);
    clusterer.setUseDistanceWeight (false);

    clusterer.setInputCloud (model_keypoints);
    clusterer.setInputRf (model_rf);
    clusterer.setSceneCloud (scene_keypoints);
    clusterer.setSceneRf (scene_rf);
    clusterer.setModelSceneCorrespondences (model_scene_corrs);

    //clusterer.cluster (clustered_corrs);
    clusterer.recognize (rototranslations, clustered_corrs);
  }
  else // Using GeometricConsistency
  {
    pcl::GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;
    gc_clusterer.setGCSize (cg_size_);
    gc_clusterer.setGCThreshold (cg_thresh_);

    gc_clusterer.setInputCloud (model_keypoints);
    gc_clusterer.setSceneCloud (scene_keypoints);
    gc_clusterer.setModelSceneCorrespondences (model_scene_corrs);

    //gc_clusterer.cluster (clustered_corrs);
    gc_clusterer.recognize (rototranslations, clustered_corrs);
  }

  //
  //  Output results: one 4x4 rototranslation per recognized model instance
  //
  std::cout << "Model instances found: " << rototranslations.size () << std::endl;
  for (size_t i = 0; i < rototranslations.size (); ++i)
  {
    std::cout << "\n Instance " << i + 1 << ":" << std::endl;
    std::cout << " Correspondences belonging to this instance: " << clustered_corrs[i].size () << std::endl;

    // Print the rotation matrix and translation vector
    Eigen::Matrix3f rotation = rototranslations[i].block<3,3>(0, 0);
    Eigen::Vector3f translation = rototranslations[i].block<3,1>(0, 3);

    printf ("\n");
    printf (" | %6.3f %6.3f %6.3f | \n", rotation (0,0), rotation (0,1), rotation (0,2));
    printf (" R = | %6.3f %6.3f %6.3f | \n", rotation (1,0), rotation (1,1), rotation (1,2));
    printf (" | %6.3f %6.3f %6.3f | \n", rotation (2,0), rotation (2,1), rotation (2,2));
    printf ("\n");
    printf (" t = < %0.3f, %0.3f, %0.3f >\n", translation (0), translation (1), translation (2));
  }

  //
  //  Visualization
  //
  pcl::visualization::PCLVisualizer viewer ("Correspondence Grouping");
  viewer.addPointCloud (scene, "scene_cloud");

  pcl::PointCloud<PointType>::Ptr off_scene_model (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<PointType>::Ptr off_scene_model_keypoints (new pcl::PointCloud<PointType> ());

  if (show_correspondences_ || show_keypoints_)
  {
    // We are translating the model so that it doesn't end in the middle of the scene representation
    pcl::transformPointCloud (*model, *off_scene_model, Eigen::Vector3f (-1,0,0), Eigen::Quaternionf (1, 0, 0, 0));
    pcl::transformPointCloud (*model_keypoints, *off_scene_model_keypoints, Eigen::Vector3f (-1,0,0), Eigen::Quaternionf (1, 0, 0, 0));

    pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_color_handler (off_scene_model, 255, 255, 128);
    viewer.addPointCloud (off_scene_model, off_scene_model_color_handler, "off_scene_model");
  }

  if (show_keypoints_)
  {
    pcl::visualization::PointCloudColorHandlerCustom<PointType> scene_keypoints_color_handler (scene_keypoints, 0, 0, 255);
    viewer.addPointCloud (scene_keypoints, scene_keypoints_color_handler, "scene_keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "scene_keypoints");

    pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_keypoints_color_handler (off_scene_model_keypoints, 0, 0, 255);
    viewer.addPointCloud (off_scene_model_keypoints, off_scene_model_keypoints_color_handler, "off_scene_model_keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "off_scene_model_keypoints");
  }

  for (size_t i = 0; i < rototranslations.size (); ++i)
  {
    pcl::PointCloud<PointType>::Ptr rotated_model (new pcl::PointCloud<PointType> ());
    pcl::transformPointCloud (*model, *rotated_model, rototranslations[i]);

    std::stringstream ss_cloud;
    ss_cloud << "instance" << i;

    pcl::visualization::PointCloudColorHandlerCustom<PointType> rotated_model_color_handler (rotated_model, 255, 0, 0);
    viewer.addPointCloud (rotated_model, rotated_model_color_handler, ss_cloud.str ());

    if (show_correspondences_)
    {
      for (size_t j = 0; j < clustered_corrs[i].size (); ++j)
      {
        std::stringstream ss_line;
        ss_line << "correspondence_line" << i << "_" << j;
        PointType& model_point = off_scene_model_keypoints->at (clustered_corrs[i][j].index_query);
        PointType& scene_point = scene_keypoints->at (clustered_corrs[i][j].index_match);

        // We are drawing a line for each pair of clustered correspondences found between the model and the scene
        viewer.addLine<PointType, PointType> (model_point, scene_point, 0, 255, 0, ss_line.str ());
      }
    }
  }

  while (!viewer.wasStopped ())
  {
    viewer.spinOnce ();
  }

  return (0);
}
幫助函數
這個函數將會打印出幾個命令行的簡短選項的解釋。
// Print a short explanation of every command-line option.
void
showHelp (char *filename)
{
  std::cout << std::endl;
  std::cout << "***************************************************************************" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "* Correspondence Grouping Tutorial - Usage Guide *" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "***************************************************************************" << std::endl << std::endl;
  std::cout << "Usage: " << filename << " model_filename.pcd scene_filename.pcd [Options]" << std::endl << std::endl;
  std::cout << "Options:" << std::endl;
  std::cout << " -h: Show this help." << std::endl;
  std::cout << " -k: Show used keypoints." << std::endl;
  std::cout << " -c: Show used correspondences." << std::endl;
  std::cout << " -r: Compute the model cloud resolution and multiply" << std::endl;
  std::cout << " each radius given by that value." << std::endl;
  std::cout << " --algorithm (Hough|GC): Clustering algorithm used (default Hough)." << std::endl;
  std::cout << " --model_ss val: Model uniform sampling radius (default 0.01)" << std::endl;
  std::cout << " --scene_ss val: Scene uniform sampling radius (default 0.03)" << std::endl;
  std::cout << " --rf_rad val: Reference frame radius (default 0.015)" << std::endl;
  std::cout << " --descr_rad val: Descriptor radius (default 0.02)" << std::endl;
  std::cout << " --cg_size val: Cluster size (default 0.01)" << std::endl;
  std::cout << " --cg_thresh val: Clustering threshold (default 5)" << std::endl << std::endl;
}
第二個函數,解析命令行參數為了設置正確的參數來執行。
void parseCommandLine (int argc, char *argv[]) {//Show helpif (pcl::console::find_switch (argc, argv, "-h")){showHelp (argv[0]);exit (0);}//Model & scene filenamesstd::vector<int> filenames;filenames = pcl::console::parse_file_extension_argument (argc, argv, ".pcd");if (filenames.size () != 2){std::cout << "Filenames missing.\n";showHelp (argv[0]);exit (-1);}model_filename_ = argv[filenames[0]];scene_filename_ = argv[filenames[1]];//Program behaviorif (pcl::console::find_switch (argc, argv, "-k")){show_keypoints_ = true;}if (pcl::console::find_switch (argc, argv, "-c")){show_correspondences_ = true;}if (pcl::console::find_switch (argc, argv, "-r")){use_cloud_resolution_ = true;}std::string used_algorithm;if (pcl::console::parse_argument (argc, argv, "--algorithm", used_algorithm) != -1){if (used_algorithm.compare ("Hough") == 0){use_hough_ = true;}else if (used_algorithm.compare ("GC") == 0){use_hough_ = false;}else{std::cout << "Wrong algorithm name.\n";showHelp (argv[0]);exit (-1);}}//General parameterspcl::console::parse_argument (argc, argv, "--model_ss", model_ss_);pcl::console::parse_argument (argc, argv, "--scene_ss", scene_ss_);pcl::console::parse_argument (argc, argv, "--rf_rad", rf_rad_);pcl::console::parse_argument (argc, argv, "--descr_rad", descr_rad_);pcl::console::parse_argument (argc, argv, "--cg_size", cg_size_);pcl::console::parse_argument (argc, argv, "--cg_thresh", cg_thresh_); }你可以用兩個相應的命令行選項來進行聚類算法的切換。--algorithm(Hough|GC)
Hough(默認)
這是一個以3D Hough投票(voting)為基礎的算法。
GC
這是geometric consistency(GC,幾何一致性)聚類算法,它在成對的對應點之間施加簡單的幾何一致性約束。
一些別的比較有趣的選項 -k -c 和 -r
-k 以藍色疊加的形式顯示用于計算對應關係的關鍵點。
-c 在模型與場景中每一對相互對應的關鍵點之間畫一條連線。
-r 估計模型點云的空間分辨率,並把各個半徑參數乘以該分辨率,也就是以點云分辨率為單位來表示半徑。當對不同的點云使用同樣的命令行參數時,這樣可以獲得一定的分辨率不變性。
下面這個函數實現了某個點云的空間分辨率計算:對點云中每個點與其最近鄰點之間的距離取平均值。
double computeCloudResolution (const pcl::PointCloud<PointType>::ConstPtr &cloud) {double res = 0.0;int n_points = 0;int nres;std::vector<int> indices (2);std::vector<float> sqr_distances (2);pcl::search::KdTree<PointType> tree;tree.setInputCloud (cloud);for (size_t i = 0; i < cloud->size (); ++i){if (! pcl_isfinite ((*cloud)[i].x)){continue;}//Considering the second neighbor since the first is the point itself.nres = tree.nearestKSearch (i, 2, indices, sqr_distances);if (nres == 2){res += sqrt (sqr_distances[1]);++n_points;}}if (n_points != 0){res /= n_points;}return res; }聚類通道
這是我們主要的函數,執行實際的聚類操作。
parseCommandLine (argc, argv);if (pcl::io::loadPCDFile (model_filename_, *model) < 0){std::cout << "Error loading model cloud." << std::endl;showHelp (argv[0]);return (-1);}if (pcl::io::loadPCDFile (scene_filename_, *scene) < 0){std::cout << "Error loading scene cloud." << std::endl;showHelp (argv[0]);return (-1);}第二步,只有分辨率不變這個標志被enable,程序會調整會在下一節里面使用的半徑,把它們和模型分辨率相乘。
if (use_cloud_resolution_){float resolution = static_cast<float> (computeCloudResolution (model));if (resolution != 0.0f){model_ss_ *= resolution;scene_ss_ *= resolution;rf_rad_ *= resolution;descr_rad_ *= resolution;cg_size_ *= resolution;}std::cout << "Model resolution: " << resolution << std::endl;std::cout << "Model sampling size: " << model_ss_ << std::endl;std::cout << "Scene sampling size: " << scene_ss_ << std::endl;std::cout << "LRF support radius: " << rf_rad_ << std::endl;std::cout << "SHOT descriptor radius: " << descr_rad_ << std::endl;std::cout << "Clustering bin size: " << cg_size_ << std::endl << std::endl;}接下去,將會計算模型和場景的每個點的法線,通過NormalEstiminationOMP估計器,使用10個最近鄰。
pcl::NormalEstimationOMP<PointType, NormalType> norm_est;norm_est.setKSearch (10);norm_est.setInputCloud (model);norm_est.compute (*model_normals);norm_est.setInputCloud (scene);norm_est.compute (*scene_normals);接下去我們對每個點云降低取樣來找到少量的關鍵點。UniformSampling里面要使用的參數要么是命令行里面進行設置要么就用默認值。
pcl::UniformSampling<PointType> uniform_sampling;uniform_sampling.setInputCloud (model);uniform_sampling.setRadiusSearch (model_ss_);uniform_sampling.filter (*model_keypoints);std::cout << "Model total points: " << model->size () << "; Selected Keypoints: " << model_keypoints->size () << std::endl;uniform_sampling.setInputCloud (scene);uniform_sampling.setRadiusSearch (scene_ss_);uniform_sampling.filter (*scene_keypoints);std::cout << "Scene total points: " << scene->size () << "; Selected Keypoints: " << scene_keypoints->size () << std::endl;下一個階段在于把3D描述器和每個模型和場景的點進行關聯。在這里,我們將計算SHOT描述器,使用SHOTEstiminationOMP
descr_est.setInputCloud (model_keypoints);descr_est.setInputNormals (model_normals);descr_est.setSearchSurface (model);descr_est.compute (*model_descriptors);descr_est.setInputCloud (scene_keypoints);descr_est.setInputNormals (scene_normals);descr_est.setSearchSurface (scene);descr_est.compute (*scene_descriptors);現在我們需要決定模型描述器和場景間的相關性。為了做到這,這個程序使用了一個KdTreeFLANN。對于每一個和一個場景關鍵點相關聯的描述器,它會以歐拉距離為基礎找到最相似的模型描述器,并把這對相似的關鍵點放到Correspondences這個向量里面。
match_search.setInputCloud (model_descriptors);// For each scene keypoint descriptor, find nearest neighbor into the model keypoints descriptor cloud and add it to the correspondences vector.for (size_t i = 0; i < scene_descriptors->size (); ++i){std::vector<int> neigh_indices (1);std::vector<float> neigh_sqr_dists (1);if (!pcl_isfinite (scene_descriptors->at (i).descriptor[0])) //skipping NaNs{continue;}int found_neighs = match_search.nearestKSearch (scene_descriptors->at (i), 1, neigh_indices, neigh_sqr_dists);if(found_neighs == 1 && neigh_sqr_dists[0] < 0.25f) // add match only if the squared descriptor distance is less than 0.25 (SHOT descriptor distances are between 0 and 1 by design){pcl::Correspondence corr (neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);model_scene_corrs->push_back (corr);}}std::cout << "Correspondences found: " << model_scene_corrs->size () << std::endl;//// Actual Clustering最后的階段是管道對前面相關的東西進行聚類。
默認的算法是Hough3DGrouping,這是一個以Hough投票為基礎的算法。記住,這個算法要求為每一個作為參數傳入的點云關鍵點關聯一個Local Reference Frame(LRF,局部參考坐標系)。在這個例子里面,我們通過BOARDLocalReferenceFrameEstimation這個估計器顯式地計算了LRF。
// Compute (Keypoints) Reference Frames only for Hough//pcl::PointCloud<RFType>::Ptr model_rf (new pcl::PointCloud<RFType> ());pcl::PointCloud<RFType>::Ptr scene_rf (new pcl::PointCloud<RFType> ());pcl::BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;rf_est.setFindHoles (true);rf_est.setRadiusSearch (rf_rad_);rf_est.setInputCloud (model_keypoints);rf_est.setInputNormals (model_normals);rf_est.setSearchSurface (model);rf_est.compute (*model_rf);rf_est.setInputCloud (scene_keypoints);rf_est.setInputNormals (scene_normals);rf_est.setSearchSurface (scene);rf_est.compute (*scene_rf);// Clusteringpcl::Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;clusterer.setHoughBinSize (cg_size_);clusterer.setHoughThreshold (cg_thresh_);clusterer.setUseInterpolation (true);clusterer.setUseDistanceWeight (false);clusterer.setInputCloud (model_keypoints);clusterer.setInputRf (model_rf);clusterer.setSceneCloud (scene_keypoints);clusterer.setSceneRf (scene_rf);clusterer.setModelSceneCorrespondences (model_scene_corrs);//clusterer.cluster (clustered_corrs);clusterer.recognize (rototranslations, clustered_corrs);}else // Using GeometricConsistency{pcl::GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;注意我們并于一定要顯示的計算LRF在調用聚類算法前。如果被用來聚類的點云沒用相關聯的LRF。
除了Hough3DGrouping之外,我們還可以選擇使用GeometricConsistencyGrouping算法。在這種情況下不需要計算LRF,我們只要簡單創建一個算法類的實例,傳入相應的參數,並調用recognize方法即可。
gc_clusterer.setGCSize (cg_size_);gc_clusterer.setGCThreshold (cg_thresh_);gc_clusterer.setInputCloud (model_keypoints);gc_clusterer.setSceneCloud (scene_keypoints);gc_clusterer.setModelSceneCorrespondences (model_scene_corrs);//gc_clusterer.cluster (clustered_corrs);gc_clusterer.recognize (rototranslations, clustered_corrs);}輸出和可視化。
std::cout << "\n Instance " << i + 1 << ":" << std::endl;std::cout << " Correspondences belonging to this instance: " << clustered_corrs[i].size () << std::endl;// Print the rotation matrix and translation vectorEigen::Matrix3f rotation = rototranslations[i].block<3,3>(0, 0);Eigen::Vector3f translation = rototranslations[i].block<3,1>(0, 3);printf ("\n");printf (" | %6.3f %6.3f %6.3f | \n", rotation (0,0), rotation (0,1), rotation (0,2));printf (" R = | %6.3f %6.3f %6.3f | \n", rotation (1,0), rotation (1,1), rotation (1,2));printf (" | %6.3f %6.3f %6.3f | \n", rotation (2,0), rotation (2,1), rotation (2,2));printf ("\n");printf (" t = < %0.3f, %0.3f, %0.3f >\n", translation (0), translation (1), translation (2));}程序到時候會顯示一個PCLVisualizer這個窗口。如果命令行選項里面-k或者-c被使用了,程序將會顯示一個“獨立的”經過渲染的點云。
?
運行程序
./correspondence_grouping milk.pcd milk_cartoon_all_small_clorox.pcd可選的,你可以指定點云分辨率單元里面的半徑
./correspondence_grouping milk.pcd milk_cartoon_all_small_clorox.pcd -r --model_ss 7.5 --scene_ss 20 --rf_rad 10 --descr_rad 15 --cg_size 10提示:如果你使用不同的點云並且不知道如何設置各個參數,可以加上"-r"這個標志,並把LRF和描述子的半徑分別設為點云分辨率的5、10、15倍。接著你需要小心地調節參數才能得到最好的結果。
幾秒之后會有如下輸出
Model total points: 13704; Selected Keypoints: 732 Scene total points: 307200; Selected Keypoints: 3747Correspondences found: 1768 Model instances found: 1Instance 1:Correspondences belonging to this instance: 24| 0.969 -0.120 0.217 |R = | 0.117 0.993 0.026 || -0.218 -0.000 0.976 |t = < -0.159, 0.212, -0.042 >?最終提示,如果你的pcl/filters里面沒有uniform_sampling.h這個文件你可以從https://github.com/PointCloudLibrary/pcl/tree/master/filters/include/pcl/filters下載uniform_sampling.h和impl里面的uniform_sampling.hpp放到/usr/include/pcl/pcl1.7里面對應的地方。
?
?
?
?
總結
以上是生活随笔為你收集整理的以相关组为基础的3D物体识别的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: sqlite数据库主键自增_sqlite
- 下一篇: Python数模笔记-Sklearn (