1. Monocular Calibration
 
The homography matrix
 
Let the homogeneous coordinates of a point in 3D space be $\tilde{M} = [X, Y, Z, 1]^T$, and let the corresponding homogeneous image coordinates be $\tilde{m} = [u, v, 1]^T$.

They satisfy the following relation:

$$ s\,\tilde{m} = K\,[R \mid t]\,\tilde{M} $$

Here $s$ is a scale factor, $K$ is the intrinsic matrix, and the rotation matrix $R$ and translation vector $t$ are collectively called the extrinsic parameters.
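For concreteness: assuming zero skew (which matches the four intrinsic parameters counted below), the intrinsic matrix has the standard form

$$ K = \begin{bmatrix} f_x & 0 & u_0 \\ 0 & f_y & v_0 \\ 0 & 0 & 1 \end{bmatrix} $$

where $f_x, f_y$ are the focal lengths in pixels and $(u_0, v_0)$ is the principal point.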
 
 
Suppose we are given K chessboard images (K here counts images; it is not the intrinsic matrix), each containing N corner points, so we have 2KN constraint equations. Meanwhile, ignoring distortion, we need to solve for 4 intrinsic parameters and 6K extrinsic parameters (the intrinsics depend only on the camera's internal properties, while the extrinsics change with the pose of the target). The intrinsic and extrinsic matrices can therefore be recovered only when 2KN >= 4 + 6K, i.e. when K(N-3) >= 2. At the same time, no matter how many corners are detected on a board, their regular layout means that only 4 of them contribute independent information (they can be extended into different rectangles in four directions). Taking N = 4, K(4-3) >= 2 gives K >= 2: we need images of the board in at least two different poses to solve for the intrinsics and extrinsics in the distortion-free case.
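As a quick sanity check of that count:

$$ 2KN \ge 4 + 6K, \quad N = 4 \;\Rightarrow\; 8K \ge 4 + 6K \;\Rightarrow\; K \ge 2. $$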
 
Therefore, taking the board plane as $Z = 0$, we define the homography used in camera calibration (mapping the object plane to the image plane) by:

$$ s \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} = K\,[\,r_1\ r_2\ t\,] \begin{bmatrix} X \\ Y \\ 1 \end{bmatrix}, \qquad H = \lambda\,K\,[\,r_1\ r_2\ t\,] $$

Writing $H = [\,h_1\ h_2\ h_3\,]$ and splitting the equation column by column gives:

$$ r_1 = \frac{1}{\lambda} K^{-1} h_1, \qquad r_2 = \frac{1}{\lambda} K^{-1} h_2, \qquad t = \frac{1}{\lambda} K^{-1} h_3 $$
Because the rotation vectors are orthonormal by construction, i.e. $r_1$ and $r_2$ are mutually orthogonal unit vectors, we can use the two consequences of this orthonormality; each homography (i.e. each board pose) then provides two constraint conditions:

The dot product of the rotation vectors is zero (mutually perpendicular directions):

$$ r_1^T r_2 = 0 $$

Substituting the expressions for $r_1$ and $r_2$ above and simplifying gives:

$$ h_1^T K^{-T} K^{-1} h_2 = 0 $$

The rotation vectors have equal length (rotation does not change scale):

$$ \|r_1\| = \|r_2\| = 1 $$

Replacing $r_1$ and $r_2$ in the same way gives:

$$ h_1^T K^{-T} K^{-1} h_1 = h_2^T K^{-T} K^{-1} h_2 $$
Let:

$$ B = K^{-T} K^{-1} = \begin{bmatrix} B_{11} & B_{12} & B_{13} \\ B_{12} & B_{22} & B_{23} \\ B_{13} & B_{23} & B_{33} \end{bmatrix} $$

Then the two constraint conditions become:

$$ h_1^T B\, h_2 = 0, \qquad h_1^T B\, h_1 = h_2^T B\, h_2 $$
From the above, each term appearing in the two constraints can be written in the form

$$ h_i^T B\, h_j = v_{ij}^T\, b $$

and since $B$ is symmetric, only 6 of its elements are actually informative (the 6 on and to one side of the main diagonal). Writing $h_{ik}$ for the $k$-th component of column $h_i$ of $H$, the expansion is:

$$ v_{ij} = [\,h_{i1}h_{j1},\; h_{i1}h_{j2} + h_{i2}h_{j1},\; h_{i2}h_{j2},\; h_{i3}h_{j1} + h_{i1}h_{j3},\; h_{i3}h_{j2} + h_{i2}h_{j3},\; h_{i3}h_{j3}\,]^T $$

$$ b = [\,B_{11},\; B_{12},\; B_{22},\; B_{13},\; B_{23},\; B_{33}\,]^T $$
The two constraint conditions are therefore equivalent to:

$$ \begin{bmatrix} v_{12}^T \\ (v_{11} - v_{22})^T \end{bmatrix} b = 0 $$

Stacking these two rows for all K images yields a $2K \times 6$ homogeneous system $V b = 0$.
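As an illustration of this step, here is a minimal sketch (not OpenCV's internal code) that stacks the two constraint rows from each homography and solves V b = 0 with cv::SVD::solveZ, which returns the right singular vector associated with the smallest singular value:

// Sketch only: build the 2K x 6 matrix V from the v_ij vectors of each
// homography and solve V b = 0 in the least-squares sense.
#include <opencv2/core/core.hpp>
#include <vector>

// v_ij built from columns i and j of H (0-based); H(r,c) is row r, column c.
static cv::Vec6d vij(const cv::Matx33d& H, int i, int j) {
    return cv::Vec6d(H(0,i)*H(0,j),
                     H(0,i)*H(1,j) + H(1,i)*H(0,j),
                     H(1,i)*H(1,j),
                     H(2,i)*H(0,j) + H(0,i)*H(2,j),
                     H(2,i)*H(1,j) + H(1,i)*H(2,j),
                     H(2,i)*H(2,j));
}

// Returns b = [B11, B12, B22, B13, B23, B33]^T (up to scale).
cv::Vec6d estimateB(const std::vector<cv::Matx33d>& homographies) {
    cv::Mat V((int)homographies.size() * 2, 6, CV_64F);
    for (int k = 0; k < (int)homographies.size(); k++) {
        cv::Vec6d v12 = vij(homographies[k], 0, 1);
        cv::Vec6d v11 = vij(homographies[k], 0, 0);
        cv::Vec6d v22 = vij(homographies[k], 1, 1);
        for (int c = 0; c < 6; c++) {
            V.at<double>(2*k,     c) = v12(c);           // v12^T b = 0
            V.at<double>(2*k + 1, c) = v11(c) - v22(c);  // (v11 - v22)^T b = 0
        }
    }
    cv::Mat b;
    cv::SVD::solveZ(V, b);  // unit-norm solution of V b = 0
    return cv::Vec6d(b.ptr<double>(0));
}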
 
 
From the earlier discussion we know that once the number of board images satisfies K(N-3) >= 2, b has a (nontrivial) solution; the elements of the intrinsic matrix then follow from the closed-form correspondence between B and b (the explicit formulas are omitted here). With the intrinsics in hand, the extrinsics can be recovered as:

$$ r_1 = \lambda K^{-1} h_1, \qquad r_2 = \lambda K^{-1} h_2, \qquad r_3 = r_1 \times r_2, \qquad t = \lambda K^{-1} h_3 $$
Furthermore, from the properties of the rotation matrix we have:

$$ \|r_1\| = \|r_2\| = 1 $$

and therefore:

$$ \lambda = \frac{1}{\|K^{-1} h_1\|} = \frac{1}{\|K^{-1} h_2\|} $$
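A minimal sketch of this recovery step, assuming K and H are already known (this mirrors the formulas above; it is not OpenCV's internal code):

// Sketch: recover [R | t] for one board pose from its homography H and
// the intrinsic matrix K.
#include <opencv2/core/core.hpp>

void extrinsicsFromHomography(const cv::Matx33d& K, const cv::Matx33d& H,
                              cv::Matx33d& R, cv::Vec3d& t) {
    cv::Matx33d Kinv = K.inv();
    cv::Vec3d h1(H(0,0), H(1,0), H(2,0));
    cv::Vec3d h2(H(0,1), H(1,1), H(2,1));
    cv::Vec3d h3(H(0,2), H(1,2), H(2,2));
    double lambda = 1.0 / cv::norm(Kinv * h1);  // lambda = 1 / ||K^-1 h1||
    cv::Vec3d r1 = lambda * (Kinv * h1);
    cv::Vec3d r2 = lambda * (Kinv * h2);
    cv::Vec3d r3 = r1.cross(r2);                // r3 = r1 x r2
    t = lambda * (Kinv * h3);
    R = cv::Matx33d(r1(0), r2(0), r3(0),
                    r1(1), r2(1), r3(1),
                    r1(2), r2(2), r3(2));
    // Note: with noisy data R is only approximately orthonormal; OpenCV
    // re-projects it onto a true rotation via SVD before the final
    // Levenberg-Marquardt refinement.
}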
 
 
Code analysis
 
Main pipeline code
 
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <iostream>
#include "popt_pp.h"
#include <sys/stat.h>

using namespace std;
using namespace cv;

vector< vector< Point3f > > object_points;
vector< vector< Point2f > > image_points;
vector< Point2f > corners;
vector< vector< Point2f > > left_img_points;

Mat img, gray;
Size im_size;

bool doesExist (const std::string& name) {
  struct stat buffer;
  return (stat (name.c_str(), &buffer) == 0);
}

// Step 1 and 2: detect corners in every image and build the matching
// object-point (board coordinates) / image-point (pixel) lists.
void setup_calibration(int board_width, int board_height, int num_imgs,
                       float square_size, char* imgs_directory,
                       char* imgs_filename, char* extension) {
  Size board_size = Size(board_width, board_height);
  int board_n = board_width * board_height;

  for (int k = 1; k <= num_imgs; k++) {
    char img_file[100];
    sprintf(img_file, "%s%s%d.%s", imgs_directory, imgs_filename, k, extension);
    if (!doesExist(img_file))
      continue;
    img = imread(img_file, CV_LOAD_IMAGE_COLOR);
    cv::cvtColor(img, gray, CV_BGR2GRAY);

    bool found = false;
    found = cv::findChessboardCorners(img, board_size, corners,
                                      CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
    if (found) {
      // refine corner locations to sub-pixel accuracy
      cornerSubPix(gray, corners, cv::Size(5, 5), cv::Size(-1, -1),
                   TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
      drawChessboardCorners(gray, board_size, corners, found);
    }

    // board corners in board coordinates: Z = 0, spaced by the square size
    vector< Point3f > obj;
    for (int i = 0; i < board_height; i++)
      for (int j = 0; j < board_width; j++)
        obj.push_back(Point3f((float)j * square_size, (float)i * square_size, 0));

    if (found) {
      cout << k << ". Found corners!" << endl;
      image_points.push_back(corners);
      object_points.push_back(obj);
    }
  }
}

// RMS reprojection error: project the object points with the estimated
// parameters and compare against the detected corners.
double computeReprojectionErrors(const vector< vector< Point3f > >& objectPoints,
                                 const vector< vector< Point2f > >& imagePoints,
                                 const vector< Mat >& rvecs, const vector< Mat >& tvecs,
                                 const Mat& cameraMatrix, const Mat& distCoeffs) {
  vector< Point2f > imagePoints2;
  int i, totalPoints = 0;
  double totalErr = 0, err;
  vector< float > perViewErrors;
  perViewErrors.resize(objectPoints.size());

  for (i = 0; i < (int)objectPoints.size(); ++i) {
    projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
                  distCoeffs, imagePoints2);
    err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
    int n = (int)objectPoints[i].size();
    perViewErrors[i] = (float) std::sqrt(err*err/n);
    totalErr += err*err;
    totalPoints += n;
  }
  return std::sqrt(totalErr/totalPoints);
}

int main(int argc, char const **argv)
{
  int board_width, board_height, num_imgs;
  float square_size;
  char* imgs_directory;
  char* imgs_filename;
  char* out_file;
  char* extension;

  static struct poptOption options[] = {
    { "board_width",'w',POPT_ARG_INT,&board_width,0,"Checkerboard width","NUM" },
    { "board_height",'h',POPT_ARG_INT,&board_height,0,"Checkerboard height","NUM" },
    { "num_imgs",'n',POPT_ARG_INT,&num_imgs,0,"Number of checkerboard images","NUM" },
    { "square_size",'s',POPT_ARG_FLOAT,&square_size,0,"Size of checkerboard square","NUM" },
    { "imgs_directory",'d',POPT_ARG_STRING,&imgs_directory,0,"Directory containing images","STR" },
    { "imgs_filename",'i',POPT_ARG_STRING,&imgs_filename,0,"Image filename","STR" },
    { "extension",'e',POPT_ARG_STRING,&extension,0,"Image extension","STR" },
    { "out_file",'o',POPT_ARG_STRING,&out_file,0,"Output calibration filename (YML)","STR" },
    POPT_AUTOHELP
    { NULL, 0, 0, NULL, 0, NULL, NULL }
  };

  POpt popt(NULL, argc, argv, options, 0);
  int c;
  while ((c = popt.getNextOpt()) >= 0) {}

  setup_calibration(board_width, board_height, num_imgs, square_size,
                    imgs_directory, imgs_filename, extension);

  printf("Starting Calibration\n");
  Mat K;
  Mat D;
  vector< Mat > rvecs, tvecs;
  int flag = 0;
  flag |= CV_CALIB_FIX_K4;
  flag |= CV_CALIB_FIX_K5;
  // Step 3: estimate intrinsics K, distortion D, and per-view extrinsics
  calibrateCamera(object_points, image_points, img.size(), K, D, rvecs, tvecs, flag);

  cout << "Calibration error: "
       << computeReprojectionErrors(object_points, image_points, rvecs, tvecs, K, D)
       << endl;

  FileStorage fs(out_file, FileStorage::WRITE);
  fs << "K" << K;
  fs << "D" << D;
  fs << "board_width" << board_width;
  fs << "board_height" << board_height;
  fs << "square_size" << square_size;
  printf("Done Calibration\n");

  return 0;
}
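Assuming the binary is built as calibrate, an invocation might look like this (the option letters come from the poptOption table above; the paths, board size, and square size are placeholder values):

./calibrate -w 9 -h 6 -n 25 -s 0.025 -d "./calib_imgs/" -i "left" -e "jpg" -o "cam_left.yml"

calibrateCamera then returns the intrinsic matrix K, the distortion coefficients D, and one rvec/tvec pair (the extrinsics) per image.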
The code proceeds in three steps:

1. Detect the corners of the calibration board.

2. Build the object-point and image-point coordinates.

3. Compute the intrinsic parameters.
 
1. Corner detection function
 
Function signature:
 int cvFindChessboardCorners( const void* image, CvSize pattern_size, CvPoint2D32f* corners, int* corner_count=NULL, int flags=CV_CALIB_CB_ADAPTIVE_THRESH );
 Parameters:
 image:
 The input chessboard image; it must be an 8-bit grayscale or color image.
 pattern_size:
 The number of inner corners per chessboard row and column.
 corners:
 The detected corners.
 corner_count:
 Output corner count. If not NULL, the function stores the number of detected corners in this variable.
 flags:
 Operation flags, which can be 0 or a combination of the following values:
 CV_CALIB_CB_ADAPTIVE_THRESH - use adaptive thresholding (computed from the average image brightness) to binarize the image, instead of a fixed threshold.
 CV_CALIB_CB_NORMALIZE_IMAGE - equalize the image brightness with cvNormalizeHist before applying fixed or adaptive thresholding.
 CV_CALIB_CB_FILTER_QUADS - use additional criteria (such as contour area, perimeter, and square-like shape) to filter out false quads found at the contour detection stage.
 Notes:
 The function cvFindChessboardCorners attempts to determine whether the input image is a chessboard pattern and to locate the corner points. It returns a nonzero value if all the corners are found and ordered consistently; otherwise, if the function fails to find all the corners or to order them, it returns 0. For example, a regular chessboard has 8x8 squares and 7x7 inner corners, the inner corners being the points where black squares touch each other. The coordinates detected by this function are only approximate; to determine their positions precisely, use the function cvFindCornerSubPix.
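A minimal usage sketch with the C++ API (the filename and board size are placeholders; the flags and refinement settings mirror the main program above):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main() {
    cv::Mat img = cv::imread("board.jpg");  // placeholder filename
    cv::Mat gray;
    cv::cvtColor(img, gray, CV_BGR2GRAY);
    cv::Size pattern(9, 6);                 // inner corners per row / column
    std::vector<cv::Point2f> corners;
    bool found = cv::findChessboardCorners(gray, pattern, corners,
                                           CV_CALIB_CB_ADAPTIVE_THRESH);
    if (found) {
        // refine to sub-pixel accuracy, as recommended in the notes above
        cv::cornerSubPix(gray, corners, cv::Size(5, 5), cv::Size(-1, -1),
                         cv::TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
        cv::drawChessboardCorners(img, pattern, corners, found);
    }
    return found ? 0 : 1;
}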
 
2. Extrinsic parameter computation function

OpenCV estimates the extrinsics for each view in cvFindExtrinsicCameraParams2 (library source below): an initial pose is obtained from a homography (planar case) or from DLT (non-planar case), then refined with Levenberg-Marquardt:

CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
          const CvMat* imagePoints, const CvMat* A,
          const CvMat* distCoeffs, CvMat* rvec, CvMat* tvec,
          int useExtrinsicGuess )
{
    const int max_iter = 20;
    Ptr<CvMat> matM, _Mxy, _m, _mn, matL;

    int i, count;
    double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
    double MM[9], U[9], V[9], W[3];
    cv::Scalar Mc;
    double param[6];
    CvMat matA = cvMat( 3, 3, CV_64F, a );
    CvMat _Ar = cvMat( 3, 3, CV_64F, ar );
    CvMat matR = cvMat( 3, 3, CV_64F, R );
    CvMat _r = cvMat( 3, 1, CV_64F, param );
    CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );
    CvMat _Mc = cvMat( 1, 3, CV_64F, Mc.val );
    CvMat _MM = cvMat( 3, 3, CV_64F, MM );
    CvMat matU = cvMat( 3, 3, CV_64F, U );
    CvMat matV = cvMat( 3, 3, CV_64F, V );
    CvMat matW = cvMat( 3, 1, CV_64F, W );
    CvMat _param = cvMat( 6, 1, CV_64F, param );
    CvMat _dpdr, _dpdt;

    CV_Assert( CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) &&
        CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) );

    count = MAX(objectPoints->cols, objectPoints->rows);
    matM.reset(cvCreateMat( 1, count, CV_64FC3 ));
    _m.reset(cvCreateMat( 1, count, CV_64FC2 ));

    cvConvertPointsHomogeneous( objectPoints, matM );
    cvConvertPointsHomogeneous( imagePoints, _m );
    cvConvert( A, &matA );

    CV_Assert( (CV_MAT_DEPTH(rvec->type) == CV_64F || CV_MAT_DEPTH(rvec->type) == CV_32F) &&
        (rvec->rows == 1 || rvec->cols == 1) &&
        rvec->rows*rvec->cols*CV_MAT_CN(rvec->type) == 3 );

    CV_Assert( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) &&
        (tvec->rows == 1 || tvec->cols == 1) &&
        tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 );

    CV_Assert((count >= 4) || (count == 3 && useExtrinsicGuess)); // it is unsafe to call LM optimisation without an extrinsic guess in the case of 3 points. This is because there is no guarantee that it will converge on the correct solution.

    _mn.reset(cvCreateMat( 1, count, CV_64FC2 ));
    _Mxy.reset(cvCreateMat( 1, count, CV_64FC2 ));

    // normalize image points
    // (unapply the intrinsic matrix transformation and distortion)
    cvUndistortPoints( _m, _mn, &matA, distCoeffs, 0, &_Ar );

    if( useExtrinsicGuess )
    {
        CvMat _r_temp = cvMat(rvec->rows, rvec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
        CvMat _t_temp = cvMat(tvec->rows, tvec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3);
        cvConvert( rvec, &_r_temp );
        cvConvert( tvec, &_t_temp );
    }
    else
    {
        Mc = cvAvg(matM);
        cvReshape( matM, matM, 1, count );
        cvMulTransposed( matM, &_MM, 1, &_Mc );
        cvSVD( &_MM, &matW, 0, &matV, CV_SVD_MODIFY_A + CV_SVD_V_T );

        // initialize extrinsic parameters
        if( W[2]/W[1] < 1e-3)
        {
            // a planar structure case (all M's lie in the same plane)
            double tt[3], h[9], h1_norm, h2_norm;
            CvMat* R_transform = &matV;
            CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
            CvMat matH = cvMat( 3, 3, CV_64F, h );
            CvMat _h1, _h2, _h3;

            if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
                cvSetIdentity( R_transform );

            if( cvDet(R_transform) < 0 )
                cvScale( R_transform, R_transform, -1 );

            cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );

            for( i = 0; i < count; i++ )
            {
                const double* Rp = R_transform->data.db;
                const double* Tp = T_transform.data.db;
                const double* src = matM->data.db + i*3;
                double* dst = _Mxy->data.db + i*2;

                dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
                dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
            }

            cvFindHomography( _Mxy, _mn, &matH );

            if( cvCheckArr(&matH, CV_CHECK_QUIET) )
            {
                cvGetCol( &matH, &_h1, 0 );
                _h2 = _h1; _h2.data.db++;
                _h3 = _h2; _h3.data.db++;
                h1_norm = std::sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
                h2_norm = std::sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);

                cvScale( &_h1, &_h1, 1./MAX(h1_norm, DBL_EPSILON) );
                cvScale( &_h2, &_h2, 1./MAX(h2_norm, DBL_EPSILON) );
                cvScale( &_h3, &_t, 2./MAX(h1_norm + h2_norm, DBL_EPSILON));
                cvCrossProduct( &_h1, &_h2, &_h3 );

                cvRodrigues2( &matH, &_r );
                cvRodrigues2( &_r, &matH );
                cvMatMulAdd( &matH, &T_transform, &_t, &_t );
                cvMatMul( &matH, R_transform, &matR );
            }
            else
            {
                cvSetIdentity( &matR );
                cvZero( &_t );
            }

            cvRodrigues2( &matR, &_r );
        }
        else
        {
            // non-planar structure. Use DLT method
            double* L;
            double LL[12*12], LW[12], LV[12*12], sc;
            CvMat _LL = cvMat( 12, 12, CV_64F, LL );
            CvMat _LW = cvMat( 12, 1, CV_64F, LW );
            CvMat _LV = cvMat( 12, 12, CV_64F, LV );
            CvMat _RRt, _RR, _tt;
            CvPoint3D64f* M = (CvPoint3D64f*)matM->data.db;
            CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db;

            matL.reset(cvCreateMat( 2*count, 12, CV_64F ));
            L = matL->data.db;

            for( i = 0; i < count; i++, L += 24 )
            {
                double x = -mn[i].x, y = -mn[i].y;
                L[0] = L[16] = M[i].x;
                L[1] = L[17] = M[i].y;
                L[2] = L[18] = M[i].z;
                L[3] = L[19] = 1.;
                L[4] = L[5] = L[6] = L[7] = 0.;
                L[12] = L[13] = L[14] = L[15] = 0.;
                L[8] = x*M[i].x;
                L[9] = x*M[i].y;
                L[10] = x*M[i].z;
                L[11] = x;
                L[20] = y*M[i].x;
                L[21] = y*M[i].y;
                L[22] = y*M[i].z;
                L[23] = y;
            }

            cvMulTransposed( matL, &_LL, 1 );
            cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
            _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
            cvGetCols( &_RRt, &_RR, 0, 3 );
            cvGetCol( &_RRt, &_tt, 3 );
            if( cvDet(&_RR) < 0 )
                cvScale( &_RRt, &_RRt, -1 );
            sc = cvNorm(&_RR);
            CV_Assert(fabs(sc) > DBL_EPSILON);
            cvSVD( &_RR, &matW, &matU, &matV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
            cvGEMM( &matU, &matV, 1, 0, 0, &matR, CV_GEMM_A_T );
            cvScale( &_tt, &_t, cvNorm(&matR)/sc );
            cvRodrigues2( &matR, &_r );
        }
    }

    cvReshape( matM, matM, 3, 1 );
    cvReshape( _mn, _mn, 2, 1 );

    // refine extrinsic parameters using iterative algorithm
    CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true);
    cvCopy( &_param, solver.param );

    for(;;)
    {
        CvMat *matJ = 0, *_err = 0;
        const CvMat *__param = 0;
        bool proceed = solver.update( __param, matJ, _err );
        cvCopy( __param, &_param );
        if( !proceed || !_err )
            break;
        cvReshape( _err, _err, 2, 1 );
        if( matJ )
        {
            cvGetCols( matJ, &_dpdr, 0, 3 );
            cvGetCols( matJ, &_dpdt, 3, 6 );
            cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
                              _err, &_dpdr, &_dpdt, 0, 0, 0 );
        }
        else
        {
            cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
                              _err, 0, 0, 0, 0, 0 );
        }
        cvSub(_err, _m, _err);
        cvReshape( _err, _err, 1, 2*count );
    }
    cvCopy( solver.param, &_param );

    _r = cvMat( rvec->rows, rvec->cols,
        CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
    _t = cvMat( tvec->rows, tvec->cols,
        CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3 );

    cvConvert( &_r, rvec );
    cvConvert( &_t, tvec );
}
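For comparison, the same estimation is exposed in the C++ API as cv::solvePnP. A self-contained sketch (all numbers are made-up placeholder values) that synthesizes image points from a known pose and then recovers that pose:

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
#include <vector>

int main() {
    // Hypothetical intrinsics with no distortion, just to exercise the call.
    cv::Mat K = (cv::Mat_<double>(3,3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    cv::Mat D = cv::Mat::zeros(5, 1, CV_64F);

    // A 9x6 planar board with 25 mm squares, as in the main program above.
    std::vector<cv::Point3f> objPts;
    for (int i = 0; i < 6; i++)
        for (int j = 0; j < 9; j++)
            objPts.push_back(cv::Point3f(j * 0.025f, i * 0.025f, 0));

    // Project with a known pose, then recover it with solvePnP.
    cv::Mat rvecTrue = (cv::Mat_<double>(3,1) << 0.1, -0.2, 0.05);
    cv::Mat tvecTrue = (cv::Mat_<double>(3,1) << -0.1, -0.05, 0.5);
    std::vector<cv::Point2f> imgPts;
    cv::projectPoints(objPts, rvecTrue, tvecTrue, K, D, imgPts);

    cv::Mat rvec, tvec;
    cv::solvePnP(objPts, imgPts, K, D, rvec, tvec);  // rvec is a Rodrigues vector
    cv::Mat R;
    cv::Rodrigues(rvec, R);  // expand to a full 3x3 rotation matrix
    std::cout << "rvec: " << rvec.t() << "\ntvec: " << tvec.t() << std::endl;
    return 0;
}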
3. Intrinsic parameter initialization function

cvInitIntrinsicParams2D computes the initial intrinsic estimate from the per-view homographies (library source below):

CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
                         const CvMat* imagePoints, const CvMat* npoints,
                         CvSize imageSize, CvMat* cameraMatrix,
                         double aspectRatio )
{
    Ptr<CvMat> matA, _b, _allH;

    int i, j, pos, nimages, ni = 0;
    double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
    double H[9] = {0}, f[2] = {0};
    CvMat _a = cvMat( 3, 3, CV_64F, a );
    CvMat matH = cvMat( 3, 3, CV_64F, H );
    CvMat _f = cvMat( 2, 1, CV_64F, f );

    assert( CV_MAT_TYPE(npoints->type) == CV_32SC1 &&
            CV_IS_MAT_CONT(npoints->type) );
    nimages = npoints->rows + npoints->cols - 1;

    if( (CV_MAT_TYPE(objectPoints->type) != CV_32FC3 &&
        CV_MAT_TYPE(objectPoints->type) != CV_64FC3) ||
        (CV_MAT_TYPE(imagePoints->type) != CV_32FC2 &&
        CV_MAT_TYPE(imagePoints->type) != CV_64FC2) )
        CV_Error( CV_StsUnsupportedFormat, "Both object points and image points must be 2D" );

    if( objectPoints->rows != 1 || imagePoints->rows != 1 )
        CV_Error( CV_StsBadSize, "object points and image points must be a single-row matrices" );

    matA.reset(cvCreateMat( 2*nimages, 2, CV_64F ));
    _b.reset(cvCreateMat( 2*nimages, 1, CV_64F ));
    a[2] = (!imageSize.width) ? 0.5 : (imageSize.width)*0.5;
    a[5] = (!imageSize.height) ? 0.5 : (imageSize.height)*0.5;
    _allH.reset(cvCreateMat( nimages, 9, CV_64F ));

    // extract vanishing points in order to obtain initial value for the focal length
    for( i = 0, pos = 0; i < nimages; i++, pos += ni )
    {
        double* Ap = matA->data.db + i*4;
        double* bp = _b->data.db + i*2;
        ni = npoints->data.i[i];
        double h[3], v[3], d1[3], d2[3];
        double n[4] = {0,0,0,0};
        CvMat _m, matM;
        cvGetCols( objectPoints, &matM, pos, pos + ni );
        cvGetCols( imagePoints, &_m, pos, pos + ni );

        cvFindHomography( &matM, &_m, &matH );
        memcpy( _allH->data.db + i*9, H, sizeof(H) );

        H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2];
        H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5];

        for( j = 0; j < 3; j++ )
        {
            double t0 = H[j*3], t1 = H[j*3+1];
            h[j] = t0; v[j] = t1;
            d1[j] = (t0 + t1)*0.5;
            d2[j] = (t0 - t1)*0.5;
            n[0] += t0*t0; n[1] += t1*t1;
            n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j];
        }

        for( j = 0; j < 4; j++ )
            n[j] = 1./std::sqrt(n[j]);

        for( j = 0; j < 3; j++ )
        {
            h[j] *= n[0]; v[j] *= n[1];
            d1[j] *= n[2]; d2[j] *= n[3];
        }

        Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1];
        Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1];
        bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2];
    }

    cvSolve( matA, _b, &_f, CV_NORMAL + CV_SVD );
    a[0] = std::sqrt(fabs(1./f[0]));
    a[4] = std::sqrt(fabs(1./f[1]));

    if( aspectRatio != 0 )
    {
        double tf = (a[0] + a[4])/(aspectRatio + 1.);
        a[0] = aspectRatio*tf;
        a[4] = tf;
    }

    cvConvert( &_a, cameraMatrix );
}
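Note how this initializer works: it fixes the principal point at the image center (a[2], a[5]), assumes zero skew, and estimates only the two focal lengths, using one pair of orthogonal vanishing-point constraints per homography (see the comment in the source above). The full parameter set, including distortion, is then refined by Levenberg-Marquardt inside the calibration routine, just as the extrinsics are refined in cvFindExtrinsicCameraParams2.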
 
References:
 
https://zhuanlan.zhihu.com/p/24651968
 
https://blog.csdn.net/h532600610/article/details/51800488
 
https://www.cnblogs.com/riddick/p/8476456.html
 