
Camera Calibration (3): Computing Extrinsic Parameters with solvePnPRansac() and solvePnP() in OpenCV


The solvePnP function

bool solvePnP( InputArray objectPoints, InputArray imagePoints,
               InputArray cameraMatrix, InputArray distCoeffs,
               OutputArray rvec, OutputArray tvec,
               bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE );

Parameter descriptions:


    objectPoints:      std::vector of the feature points' physical (world) coordinates
    imagePoints:       std::vector of the feature points' image coordinates; at least 4 point pairs are required
    cameraMatrix:      cv::Mat(3, 3, CV_32FC1), the camera intrinsic matrix (3x3 float)
    distCoeffs:        cv::Mat(1, 5, CV_32FC1), the camera distortion coefficients
    rvec:              output rotation vector
    tvec:              output translation vector
    useExtrinsicGuess: 0 (false), i.e. do not use an initial extrinsic guess
    flags:             the solution method

The key flags parameter options are listed below:


       SOLVEPNP_ITERATIVE = 0,
       SOLVEPNP_EPNP      = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
       SOLVEPNP_P3P       = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem
       SOLVEPNP_DLS       = 3, //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
       SOLVEPNP_UPNP      = 4, //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation
       SOLVEPNP_AP3P      = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem
       SOLVEPNP_MAX_COUNT      //!< Used for count


For standard feature points that lie on the same plane, the SOLVEPNP_ITERATIVE method is mainly used.


Keep in mind: although OpenCV parameters generally favor the float type, in solvePnP only the camera intrinsic matrix and the distortion coefficient matrix are float; all the other matrices should be double, otherwise the computed results may be incorrect.
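For reference, here is a minimal sketch of such a call for four coplanar points. The object points, image points, and intrinsic values below are made-up placeholders; only the types and argument order follow the description above.

// Minimal solvePnP sketch (illustrative values only).
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>

int main()
{
    // Four coplanar object points (Z = 0), stored as double (Point3d).
    std::vector<cv::Point3d> objectPoints = {
        {0.0, 0.0, 0.0}, {0.1, 0.0, 0.0}, {0.1, 0.1, 0.0}, {0.0, 0.1, 0.0}
    };
    // Their observed image coordinates, also double (Point2d).
    std::vector<cv::Point2d> imagePoints = {
        {320.0, 240.0}, {400.0, 242.0}, {398.0, 320.0}, {322.0, 318.0}
    };

    // Intrinsics and distortion coefficients as float matrices.
    cv::Mat cameraMatrix = (cv::Mat_<float>(3, 3) << 800, 0, 320,
                                                     0, 800, 240,
                                                     0, 0, 1);
    cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_32FC1);

    cv::Mat rvec, tvec; // outputs: rotation vector and translation vector (double)
    bool ok = cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                           rvec, tvec, false, cv::SOLVEPNP_ITERATIVE);
    std::cout << ok << "\nrvec: " << rvec << "\ntvec: " << tvec << std::endl;
    return 0;
}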


The solvePnPRansac function

One drawback of solvePnP is that it is not robust to outliers: when a camera is used to locate real-world points, mismatched correspondences may exist.


bool cv::solvePnPRansac( InputArray objectPoints,
                         InputArray imagePoints,
                         InputArray cameraMatrix,
                         InputArray distCoeffs,
                         OutputArray rvec,
                         OutputArray tvec,
                         bool useExtrinsicGuess = false,
                         int iterationsCount = 100,
                         float reprojectionError = 8.0,
                         double confidence = 0.99,
                         OutputArray inliers = noArray(),
                         int flags = SOLVEPNP_ITERATIVE
                       )

The parameters of solvePnPRansac are essentially the same as those of solvePnP:


    [in]  _opoints            the reference points in the world coordinate system; float or double
    [in]  _ipoints            the reference points on the camera image plane; float or double
    [in]  _cameraMatrix       camera intrinsic matrix
    [in]  _distCoeffs         camera distortion coefficients
    [out] _rvec               output rotation vector (a rotation matrix can be recovered from it with cv::Rodrigues)
    [out] _tvec               output translation vector
    [in]  useExtrinsicGuess   if PnP is solved with the iterative algorithm, the initial value can be a supplied guess (true) or the result of an analytical solution (false)
    [in]  iterationsCount     number of RANSAC iterations; this is only an initial value, since the function refines it internally from the estimated outlier ratio, so a large value can be given at the start
    [in]  reprojectionError   the distance threshold RANSAC uses to separate inliers from outliers; it can be derived from the estimated inlier ratio and each point's variance (assuming Gaussian error)
    [in]  confidence          related to the number of sampling (iteration) rounds; it is the probability that, drawing s points from n samples, at least one of the N draws consists entirely of inliers
    [out] _inliers            the indices of the inliers, returned in matrix form
    [in]  flags               the model used on the minimal subset; the options are listed below, and a usage sketch follows the list

*   SOLVEPNP_ITERATIVE (in this scheme, EPnP is used on the minimal subset, followed by an iterative refinement once the inliers are selected);
*   SOLVEPNP_P3P (P3P is used only on the minimal subset; EPnP is run after the inliers are selected)
*   SOLVEPNP_AP3P (AP3P is used only on the minimal subset; EPnP is run after the inliers are selected)
*   SOLVEPNP_EPNP (EPnP is used both on the minimal subset and after the inliers are selected)
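Below is a minimal sketch of a solvePnPRansac call that also retrieves the inlier indices. The correspondence containers are left empty as placeholders and the intrinsic values are made up; only the argument order and types reflect the signature above.

#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>

int main()
{
    // Fill these with matched 3D-2D correspondences from your own pipeline;
    // they may contain mismatches (outliers), which is what RANSAC filters out.
    std::vector<cv::Point3d> objectPoints; // world coordinates
    std::vector<cv::Point2d> imagePoints;  // pixel coordinates

    // Intrinsics and distortion from a prior calibration (placeholder values here).
    cv::Mat cameraMatrix = (cv::Mat_<float>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    cv::Mat distCoeffs   = cv::Mat::zeros(1, 5, CV_32FC1);

    if (objectPoints.size() < 4)
    {
        std::cout << "need at least 4 correspondences" << std::endl;
        return 0;
    }

    cv::Mat rvec, tvec;
    cv::Mat inliers; // indices of the correspondences kept as inliers
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                       rvec, tvec,
                       false,   // useExtrinsicGuess
                       100,     // iterationsCount (initial value)
                       8.0f,    // reprojectionError threshold, in pixels
                       0.99,    // confidence
                       inliers, // output inlier indices
                       cv::SOLVEPNP_EPNP);
    std::cout << "rvec: " << rvec << "\ntvec: " << tvec
              << "\ninlier count: " << inliers.rows << std::endl;
    return 0;
}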


Example:


// Test the P3P / PnP pose-estimation algorithms
#include <iostream>
#include <vector>
#include <algorithm>
#include <cfloat>
#include <cstdlib>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

void generate3DPointCloud(std::vector<cv::Point3f>& points, cv::Point3f pmin = cv::Point3f(-1, -1, 5), cv::Point3f pmax = cv::Point3f(1, 1, 10))
{
    cv::RNG rng = cv::theRNG(); // fix the seed to use "fixed" input 3D points

    for (size_t i = 0; i < points.size(); i++)
    {
        float _x = rng.uniform(pmin.x, pmax.x);
        float _y = rng.uniform(pmin.y, pmax.y);
        float _z = rng.uniform(pmin.z, pmax.z);
        points[i] = cv::Point3f(_x, _y, _z);
    }
}

void generateCameraMatrix(cv::Mat& cameraMatrix, cv::RNG& rng)
{
    const double fcMinVal = 1e-3;
    const double fcMaxVal = 100;
    cameraMatrix.create(3, 3, CV_64FC1);
    cameraMatrix.setTo(cv::Scalar(0));
    cameraMatrix.at<double>(0, 0) = rng.uniform(fcMinVal, fcMaxVal); // fx
    cameraMatrix.at<double>(1, 1) = rng.uniform(fcMinVal, fcMaxVal); // fy
    cameraMatrix.at<double>(0, 2) = rng.uniform(fcMinVal, fcMaxVal); // cx
    cameraMatrix.at<double>(1, 2) = rng.uniform(fcMinVal, fcMaxVal); // cy
    cameraMatrix.at<double>(2, 2) = 1;
}

void generateDistCoeffs(cv::Mat& distCoeffs, cv::RNG& rng)
{
    distCoeffs = cv::Mat::zeros(4, 1, CV_64FC1);
    for (int i = 0; i < 3; i++)
        distCoeffs.at<double>(i, 0) = rng.uniform(0.0, 1.0e-6);
}

void generatePose(cv::Mat& rvec, cv::Mat& tvec, cv::RNG& rng)
{
    const double minVal = 1.0e-3;
    const double maxVal = 1.0;
    rvec.create(3, 1, CV_64FC1);
    tvec.create(3, 1, CV_64FC1);
    for (int i = 0; i < 3; i++)
    {
        rvec.at<double>(i, 0) = rng.uniform(minVal, maxVal);
        tvec.at<double>(i, 0) = rng.uniform(minVal, maxVal / 10);
    }
}


int main_p3p()
{
    std::vector<cv::Point3f> points;
    points.resize(500);
    generate3DPointCloud(points);

    std::vector<cv::Mat> rvecs, tvecs;
    cv::Mat trueRvec, trueTvec;
    cv::Mat intrinsics, distCoeffs;

    cv::RNG rng; // a named RNG so it can be passed by non-const reference
    generateCameraMatrix(intrinsics, rng);
    generateDistCoeffs(distCoeffs, rng);
    generatePose(trueRvec, trueTvec, rng);

    // solveP3P expects exactly 3 correspondences; take the first 3 points.
    std::vector<cv::Point3f> opoints(points.begin(), points.begin() + 3);

    std::vector<cv::Point2f> projectedPoints;
    projectedPoints.resize(opoints.size());
    cv::projectPoints(cv::Mat(opoints), trueRvec, trueTvec, intrinsics, distCoeffs, projectedPoints);

    std::cout << "intrinsics: " << intrinsics << std::endl;
    std::cout << "distcoeffs: " << distCoeffs << std::endl;
    std::cout << "trueRvec: " << trueRvec << std::endl;
    std::cout << "trueTvec: " << trueTvec << std::endl;

    std::cout << "oPoint: " << opoints << std::endl;
    std::cout << "projectedPoints: " << projectedPoints << std::endl;

    //std::cout << "result numbers A: :" << solveP3P(opoints, projectedPoints, intrinsics, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_AP3P) << std::endl;
    //std::cout << "result numbers: :" << solveP3P(opoints, projectedPoints, intrinsics, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_P3P) << std::endl;

    bool isTestSuccess = false;
    double error = DBL_MAX;
    for (unsigned int i = 0; i < rvecs.size() /*&& !isTestSuccess*/; ++i)
    {
        double rvecDiff = cv::norm(rvecs[i], trueRvec, cv::NORM_L2);
        double tvecDiff = cv::norm(tvecs[i], trueTvec, cv::NORM_L2);
        isTestSuccess = rvecDiff < 1.0e-4 && tvecDiff < 1.0e-4;
        error = std::min(error, std::max(rvecDiff, tvecDiff));
        std::cout << "i: " << i << std::endl;
        std::cout << "error: " << error << std::endl;
        std::cout << "rvec: " << rvecs[i] << std::endl;
    }

    system("pause");
    return 0;
}
int main()
{
    std::vector<cv::Point3f> points;
    points.resize(500);
    generate3DPointCloud(points);

    std::vector<cv::Mat> rvecs, tvecs;
    cv::Mat trueRvec, trueTvec;
    cv::Mat intrinsics, distCoeffs;

    cv::RNG rng; // a named RNG so it can be passed by non-const reference
    generateCameraMatrix(intrinsics, rng);
    generateDistCoeffs(distCoeffs, rng);
    generatePose(trueRvec, trueTvec, rng);

    std::vector<cv::Point3f> opoints;
    //opoints = std::vector<cv::Point3f>(points.begin(), points.begin() + 4);
    opoints = std::vector<cv::Point3f>(points.begin(), points.end());

    std::vector<cv::Point2f> projectedPoints;
    projectedPoints.resize(opoints.size());
    cv::projectPoints(cv::Mat(opoints), trueRvec, trueTvec, intrinsics, distCoeffs, projectedPoints);

    // Corrupt every 100th projection with another randomly chosen one to simulate mismatches.
    for (size_t i = 0; i < projectedPoints.size(); i++)
    {
        if (i % 100 == 0)
        {
            projectedPoints[i] = projectedPoints[rng.uniform(0, (int)points.size() - 1)];
        }
    }

    std::cout << "intrinsics: " << intrinsics << std::endl;
    std::cout << "distcoeffs: " << distCoeffs << std::endl;
    std::cout << "trueRvec: " << trueRvec << std::endl;
    std::cout << "trueTvec: " << trueTvec << std::endl;

    //std::cout << "oPoint: " << opoints << std::endl;
    //std::cout << "projectedPoints: " << projectedPoints << std::endl;

    //solveP3P(opoints, projectedPoints, intrinsics, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_AP3P);
    cv::Mat rvec, tvec;
    cv::solvePnP(opoints, projectedPoints, intrinsics, distCoeffs, rvec, tvec, false, cv::SOLVEPNP_EPNP);

    std::cout << rvec << "---" << cv::norm(rvec - trueRvec) << std::endl;
    std::cout << tvec << std::endl;

    std::cout << "---------------Ransac--------------------" << std::endl;
    cv::solvePnPRansac(opoints, projectedPoints, intrinsics, distCoeffs, rvec, tvec, false, 100, 2);
    std::cout << rvec << "---" << cv::norm(rvec - trueRvec) << std::endl;
    std::cout << tvec << std::endl;

    system("pause");
    return 0;
}

Code adapted, with minor modifications, from: https://blog.csdn.net/xuelangwin/article/details/80847337
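A closing note on the extrinsic parameters themselves: the rvec returned by solvePnP/solvePnPRansac is a rotation vector, not a rotation matrix. If the full [R|t] extrinsic matrix is needed, cv::Rodrigues converts the vector to a 3x3 matrix. The short sketch below uses made-up rvec/tvec values purely to show the assembly.

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // Example rotation vector and translation vector, e.g. as returned by solvePnP.
    cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0.1, 0.2, 0.3);
    cv::Mat tvec = (cv::Mat_<double>(3, 1) << 0.5, 0.0, 1.0);

    cv::Mat R;
    cv::Rodrigues(rvec, R); // 3x3 rotation matrix

    cv::Mat extrinsic(3, 4, CV_64FC1);            // [R | t]
    R.copyTo(extrinsic(cv::Rect(0, 0, 3, 3)));    // left 3x3 block
    tvec.copyTo(extrinsic(cv::Rect(3, 0, 1, 3))); // last column
    std::cout << "extrinsic [R|t]:\n" << extrinsic << std::endl;
    return 0;
}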


