aruco: use densely packed array for pose computation
Allows passing a single Mat or vector<Vec3d> for rvecs and tvecs, so that they are allocated at once.
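A minimal caller-side sketch of what this enables (not part of the diff below; the wrapper name, the detected `corners`, the calibration inputs and the 0.05 m marker length are illustrative):

``` c++
#include <opencv2/aruco.hpp>
#include <vector>

// Hypothetical wrapper: rvecs/tvecs can now be a plain std::vector<cv::Vec3d>
// (or a single CV_64FC3 cv::Mat) allocated in one block, instead of one
// 3x1 cv::Mat per marker.
void estimateMarkerPoses(const std::vector<std::vector<cv::Point2f>> &corners,
                         const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs,
                         std::vector<cv::Vec3d> &rvecs, std::vector<cv::Vec3d> &tvecs) {
    // markerLength = 0.05 is a placeholder; use the real printed marker size.
    cv::aruco::estimatePoseSingleMarkers(corners, 0.05f, cameraMatrix, distCoeffs,
                                         rvecs, tvecs);
}
```

Previously the same call required `std::vector<cv::Mat>` outputs, with a separate 3x1 matrix allocated for every marker.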
@@ -190,9 +190,9 @@ CV_EXPORTS void detectMarkers(InputArray image, Dictionary dictionary, OutputArr
  * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  * @param distCoeffs vector of distortion coefficients
  * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
- * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Mat>>).
+ * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Vec3d>>).
  * Each element in rvecs corresponds to the specific marker in imgPoints.
- * @param tvecs array of output translation vectors (e.g. std::vector<cv::Mat>>).
+ * @param tvecs array of output translation vectors (e.g. std::vector<cv::Vec3d>>).
  * Each element in tvecs corresponds to the specific marker in imgPoints.
  *
  * This function receives the detected markers and returns their pose estimation respect to
@@ -207,7 +207,7 @@ int main(int argc, char *argv[]) {

         vector< int > ids;
         vector< vector< Point2f > > corners, rejected;
-        Mat rvec, tvec;
+        Vec3d rvec, tvec;

         // detect markers
         aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
@@ -204,7 +204,7 @@ int main(int argc, char *argv[]) {
         vector< int > markerIds, charucoIds;
         vector< vector< Point2f > > markerCorners, rejectedMarkers;
         vector< Point2f > charucoCorners;
-        Mat rvec, tvec;
+        Vec3d rvec, tvec;

         // detect markers
         aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams,
@@ -202,7 +202,7 @@ int main(int argc, char *argv[]) {
         vector< int > markerIds;
         vector< Vec4i > diamondIds;
         vector< vector< Point2f > > markerCorners, rejectedMarkers, diamondCorners;
-        vector< Mat > rvecs, tvecs;
+        vector< Vec3d > rvecs, tvecs;

         // detect markers
         aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams,
@@ -194,7 +194,7 @@ int main(int argc, char *argv[]) {

         vector< int > ids;
         vector< vector< Point2f > > corners, rejected;
-        vector< Mat > rvecs, tvecs;
+        vector< Vec3d > rvecs, tvecs;

         // detect markers and estimate pose
         aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
@@ -821,9 +821,9 @@ void detectMarkers(InputArray _image, Dictionary dictionary, OutputArrayOfArrays
  */
 class SinglePoseEstimationParallel : public ParallelLoopBody {
     public:
-    SinglePoseEstimationParallel(Mat *_markerObjPoints, InputArrayOfArrays _corners,
+    SinglePoseEstimationParallel(Mat& _markerObjPoints, InputArrayOfArrays _corners,
                                  InputArray _cameraMatrix, InputArray _distCoeffs,
-                                 OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs)
+                                 Mat& _rvecs, Mat& _tvecs)
         : markerObjPoints(_markerObjPoints), corners(_corners), cameraMatrix(_cameraMatrix),
           distCoeffs(_distCoeffs), rvecs(_rvecs), tvecs(_tvecs) {}

@@ -832,18 +832,18 @@ class SinglePoseEstimationParallel : public ParallelLoopBody {
         const int end = range.end;

         for(int i = begin; i < end; i++) {
-            solvePnP(*markerObjPoints, corners.getMat(i), cameraMatrix, distCoeffs, rvecs.getMat(i),
-                     tvecs.getMat(i));
+            solvePnP(markerObjPoints, corners.getMat(i), cameraMatrix, distCoeffs,
+                     rvecs.at<Vec3d>(0, i), tvecs.at<Vec3d>(0, i));
         }
     }

     private:
     SinglePoseEstimationParallel &operator=(const SinglePoseEstimationParallel &); // to quiet MSVC

-    Mat *markerObjPoints;
+    Mat& markerObjPoints;
     InputArrayOfArrays corners;
     InputArray cameraMatrix, distCoeffs;
-    OutputArrayOfArrays rvecs, tvecs;
+    Mat& rvecs, tvecs;
 };

@@ -860,13 +860,10 @@ void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength,
     Mat markerObjPoints;
     _getSingleMarkerObjectPoints(markerLength, markerObjPoints);
     int nMarkers = (int)_corners.total();
-    _rvecs.create(nMarkers, 1, CV_32FC1);
-    _tvecs.create(nMarkers, 1, CV_32FC1);
+    _rvecs.create(nMarkers, 1, CV_64FC3);
+    _tvecs.create(nMarkers, 1, CV_64FC3);

-    for(int i = 0; i < nMarkers; i++) {
-        _rvecs.create(3, 1, CV_64FC1, i, true);
-        _tvecs.create(3, 1, CV_64FC1, i, true);
-    }
+    Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();

     //// for each marker, calculate its pose
     // for (int i = 0; i < nMarkers; i++) {
@@ -876,8 +873,8 @@ void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength,

     // this is the parallel call for the previous commented loop (result is equivalent)
     parallel_for_(Range(0, nMarkers),
-                  SinglePoseEstimationParallel(&markerObjPoints, _corners, _cameraMatrix,
-                                               _distCoeffs, _rvecs, _tvecs));
+                  SinglePoseEstimationParallel(markerObjPoints, _corners, _cameraMatrix,
+                                               _distCoeffs, rvecs, tvecs));
 }

@@ -515,7 +515,7 @@ void CV_CharucoDiamondDetection::run(int) {
         }

         // estimate diamond pose
-        vector< Mat > estimatedRvec, estimatedTvec;
+        vector< Vec3d > estimatedRvec, estimatedTvec;
         aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, cameraMatrix,
                                          distCoeffs, estimatedRvec, estimatedTvec);

@@ -64,7 +64,7 @@ The aruco module provides a specific function, ```estimatePoseBoard()```, to per
 cv::aruco::detectMarkers(inputImage, board.dictionary, markerCorners, markerIds);
 // if at least one marker detected
 if(markerIds.size() > 0) {
-    cv::Mat rvec, tvec;
+    cv::Vec3d rvec, tvec;
     int valid = cv::aruco::estimatePoseBoard(markerCorners, markerIds, board, cameraMatrix, distCoeffs, rvec, tvec);
 }
 ```
@@ -181,7 +181,7 @@ Finally, a full example of board detection:
         if (ids.size() > 0) {
             cv::aruco::drawDetectedMarkers(imageCopy, corners, ids);

-            cv::Mat rvec, tvec;
+            cv::Vec3d rvec, tvec;
             int valid = estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs, rvec, tvec);

             // if at least one board marker detected
@@ -256,7 +256,7 @@ The aruco module provides a function to estimate the poses of all the detected m
 ``` c++
 Mat cameraMatrix, distCoeffs;
 ...
-vector< Mat > rvecs, tvecs;
+vector< Vec3d > rvecs, tvecs;
 cv::aruco::estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
 ```

@@ -294,7 +294,7 @@ A full example of ChArUco detection with pose estimation:
         // if at least one charuco corner detected
         if(charucoIds.size() > 0) {
             cv::aruco::drawDetectedCornersCharuco(imageCopy, charucoCorners, charucoIds, cv::Scalar(255, 0, 0));
-            cv::Mat rvec, tvec;
+            cv::Vec3d rvec, tvec;
             bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, cameraMatrix, distCoeffs, rvec, tvec);
             // if charuco pose is valid
             if(valid)
@@ -133,7 +133,7 @@ i.e. using the ```estimatePoseSingleMarkers()``` function. For instance:
 cv::aruco::detectCharucoDiamond(inputImage, markerCorners, markerIds, squareLength / markerLength, diamondCorners, diamondIds);

 // estimate poses
-std::vector<cv::Mat> rvecs, tvecs;
+std::vector<cv::Vec3d> rvecs, tvecs;
 cv::aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, camMatrix, distCoeffs, rvecs, tvecs);

 // draw axis
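Because the outputs are now created as a single nMarkers x 1, CV_64FC3 array, a caller that passes one `cv::Mat` can read each pose back with `Mat::at<Vec3d>`. A minimal sketch of that consumption path (not part of the commit; the helper name, the dictionary argument and the marker/axis lengths are illustrative):

``` c++
#include <opencv2/aruco.hpp>
#include <vector>

// Hypothetical helper: estimate poses into one packed Mat and draw each axis.
void drawMarkerAxes(cv::Mat &image, const cv::aruco::Dictionary &dictionary,
                    const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs) {
    std::vector<int> ids;
    std::vector<std::vector<cv::Point2f>> corners;
    cv::aruco::detectMarkers(image, dictionary, corners, ids);
    if(ids.empty()) return;

    cv::Mat rvecs, tvecs;  // filled by the call below as nMarkers x 1, CV_64FC3
    cv::aruco::estimatePoseSingleMarkers(corners, 0.05f, cameraMatrix, distCoeffs,
                                         rvecs, tvecs);

    for(int i = 0; i < rvecs.rows; i++) {
        cv::Vec3d rvec = rvecs.at<cv::Vec3d>(i);  // densely packed pose of marker i
        cv::Vec3d tvec = tvecs.at<cv::Vec3d>(i);
        cv::aruco::drawAxis(image, cameraMatrix, distCoeffs, rvec, tvec, 0.1f);
    }
}
```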