1
0
mirror of https://github.com/opencv/opencv_contrib.git synced 2025-10-17 15:26:00 +08:00

Fix aruco detect_markers tutorial and code example

This commit is contained in:
Sergei Shutov
2022-12-21 17:23:03 +02:00
parent e22e185a35
commit 1b130dfd45
2 changed files with 44 additions and 10 deletions

View File

@@ -156,13 +156,28 @@ int main(int argc, char *argv[]) {
vector< int > ids;
vector< vector< Point2f > > corners, rejected;
vector< Vec3d > rvecs, tvecs;
// detect markers and estimate pose
detector.detectMarkers(image, corners, ids, rejected);
if(estimatePose && ids.size() > 0)
aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
tvecs);
int nMarkers = corners.size();
vector<Vec3d> rvecs(nMarkers), tvecs(nMarkers);
if(estimatePose && ids.size() > 0) {
// float markerLength = 0.05;
// Set coordinate system
cv::Mat objPoints(4, 1, CV_32FC3);
objPoints.ptr<Vec3f>(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0);
// Calculate pose for each marker
for (int i = 0; i < nMarkers; i++) {
solvePnP(objPoints, corners.at(i), camMatrix, distCoeffs, rvecs.at(i), tvecs.at(i));
}
}
double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
totalTime += currentTime;

View File

@@ -273,15 +273,21 @@ The camera pose relative to the marker is a 3d transformation from the marker co
camera coordinate system. It is specified by rotation and translation vectors (see `solvePnP()` function for more
information).
The pose of each detected marker can be estimated individually with `solvePnP()`, using the marker's corners and its object points in the marker coordinate system:
@code{.cpp}
cv::Mat cameraMatrix, distCoeffs;
// You can read camera parameters from tutorial_camera_params.yml
readCameraParameters(cameraParamsFilename, cameraMatrix, distCoeffs); // This function is implemented in aruco_samples_utility.hpp
std::vector<cv::Vec3d> rvecs, tvecs;
cv::aruco::estimatePoseSingleMarkers(markerCorners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
// Set coordinate system
cv::Mat objPoints(4, 1, CV_32FC3);
...
// Calculate pose for each marker
for (int i = 0; i < nMarkers; i++) {
solvePnP(objPoints, corners.at(i), cameraMatrix, distCoeffs, rvecs.at(i), tvecs.at(i));
}
@endcode
- The `corners` parameter is the vector of marker corners returned by the `detectMarkers()` function; `corners.at(i)` holds the four image points of the i-th marker passed to `solvePnP()`.
@@ -321,6 +327,7 @@ cv::VideoCapture inputVideo;
inputVideo.open(0);
cv::Mat cameraMatrix, distCoeffs;
float markerLength = 0.05;
// You can read camera parameters from tutorial_camera_params.yml
readCameraParameters(cameraParamsFilename, cameraMatrix, distCoeffs); // This function is implemented in aruco_samples_utility.hpp
@@ -340,10 +347,22 @@ while (inputVideo.grab()) {
// If at least one marker detected
if (ids.size() > 0) {
std::vector<cv::Vec3d> rvecs, tvecs;
cv::aruco::drawDetectedMarkers(imageCopy, corners, ids);
cv::aruco::estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
int nMarkers = corners.size();
std::vector<cv::Vec3d> rvecs(nMarkers), tvecs(nMarkers);
// Set coordinate system
cv::Mat objPoints(4, 1, CV_32FC3);
objPoints.ptr<cv::Vec3f>(0)[0] = cv::Vec3f(-markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<cv::Vec3f>(0)[1] = cv::Vec3f(markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<cv::Vec3f>(0)[2] = cv::Vec3f(markerLength/2.f, -markerLength/2.f, 0);
objPoints.ptr<cv::Vec3f>(0)[3] = cv::Vec3f(-markerLength/2.f, -markerLength/2.f, 0);
// Calculate pose for each marker
for (int i = 0; i < nMarkers; i++) {
solvePnP(objPoints, corners.at(i), cameraMatrix, distCoeffs, rvecs.at(i), tvecs.at(i));
}
// Draw axis for each marker
for(unsigned int i = 0; i < ids.size(); i++)