1
0
mirror of https://github.com/opencv/opencv_contrib.git synced 2025-10-17 07:04:18 +08:00

UNFINISHED: get rid of pointers

This commit is contained in:
Bence Magyar
2014-09-02 18:03:51 +02:00
parent 3f620fd19f
commit a8a4524ddb
9 changed files with 73 additions and 89 deletions

View File

@@ -127,6 +127,7 @@ public:
* CV_32F is the only supported data type.
* @param [in] dstPC The input point cloud for the scene. It is assumed that the model is registered on the scene. Scene remains static. Expected to have the normals (Nx6). Currently, CV_32F is the only supported data type.
* @param [out] residual The output registration error.
* @param [out] pose Transformation between srcPC and dstPC.
* \return On successful termination, the function returns 0.
*
* \details It is assumed that the model is registered on the scene. Scene remains static, while the model transforms. The output poses transform the models onto the scene. Because of the point to plane minimization, the scene is expected to have the normals available. Expected to have the normals (Nx6).
@@ -139,12 +140,12 @@ public:
* @param [in] srcPC The input point cloud for the model. Expected to have the normals (Nx6). Currently,
* CV_32F is the only supported data type.
* @param [in] dstPC The input point cloud for the scene. Currently, CV_32F is the only supported data type.
* @param [out] poses List output of poses. For more detailed information check out Pose3D.
* @param [in,out] poses On input, the initial poses to start from; on output, the list of refined poses.
* \return On successful termination, the function returns 0.
*
* \details It is assumed that the model is registered on the scene. Scene remains static, while the model transforms. The output poses transform the models onto the scene. Because of the point to plane minimization, the scene is expected to have the normals available. Expected to have the normals (Nx6).
*/
int registerModelToScene(const Mat& srcPC, const Mat& dstPC, std::vector<Pose3D*>& poses);
int registerModelToScene(const Mat& srcPC, const Mat& dstPC, std::vector<Pose3DPtr>& poses);
private:
float m_tolerance;

View File

@@ -41,6 +41,7 @@
#ifndef __OPENCV_SURFACE_MATCHING_POSE3D_HPP__
#define __OPENCV_SURFACE_MATCHING_POSE3D_HPP__
#include "opencv2/core/cvstd.hpp" // cv::Ptr
#include <vector>
#include <string>
@@ -48,6 +49,13 @@ namespace cv
{
namespace ppf_match_3d
{
class Pose3D;
typedef Ptr<Pose3D> Pose3DPtr;
class PoseCluster3D;
typedef Ptr<PoseCluster3D> PoseCluster3DPtr;
/**
* @class Pose3D
* @brief Class, allowing the storage of a pose. The data structure stores both
@@ -105,7 +113,7 @@ public:
void appendPose(double IncrementalPose[16]);
void printPose();
Pose3D* clone();
Pose3DPtr clone();
int writePose(FILE* f);
int readPose(FILE* f);
@@ -135,7 +143,7 @@ public:
id=0;
}
PoseCluster3D(Pose3D* newPose)
PoseCluster3D(Pose3DPtr newPose)
{
poseList.clear();
poseList.push_back(newPose);
@@ -143,7 +151,7 @@ public:
id=0;
}
PoseCluster3D(Pose3D* newPose, int newId)
PoseCluster3D(Pose3DPtr newPose, int newId)
{
poseList.push_back(newPose);
this->numVotes = newPose->numVotes;
@@ -158,18 +166,19 @@ public:
* in order to preserve the consistency
* \param [in] newPose Pose to add to the cluster
*/
void addPose(Pose3D* newPose);
void addPose(Pose3DPtr newPose);
int writePoseCluster(FILE* f);
int readPoseCluster(FILE* f);
int writePoseCluster(const std::string& FileName);
int readPoseCluster(const std::string& FileName);
std::vector < Pose3D* > poseList;
std::vector<Pose3DPtr> poseList;
int numVotes;
int id;
};
} // namespace ppf_match_3d
} // namespace cv

View File

@@ -86,7 +86,7 @@ typedef struct THash
* ppf_match_3d::PPF3DDetector detector(0.05, 0.05);
* detector.trainModel(pc);
* // Search the model in a given scene
* vector < Pose3D* > results;
* vector<Pose3DPtr> results;
* detector.match(pcTest, results, 1.0/5.0,0.05);
*
*/
@@ -134,7 +134,7 @@ public:
* @param [in] relativeSceneSampleStep The ratio of scene points to be used for the matching after sampling with relativeSceneDistance. For example, if this value is set to 1.0/5.0, every 5th point from the scene is used for pose estimation. This parameter allows an easy trade-off between speed and accuracy of the matching. Increasing the value leads to less points being used and in turn to a faster but less accurate pose computation. Decreasing the value has the inverse effect.
* @param [in] relativeSceneDistance Set the distance threshold relative to the diameter of the model. This parameter is equivalent to relativeSamplingStep in the training stage. This parameter acts like a prior sampling with the relativeSceneSampleStep parameter.
*/
void match(const Mat& scene, std::vector<Pose3D*>& results, const double relativeSceneSampleStep=1.0/5.0, const double relativeSceneDistance=0.03);
void match(const Mat& scene, std::vector<Pose3DPtr> &results, const double relativeSceneSampleStep=1.0/5.0, const double relativeSceneDistance=0.03);
void read(const FileNode& fn);
void write(FileStorage& fs) const;
@@ -162,7 +162,7 @@ private:
bool matchPose(const Pose3D& sourcePose, const Pose3D& targetPose);
int clusterPoses(Pose3D** poseList, int numPoses, std::vector<Pose3D*>& finalPoses);
void clusterPoses(std::vector<Pose3DPtr> poseList, int numPoses, std::vector<Pose3DPtr> &finalPoses);
bool trained;
};