
add libs hint on OS X for cmake

commit 9852e8096e (parent 97d49a8834)
Author: Wangyida
Date: 2015-08-26 21:24:33 +08:00
40 changed files with 105951 additions and 215 deletions

modules/cnn_3dobj/samples/demo_classify.cpp: 30 changes (Normal file → Executable file)

@@ -84,16 +84,15 @@ void listDir(const char *path, std::vector<string>& files, bool r)
int main(int argc, char** argv)
{
const String keys = "{help | | This sample will extract features from reference images and the target image for classification. You can add a mean_file if there is little variance in the data, such as human faces; otherwise it is not so useful}"
-"{src_dir | ../data/images_all/ | Source directory of the images used to extract gallery features.}"
-"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | Caffe model for feature extraction.}"
-"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting features from a single image and making a classification}"
-"{mean_file | no | The mean file generated by Caffe from all gallery images; it can be used for mean value subtraction from all images. If you want to use the mean file, set this to ../data/images_mean/triplet_mean.binaryproto.}"
-"{target_img | ../data/images_all/1_8.png | Path of the image waiting to be classified.}"
-"{feature_blob | feat | Name of the layer which will be used as the feature; in this network, ip1 or feat works well.}"
-"{num_candidate | 15 | Number of candidates in the gallery returned as the prediction result.}"
-"{device | CPU | Device type: CPU or GPU}"
-"{dev_id | 0 | Device id}";
+"{src_dir | ../data/images_all/ | Source directory of the images used to extract gallery features.}"
+"{caffemodel | ../../testdata/cv/3d_triplet_iter_30000.caffemodel | Caffe model for feature extraction.}"
+"{network_forIMG | ../../testdata/cv/3d_triplet_testIMG.prototxt | Network definition file used for extracting features from a single image and making a classification}"
+"{mean_file | no | The mean file generated by Caffe from all gallery images; it can be used for mean value subtraction from all images. If you want to use the mean file, set this to ../data/images_mean/triplet_mean.binaryproto.}"
+"{target_img | ../data/images_all/4_78.png | Path of the image waiting to be classified.}"
+"{feature_blob | feat | Name of the layer which will be used as the feature; in this network, ip1 or feat works well.}"
+"{num_candidate | 15 | Number of candidates in the gallery returned as the prediction result.}"
+"{device | CPU | Device type: CPU or GPU}"
+"{dev_id | 0 | Device id}";
/* get parameters from the command line */
cv::CommandLineParser parser(argc, argv, keys);
parser.about("Feature extraction and classification");
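For reference: the keys string above drives cv::CommandLineParser, which maps each "{name | default | help}" entry to a typed command-line option. A minimal, self-contained sketch of that pattern (not taken from this sample; the option names are only illustrative):

#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>

int main(int argc, char** argv)
{
    /* Each "{name | default | help}" entry declares one option. */
    const cv::String keys =
        "{help h | | print this message}"
        "{target_img | ../data/images_all/4_78.png | image to classify}"
        "{num_candidate | 15 | number of gallery candidates to report}";
    cv::CommandLineParser parser(argc, argv, keys);
    parser.about("CommandLineParser usage sketch");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    /* Values fall back to the defaults given in the keys string. */
    cv::String target_img = parser.get<cv::String>("target_img");
    int num_candidate = parser.get<int>("num_candidate");
    std::cout << "target_img = " << target_img
              << ", num_candidate = " << num_candidate << std::endl;
    return 0;
}

Passing, for example, --num_candidate=5 on the command line overrides the default from the keys string.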
@@ -111,18 +110,15 @@ int main(int argc, char** argv)
int num_candidate = parser.get<int>("num_candidate");
string device = parser.get<string>("device");
int dev_id = parser.get<int>("dev_id");
/* Initialize a network with the chosen device */
cv::cnn_3dobj::descriptorExtractor descriptor(device);
std::cout << "Using " << descriptor.getDeviceType() << std::endl;
/* Load the net with the Caffe-trained network parameters and structure */
if (strcmp(mean_file.c_str(), "no") == 0)
descriptor.loadNet(network_forIMG, caffemodel);
else
descriptor.loadNet(network_forIMG, caffemodel, mean_file);
std::vector<string> name_gallery;
/* List the file names under a given path */
listDir(src_dir.c_str(), name_gallery, false);
for (unsigned int i = 0; i < name_gallery.size(); i++)
@@ -135,16 +131,12 @@ int main(int argc, char** argv)
{
img_gallery.push_back(cv::imread(name_gallery[i], -1));
}
/* Extract features from a set of images */
descriptor.extract(img_gallery, feature_reference, feature_blob);
std::cout << std::endl << "---------- Prediction for " << target_img << " ----------" << std::endl;
cv::Mat img = cv::imread(target_img, -1);
std::cout << std::endl << "---------- Features of gallery images ----------" << std::endl;
std::vector<std::pair<string, float> > prediction;
/* Print features of the reference images. */
for (unsigned int i = 0; i < feature_reference.rows; i++)
std::cout << feature_reference.row(i) << endl;
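A side note on the gallery features printed row by row above: feature_reference behaves like a matrix with one descriptor row per gallery image. A small, generic sketch of building such a row-per-image matrix with cv::Mat::push_back, where random CV_32F rows stand in for the real descriptor.extract output (which is assumed, not reproduced, here):

#include <iostream>
#include <opencv2/core.hpp>

int main()
{
    /* Build a gallery feature matrix: one CV_32F row per image,
       mirroring how feature_reference is printed row by row above. */
    cv::Mat feature_reference;
    for (int i = 0; i < 4; ++i)
    {
        /* Stand-in for extracting one gallery image's descriptor:
           here just a random 16-dimensional float row. */
        cv::Mat feat(1, 16, CV_32F);
        cv::randu(feat, cv::Scalar::all(0), cv::Scalar::all(1));
        feature_reference.push_back(feat);   /* appends as a new row */
    }
    std::cout << "gallery matrix: " << feature_reference.rows
              << " x " << feature_reference.cols << std::endl;
    for (int i = 0; i < feature_reference.rows; ++i)
        std::cout << feature_reference.row(i) << std::endl;
    return 0;
}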
@@ -155,10 +147,8 @@ int main(int argc, char** argv)
std::vector<std::vector<cv::DMatch> > matches;
/* Run a KNN match between the target and reference images. */
matcher.knnMatch(feature_test, feature_reference, matches, num_candidate);
/* Print the feature of the target image waiting to be classified. */
std::cout << std::endl << "---------- Features of target image: " << target_img << " ----------" << endl << feature_test << std::endl;
/* Print the top N predictions. */
std::cout << std::endl << "---------- Prediction result (Distance - File Name in Gallery) ----------" << std::endl;
for (size_t i = 0; i < matches[0].size(); ++i)
@@ -166,4 +156,4 @@ int main(int argc, char** argv)
std::cout << i << " - " << std::fixed << std::setprecision(2) << name_gallery[matches[0][i].trainIdx] << " - \"" << matches[0][i].distance << "\"" << std::endl;
}
return 0;
-}
+}
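Finally, a self-contained sketch of the KNN prediction step shown in the last hunks. The matcher's construction falls outside the lines quoted above, so a brute-force L2 matcher (cv::BFMatcher with cv::NORM_L2) is assumed, and the feature matrices are filled with random values only so the snippet runs on its own:

#include <iostream>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

int main()
{
    /* Gallery: 5 reference feature rows; query: 1 target feature row. */
    cv::Mat feature_reference(5, 16, CV_32F), feature_test(1, 16, CV_32F);
    cv::randu(feature_reference, cv::Scalar::all(0), cv::Scalar::all(1));
    cv::randu(feature_test, cv::Scalar::all(0), cv::Scalar::all(1));

    /* Brute-force L2 matcher; assumed to match the sample's prediction step. */
    cv::BFMatcher matcher(cv::NORM_L2);
    std::vector<std::vector<cv::DMatch> > matches;
    const int num_candidate = 3;
    matcher.knnMatch(feature_test, feature_reference, matches, num_candidate);

    /* matches[0] holds the k nearest gallery rows for the single query row,
       sorted by ascending distance; trainIdx indexes the gallery. */
    for (size_t i = 0; i < matches[0].size(); ++i)
        std::cout << i << ": gallery row " << matches[0][i].trainIdx
                  << ", distance " << matches[0][i].distance << std::endl;
    return 0;
}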