
Merge pull request #2396 from cudawarped:fix_python_cudawarping_cudaarithm

Add python bindings to cudaobjdetect, cudawarping and cudaarithm

* Overload cudawarping functions to generate correct python bindings.
Add python wrapper to convolution function (see the resize/convolve sketch below).

* Added shift and hog.

* Moved cuda python tests to this repo and added python bindings to SURF (see the SURF sketch below).

* Fix SURF documentation and allow meanShiftSegmentation to create the GpuMat internally if one is not passed, for python bindings consistency.

* Add correct cuda SURF test case.

* Fix python mog and mog2 bindings, add tests and correct cudawarping documentation (see the MOG2 sketch below).

* Updated KeyPoints in the cuda::ORB::convert python wrapper to be an output argument (see the ORB sketch below).

* Add changes suggested by alalek

* Added changes suggested by asmorkalov
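
The sketches below are illustrative only and are not part of the commit. First, a minimal example of the cudawarping and convolution bindings referenced in the first bullet; the image sizes, kernel and random input data are assumptions:

import cv2 as cv
import numpy as np

# Resize on the GPU through the cudawarping binding.
npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat(npMat)
cuResized = cv.cuda.resize(cuMat, (64, 64))

# Convolve a CV_32FC1 image with a small kernel through the cuda::Convolution wrapper.
cuImg = cv.cuda_GpuMat(np.random.random((128, 128)).astype(np.float32))
cuKernel = cv.cuda_GpuMat(np.ones((3, 3), np.float32))
conv = cv.cuda.createConvolution()
cuResult = conv.convolve(cuImg, cuKernel)
print(cuResized.size(), cuResult.size())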
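
A rough sketch of the SURF binding mentioned above; the Hessian threshold, the empty mask, and the exact wrapper names (create, detect, downloadKeypoints) are assumptions inferred from the cuda::SURF_CUDA interface:

import cv2 as cv
import numpy as np

npMat = (np.random.random((256, 256)) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat(npMat)

cuSurf = cv.cuda_SURF_CUDA.create(100)           # 100 = illustrative Hessian threshold
kpsGpu = cuSurf.detect(cuMat, cv.cuda_GpuMat())  # keypoints kept on the GPU
kps = cuSurf.downloadKeypoints(kpsGpu)           # download as a list of cv.KeyPoint
print(len(kps))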
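
A minimal sketch of the fixed MOG2 binding; the learning rate of -1 and the explicit stream argument are assumptions based on the CUDA apply() signature, which has no default arguments:

import cv2 as cv
import numpy as np

cuBgSub = cv.cuda.createBackgroundSubtractorMOG2()
for _ in range(5):
    frame = (np.random.random((72, 96, 3)) * 255).astype(np.uint8)
    cuFrame = cv.cuda_GpuMat(frame)
    # apply(image, learningRate, stream) -> fgmask; -1 lets the model choose the rate.
    cuFgMask = cuBgSub.apply(cuFrame, -1.0, cv.cuda.Stream_Null())
print(cuFgMask.size())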
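
And a sketch of the updated ORB convert wrapper, where the KeyPoint vector is now returned instead of being passed in; the feature count and the empty mask are illustrative assumptions:

import cv2 as cv
import numpy as np

npMat = (np.random.random((256, 256)) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat(npMat)

cuOrb = cv.cuda_ORB.create(nfeatures=500)
kpsGpu, descsGpu = cuOrb.detectAndComputeAsync(cuMat, cv.cuda_GpuMat())
kps = cuOrb.convert(kpsGpu)                      # GpuMat of keypoints -> list of cv.KeyPoint
print(len(kps), descsGpu.size())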
cudawarped
2020-01-29 09:54:42 +00:00
committed by GitHub
parent 223a3cdab8
commit d7d6360fce
17 changed files with 780 additions and 23 deletions


@@ -162,6 +162,22 @@ public:
                        std::vector<Point>& found_locations,
                        std::vector<double>* confidences = NULL) = 0;

    CV_WRAP inline void detect(InputArray img,
                               CV_OUT std::vector<Point>& found_locations,
                               CV_OUT std::vector<double>& confidences) {
        detect(img, found_locations, &confidences);
    }

    /** @brief Performs object detection without a multi-scale window.
    @param img Source image. CV_8UC1 and CV_8UC4 types are supported for now.
    @param found_locations Left-top corner points of detected objects boundaries.
    */
    CV_WRAP inline void detectWithoutConf(InputArray img,
                                          CV_OUT std::vector<Point>& found_locations) {
        detect(img, found_locations, NULL);
    }

    /** @brief Performs object detection with a multi-scale window.
    @param img Source image. See cuda::HOGDescriptor::detect for type limitations.
@@ -172,6 +188,22 @@ public:
                                  std::vector<Rect>& found_locations,
                                  std::vector<double>* confidences = NULL) = 0;

    CV_WRAP inline void detectMultiScale(InputArray img,
                                         CV_OUT std::vector<Rect>& found_locations,
                                         CV_OUT std::vector<double>& confidences) {
        detectMultiScale(img, found_locations, &confidences);
    }

    /** @brief Performs object detection with a multi-scale window.
    @param img Source image. See cuda::HOGDescriptor::detect for type limitations.
    @param found_locations Detected objects boundaries.
    */
    CV_WRAP inline void detectMultiScaleWithoutConf(InputArray img,
                                                    CV_OUT std::vector<Rect>& found_locations) {
        detectMultiScale(img, found_locations, NULL);
    }

    /** @brief Returns block descriptors computed for the whole image.
    @param img Source image. See cuda::HOGDescriptor::detect for type limitations.


@@ -0,0 +1,38 @@
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np

from tests_common import NewOpenCVTests, unittest

class cudaobjdetect_test(NewOpenCVTests):
    def setUp(self):
        super(cudaobjdetect_test, self).setUp()
        if not cv.cuda.getCudaEnabledDeviceCount():
            self.skipTest("No CUDA-capable device is detected")

    @unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
                     "OPENCV_TEST_DATA_PATH is not defined")
    def test_hog(self):
        img_path = os.environ['OPENCV_TEST_DATA_PATH'] + '/gpu/caltech/image_00000009_0.png'
        npMat = cv.cvtColor(cv.imread(img_path), cv.COLOR_BGR2BGRA)

        cuMat = cv.cuda_GpuMat(npMat)
        cuHog = cv.cuda.HOG_create()
        cuHog.setSVMDetector(cuHog.getDefaultPeopleDetector())

        loc, conf = cuHog.detect(cuMat)
        self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 2)

        loc = cuHog.detectWithoutConf(cuMat)
        self.assertTrue(len(loc) > 0 and len(loc[0]) == 2)

        loc = cuHog.detectMultiScaleWithoutConf(cuMat)
        self.assertTrue(len(loc) > 0 and len(loc[0]) == 4)

        cuHog.setGroupThreshold(0)
        loc, conf = cuHog.detectMultiScale(cuMat)
        self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 4)

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()