diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt
index 1c580439a..0dd5f379e 100644
--- a/modules/dnn/CMakeLists.txt
+++ b/modules/dnn/CMakeLists.txt
@@ -52,12 +52,10 @@ endif()
 # ----------------------------------------------------------------------------
 # Download pre-trained models for complex testing on GoogLeNet and AlexNet
 # ----------------------------------------------------------------------------
-OCV_OPTION(${the_module}_DOWNLOAD_CAFFE_MODELS "Use GoogLeNet Caffe model for testing" OFF IF BUILD_TESTS AND PYTHON2_EXECUTABLE AND DEFINED ENV{OPENCV_TEST_DATA_PATH})
-if(BUILD_TESTS AND PYTHON2_EXECUTABLE AND DEFINED ENV{OPENCV_TEST_DATA_PATH}
-   AND (DOWNLOAD_EXTERNAL_TEST_DATA OR ${the_module}_DOWNLOAD_CAFFE_MODELS))
+OCV_OPTION(${the_module}_DOWNLOAD_CAFFE_MODELS "Use GoogLeNet Caffe model for testing" OFF IF BUILD_TESTS AND DEFINED ENV{OPENCV_TEST_DATA_PATH})
+if(BUILD_TESTS AND DEFINED ENV{OPENCV_TEST_DATA_PATH} AND (DOWNLOAD_EXTERNAL_TEST_DATA OR ${the_module}_DOWNLOAD_CAFFE_MODELS))
     add_custom_command( TARGET opencv_test_${name} POST_BUILD
-                        COMMAND ${PYTHON2_EXECUTABLE} download_model.py test_models.json
-                        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/scripts )
+                        COMMAND ${CMAKE_COMMAND} -Dmodel=GoogleNet -P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/download_model.cmake)
     add_definitions(-DENABLE_CAFFE_MODEL_TESTS=1)
 endif()
 
@@ -68,21 +66,29 @@ OCV_OPTION(${the_module}_BUILD_TORCH_IMPORTER "Build Torch model importer (exper
 if(${the_module}_BUILD_TORCH_IMPORTER)
     add_definitions(-DENABLE_TORCH_IMPORTER=1)
     ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4702 /wd4127 /wd4267) #supress warnings in original torch files
+
+    if(NOT DEFINED HAVE_TORCH_EXE)
+        execute_process(COMMAND th ${CMAKE_CURRENT_SOURCE_DIR}/testdata/dnn/torch/torch_nn_echo.lua RESULT_VARIABLE TORCH_EXE_STATUS)
+        set(HAVE_TORCH_EXE OFF)
+        if(${TORCH_EXE_STATUS} EQUAL 0)
+            set(HAVE_TORCH_EXE ON)
+        endif()
+        set(HAVE_TORCH_EXE ${HAVE_TORCH_EXE} CACHE INTERNAL "Have torch binary")
+    endif()
 endif()
 
 # ----------------------------------------------------------------------------
 # Generating test data for Torch importer
 # ----------------------------------------------------------------------------
-OCV_OPTION(${the_module}_BUILD_TORCH_TESTS "Build Torch tests (installed torch7 with nn module is required)" ON IF BUILD_TESTS AND ${the_module}_BUILD_TORCH_IMPORTER)
+OCV_OPTION(${the_module}_BUILD_TORCH_TESTS "Build Torch tests (installed torch7 with nn module is required)" ON IF BUILD_TESTS AND ${the_module}_BUILD_TORCH_IMPORTER AND HAVE_TORCH_EXE)
 if(${the_module}_BUILD_TORCH_TESTS)
 
     if(NOT DEFINED ENV{OPENCV_TEST_DATA_PATH})
         message(FATAL_ERROR "OPENCV_TEST_DATA_PATH environment variable was not specified")
     endif()
 
-    execute_process(COMMAND th ${CMAKE_CURRENT_SOURCE_DIR}/testdata/dnn/torch/torch_nn_echo.lua RESULT_VARIABLE TORCH_STATUS)
-    if(TORCH_STATUS)
-        message(FATAL_ERROR "Torch executable \"th\" not found (status: ${TORCH_STATUS}) or nn module not found")
+    if(NOT HAVE_TORCH_EXE)
+        message(FATAL_ERROR "Torch executable \"th\" not found or nn module not found")
     endif()
 
     add_custom_command( TARGET opencv_test_${name} POST_BUILD
diff --git a/modules/dnn/cmake/download_model.cmake b/modules/dnn/cmake/download_model.cmake
new file mode 100644
index 000000000..f92e59269
--- /dev/null
+++ b/modules/dnn/cmake/download_model.cmake
@@ -0,0 +1,31 @@
+set(GoogleNet_url "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel")
+set(GoogleNet_dst "$ENV{OPENCV_TEST_DATA_PATH}/dnn/bvlc_googlenet.caffemodel")
+set(GoogleNet_sha "405fc5acd08a3bb12de8ee5e23a96bec22f08204")
+
+set(VGG16_url "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_16_layers.caffemodel")
+set(VGG16_dst "$ENV{OPENCV_TEST_DATA_PATH}/dnn/VGG_ILSVRC_16_layers.caffemodel")
+
+set(voc-fcn32s_url "http://dl.caffe.berkeleyvision.org/fcn32s-heavy-pascal.caffemodel")
+set(voc-fcn32s_dst "$ENV{OPENCV_TEST_DATA_PATH}/dnn/fcn32s-heavy-pascal.caffemodel")
+
+if(NOT model)
+    set(model "GoogleNet")
+endif()
+
+message(STATUS "Downloading ${${model}_url} to ${${model}_dst}")
+
+if(NOT EXISTS ${${model}_dst})
+    if(DEFINED ${model}_sha)
+        file(DOWNLOAD ${${model}_url} ${${model}_dst} SHOW_PROGRESS EXPECTED_HASH SHA1=${${model}_sha} STATUS status_vec)
+    else()
+        file(DOWNLOAD ${${model}_url} ${${model}_dst} SHOW_PROGRESS STATUS status_vec)
+    endif()
+
+    list(GET status_vec 0 status)
+    list(GET status_vec 1 status_msg)
+    if(status EQUAL 0)
+        message(STATUS "Ok! ${status_msg}")
+    else()
+        message(STATUS "Fail! ${status_msg}")
+    endif()
+endif()
diff --git a/modules/dnn/scripts/download_model.py b/modules/dnn/scripts/download_model.py
deleted file mode 100644
index d2951f5ea..000000000
--- a/modules/dnn/scripts/download_model.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-import os
-import sys
-import time
-import urllib
-import hashlib
-import argparse
-import json
-
-
-def reporthook(count, block_size, total_size):
-    """
-    From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
-    """
-    global start_time
-    global prev_duration
-    if count == 0:
-        start_time = time.time()
-        prev_duration = -1
-        return
-    duration = max(1, time.time() - start_time)
-    if int(duration) == int(prev_duration):
-        return
-
-    progress_size = int(count * block_size)
-    speed = int(progress_size / (1024 * duration))
-    percent = int(count * block_size * 100 / total_size)
-    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
-                     (percent, progress_size / (1024 * 1024), speed, duration))
-    sys.stdout.flush()
-    prev_duration = duration
-
-
-# Function for checking SHA1.
-def model_checks_out(filename, sha1):
-    with open(filename, 'r') as f:
-        return hashlib.sha1(f.read()).hexdigest() == sha1
-
-def model_download(filename, url, sha1):
-    # Check if model exists.
-    if os.path.exists(filename) and model_checks_out(filename, sha1):
-        print("Model {} already exists.".format(filename))
-        return
-
-    # Download and verify model.
-    urllib.urlretrieve(url, filename, reporthook)
-    print model_checks_out(filename, sha1)
-    if not model_checks_out(filename, sha1):
-        print("ERROR: model {} did not download correctly!".format(url))
-        sys.exit(1)
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="Downloading trained model binaries.")
-    parser.add_argument("download_list")
-    args = parser.parse_args()
-
-    test_dir = os.environ.get("OPENCV_TEST_DATA_PATH")
-    if not test_dir:
-        print "ERROR: OPENCV_TEST_DATA_PATH environment not specified"
-        sys.exit(1)
-
-    try:
-        with open(args.download_list, 'r') as f:
-            models_to_download = json.load(f)
-    except:
-        print "ERROR: Can't pasrse {}".format(args.download_list)
-        sys.exit(1)
-
-    for model_name in models_to_download:
-        model = models_to_download[model_name]
-
-        dst_dir = os.path.join(test_dir, os.path.dirname(model['file']))
-        dst_file = os.path.join(test_dir, model['file'])
-        if not os.path.exists(dst_dir):
-            print "ERROR: Can't find module testdata path '{}'".format(dst_dir)
-            sys.exit(1)
-
-        print "Downloading model '{}' to {} from {} ...".format(model_name, dst_file, model['url'])
-        model_download(dst_file, model['url'], model['sha1'])
\ No newline at end of file
diff --git a/modules/dnn/scripts/test_models.json b/modules/dnn/scripts/test_models.json
deleted file mode 100644
index 47b131531..000000000
--- a/modules/dnn/scripts/test_models.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "googlenet": {
-        "file": "dnn/bvlc_googlenet.caffemodel",
-        "url": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
-        "sha1": "405fc5acd08a3bb12de8ee5e23a96bec22f08204"
-    }
-}
\ No newline at end of file
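With this change the model download no longer needs Python 2: the POST_BUILD rule runs CMake in script mode, and the same script can be invoked by hand. A minimal sketch of such an invocation, assuming OPENCV_TEST_DATA_PATH already points at the test data directory (the path below is only a placeholder) and using the model names defined in download_model.cmake (GoogleNet, VGG16, voc-fcn32s):

    # placeholder path; use your own opencv test data checkout
    export OPENCV_TEST_DATA_PATH=/path/to/opencv_extra/testdata
    cmake -Dmodel=GoogleNet -P modules/dnn/cmake/download_model.cmake

Only GoogleNet carries a SHA1 in the script, so downloads of the other models are not hash-verified.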