// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "test_precomp.hpp"

#ifdef HAVE_INF_ENGINE
#include <opencv2/core/utils/filesystem.hpp>

//
// Synchronize headers include statements with src/op_inf_engine.hpp
//
//#define INFERENCE_ENGINE_DEPRECATED // turn off deprecation warnings from IE
// there is no way to suppress warnings from IE only at this moment, so we are forced to suppress warnings globally
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996)  // was declared deprecated
#endif

#if defined(__GNUC__)
#pragma GCC visibility push(default)
#endif

#include <inference_engine.hpp>
#include <ie_icnn_network.hpp>
#include <ie_extension.h>

#if defined(__GNUC__)
#pragma GCC visibility pop
#endif

namespace opencv_test { namespace {

static void initDLDTDataPath()
{
#ifndef WINRT
    static bool initialized = false;
    if (!initialized)
    {
#if INF_ENGINE_RELEASE <= 2018050000
        const char* dldtTestDataPath = getenv("INTEL_CVSDK_DIR");
        if (dldtTestDataPath)
            cvtest::addDataSearchPath(dldtTestDataPath);
#else
        const char* omzDataPath = getenv("OPENCV_OPEN_MODEL_ZOO_DATA_PATH");
        if (omzDataPath)
            cvtest::addDataSearchPath(omzDataPath);
        const char* dnnDataPath = getenv("OPENCV_DNN_TEST_DATA_PATH");
        if (dnnDataPath)
            cvtest::addDataSearchPath(std::string(dnnDataPath) + "/omz_intel_models");
#endif
        initialized = true;
    }
#endif
}
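// Example environment setup (illustrative; the directories below are placeholders):
//   export OPENCV_DNN_TEST_DATA_PATH=/path/to/dnn/test/data          # models go under <path>/omz_intel_models
//   export OPENCV_OPEN_MODEL_ZOO_DATA_PATH=/path/to/open_model_zoo/models
// Both variables are optional; each one that is set is added to the test data search path above.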
using namespace cv;
using namespace cv::dnn;
using namespace InferenceEngine;

struct OpenVINOModelTestCaseInfo
{
    const char* modelPathFP32;
    const char* modelPathFP16;
};
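// The table below maps a model name to its FP32 and FP16 IR paths, relative to the data
// search path registered by initDLDTDataPath(); the ".xml"/".bin" extensions are appended
// by the tests when the files are resolved via findDataFile().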
static const std::map<std::string, OpenVINOModelTestCaseInfo>& getOpenVINOTestModels()
{
    static std::map<std::string, OpenVINOModelTestCaseInfo> g_models {
#if INF_ENGINE_RELEASE >= 2018050000 && \
    INF_ENGINE_RELEASE <= 2020999999  // don't use IRv5 models with 2020.1+
        // layout is defined by open_model_zoo/model_downloader
        // Downloaded using these parameters for Open Model Zoo downloader (2019R1):
        // ./downloader.py -o ${OPENCV_DNN_TEST_DATA_PATH}/omz_intel_models --cache_dir ${OPENCV_DNN_TEST_DATA_PATH}/.omz_cache/ \
        //     --name face-person-detection-retail-0002,face-person-detection-retail-0002-fp16,age-gender-recognition-retail-0013,age-gender-recognition-retail-0013-fp16,head-pose-estimation-adas-0001,head-pose-estimation-adas-0001-fp16,person-detection-retail-0002,person-detection-retail-0002-fp16,vehicle-detection-adas-0002,vehicle-detection-adas-0002-fp16
        { "age-gender-recognition-retail-0013", {
            "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013",
            "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013-fp16"
        }},
        { "face-person-detection-retail-0002", {
            "Retail/object_detection/face_pedestrian/rmnet-ssssd-2heads/0002/dldt/face-person-detection-retail-0002",
            "Retail/object_detection/face_pedestrian/rmnet-ssssd-2heads/0002/dldt/face-person-detection-retail-0002-fp16"
        }},
        { "head-pose-estimation-adas-0001", {
            "Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001",
            "Transportation/object_attributes/headpose/vanilla_cnn/dldt/head-pose-estimation-adas-0001-fp16"
        }},
        { "person-detection-retail-0002", {
            "Retail/object_detection/pedestrian/hypernet-rfcn/0026/dldt/person-detection-retail-0002",
            "Retail/object_detection/pedestrian/hypernet-rfcn/0026/dldt/person-detection-retail-0002-fp16"
        }},
        { "vehicle-detection-adas-0002", {
            "Transportation/object_detection/vehicle/mobilenet-reduced-ssd/dldt/vehicle-detection-adas-0002",
            "Transportation/object_detection/vehicle/mobilenet-reduced-ssd/dldt/vehicle-detection-adas-0002-fp16"
        }},
#endif
#if INF_ENGINE_RELEASE >= 2020010000
        // Downloaded using these parameters for Open Model Zoo downloader (2020.1):
        // ./downloader.py -o ${OPENCV_DNN_TEST_DATA_PATH}/omz_intel_models --cache_dir ${OPENCV_DNN_TEST_DATA_PATH}/.omz_cache/ \
        //     --name person-detection-retail-0013,age-gender-recognition-retail-0013
        { "person-detection-retail-0013", {  // IRv10
            "intel/person-detection-retail-0013/FP32/person-detection-retail-0013",
            "intel/person-detection-retail-0013/FP16/person-detection-retail-0013"
        }},
  99. { "age-gender-recognition-retail-0013", {
  100. "intel/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013",
  101. "intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013"
  102. }},
#endif
#if INF_ENGINE_RELEASE >= 2021020000
        // OMZ: 2020.2
        { "face-detection-0105", {
            "intel/face-detection-0105/FP32/face-detection-0105",
            "intel/face-detection-0105/FP16/face-detection-0105"
        }},
        { "face-detection-0106", {
            "intel/face-detection-0106/FP32/face-detection-0106",
            "intel/face-detection-0106/FP16/face-detection-0106"
        }},
#endif
#if INF_ENGINE_RELEASE >= 2021040000
        // OMZ: 2021.4
        { "person-vehicle-bike-detection-2004", {
            "intel/person-vehicle-bike-detection-2004/FP32/person-vehicle-bike-detection-2004",
            "intel/person-vehicle-bike-detection-2004/FP16/person-vehicle-bike-detection-2004"
            //"intel/person-vehicle-bike-detection-2004/FP16-INT8/person-vehicle-bike-detection-2004"
        }},
#endif
    };
    return g_models;
}
static const std::vector<std::string> getOpenVINOTestModelsList()
{
    std::vector<std::string> result;
    const std::map<std::string, OpenVINOModelTestCaseInfo>& models = getOpenVINOTestModels();
    for (const auto& it : models)
        result.push_back(it.first);
    return result;
}
inline static std::string getOpenVINOModel(const std::string &modelName, bool isFP16)
{
    const std::map<std::string, OpenVINOModelTestCaseInfo>& models = getOpenVINOTestModels();
    const auto it = models.find(modelName);
    if (it != models.end())
    {
        OpenVINOModelTestCaseInfo modelInfo = it->second;
        if (isFP16 && modelInfo.modelPathFP16)
            return std::string(modelInfo.modelPathFP16);
        else if (!isFP16 && modelInfo.modelPathFP32)
            return std::string(modelInfo.modelPathFP32);
    }
    return std::string();
}
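// Fills 'm' with random values matching the tensor dimensions and wraps the same memory
// in an Inference Engine Blob (no copy), so the generated data is visible to both the
// IE request and the OpenCV-side comparison.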
static inline void genData(const InferenceEngine::TensorDesc& desc, Mat& m, Blob::Ptr& dataPtr)
{
    const std::vector<size_t>& dims = desc.getDims();
    if (desc.getPrecision() == InferenceEngine::Precision::FP32)
    {
        m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
        randu(m, -1, 1);
        dataPtr = make_shared_blob<float>(desc, (float*)m.data);
    }
    else if (desc.getPrecision() == InferenceEngine::Precision::I32)
    {
        m.create(std::vector<int>(dims.begin(), dims.end()), CV_32S);
        randu(m, -100, 100);
        dataPtr = make_shared_blob<int>(desc, (int*)m.data);
    }
    else
    {
        FAIL() << "Unsupported precision: " << desc.getPrecision();
    }
}
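// Runs the model directly through the Inference Engine API on the requested target:
// random inputs are generated (returned via 'inputsMap'), synchronous inference is
// performed, and the reference outputs are returned via 'outputsMap'.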
void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
           std::map<std::string, cv::Mat>& inputsMap, std::map<std::string, cv::Mat>& outputsMap)
{
    SCOPED_TRACE("runIE");

    std::string device_name;

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
    Core ie;
#else
    InferenceEnginePluginPtr enginePtr;
    InferencePlugin plugin;
#endif

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019030000)
    CNNNetwork net = ie.ReadNetwork(xmlPath, binPath);
#else
    CNNNetReader reader;
    reader.ReadNetwork(xmlPath);
    reader.ReadWeights(binPath);
    CNNNetwork net = reader.getNetwork();
#endif

    ExecutableNetwork netExec;
    InferRequest infRequest;
    try
    {
        switch (target)
        {
            case DNN_TARGET_CPU:
                device_name = "CPU";
                break;
            case DNN_TARGET_OPENCL:
            case DNN_TARGET_OPENCL_FP16:
                device_name = "GPU";
                break;
            case DNN_TARGET_MYRIAD:
                device_name = "MYRIAD";
                break;
            case DNN_TARGET_FPGA:
                device_name = "FPGA";
                break;
            default:
                CV_Error(Error::StsNotImplemented, "Unknown target");
        };

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
        auto dispatcher = InferenceEngine::PluginDispatcher({""});
        enginePtr = dispatcher.getPluginByDevice(device_name);
#endif
        if (target == DNN_TARGET_CPU || target == DNN_TARGET_FPGA)
        {
            std::string suffixes[] = {"_avx2", "_sse4", ""};
            bool haveFeature[] = {
                checkHardwareSupport(CPU_AVX2),
                checkHardwareSupport(CPU_SSE4_2),
                true
            };
            for (int i = 0; i < 3; ++i)
            {
                if (!haveFeature[i])
                    continue;
#ifdef _WIN32
                std::string libName = "cpu_extension" + suffixes[i] + ".dll";
#elif defined(__APPLE__)
                std::string libName = "libcpu_extension" + suffixes[i] + ".dylib";
#else
                std::string libName = "libcpu_extension" + suffixes[i] + ".so";
#endif  // _WIN32
                try
                {
                    IExtensionPtr extension = make_so_pointer<IExtension>(libName);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
                    ie.AddExtension(extension, device_name);
#else
                    enginePtr->AddExtension(extension, 0);
#endif
                    break;
                }
                catch(...) {}
            }
            // Some of networks can work without a library of extra layers.
        }
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
        netExec = ie.LoadNetwork(net, device_name);
#else
        plugin = InferencePlugin(enginePtr);
        netExec = plugin.LoadNetwork(net, {});
#endif
        infRequest = netExec.CreateInferRequest();
    }
    catch (const std::exception& ex)
    {
        CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
    }

    // Fill input blobs.
    inputsMap.clear();
    BlobMap inputBlobs;
    for (auto& it : net.getInputsInfo())
    {
        const InferenceEngine::TensorDesc& desc = it.second->getTensorDesc();
        genData(desc, inputsMap[it.first], inputBlobs[it.first]);
        if (cvtest::debugLevel > 0)
        {
            const std::vector<size_t>& dims = desc.getDims();
            std::cout << "Input: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " [";
            for (auto d : dims)
                std::cout << " " << d;
            std::cout << "] ocv_mat=" << inputsMap[it.first].size << " of " << typeToString(inputsMap[it.first].type()) << std::endl;
        }
    }
    infRequest.SetInput(inputBlobs);

    // Fill output blobs.
    outputsMap.clear();
    BlobMap outputBlobs;
    for (auto& it : net.getOutputsInfo())
    {
        const InferenceEngine::TensorDesc& desc = it.second->getTensorDesc();
        genData(desc, outputsMap[it.first], outputBlobs[it.first]);
        if (cvtest::debugLevel > 0)
        {
            const std::vector<size_t>& dims = desc.getDims();
            std::cout << "Output: '" << it.first << "' precision=" << desc.getPrecision() << " dims=" << dims.size() << " [";
            for (auto d : dims)
                std::cout << " " << d;
            std::cout << "] ocv_mat=" << outputsMap[it.first].size << " of " << typeToString(outputsMap[it.first].type()) << std::endl;
        }
    }
    infRequest.SetOutput(outputBlobs);

    infRequest.Infer();
}
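// Runs the same model through OpenCV's dnn::Net with the inputs produced by runIE()
// and collects the outputs keyed by the unconnected output layer names.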
void runCV(Backend backendId, Target targetId, const std::string& xmlPath, const std::string& binPath,
           const std::map<std::string, cv::Mat>& inputsMap,
           std::map<std::string, cv::Mat>& outputsMap)
{
    SCOPED_TRACE("runOCV");

    Net net = readNet(xmlPath, binPath);
    for (auto& it : inputsMap)
        net.setInput(it.second, it.first);
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);

    std::vector<String> outNames = net.getUnconnectedOutLayersNames();
    if (cvtest::debugLevel > 0)
    {
        std::cout << "OpenCV output names: " << outNames.size() << std::endl;
        for (auto name : outNames)
            std::cout << "- " << name << std::endl;
    }

    std::vector<Mat> outs;
    net.forward(outs, outNames);

    outputsMap.clear();
    EXPECT_EQ(outs.size(), outNames.size());
    for (int i = 0; i < outs.size(); ++i)
    {
        EXPECT_TRUE(outputsMap.insert({outNames[i], outs[i]}).second);
    }
}
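// Parameterized over (backend, target) pairs and model names: the IE-native and OpenCV
// pipelines are run on identical random inputs, and their outputs are expected to match
// within a small, version- and model-specific tolerance 'eps'.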
typedef TestWithParam<tuple< tuple<Backend, Target>, std::string> > DNNTestOpenVINO;
TEST_P(DNNTestOpenVINO, models)
{
    initDLDTDataPath();

    const Backend backendId = get<0>(get<0>(GetParam()));
    const Target targetId = get<1>(get<0>(GetParam()));
    std::string modelName = get<1>(GetParam());

    ASSERT_FALSE(backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) <<
        "Inference Engine backend is required";

#if INF_ENGINE_VER_MAJOR_GE(2021030000)
    if (targetId == DNN_TARGET_MYRIAD && (false
        || modelName == "person-detection-retail-0013"  // ncDeviceOpen:1013 Failed to find booted device after boot
        || modelName == "age-gender-recognition-retail-0013"  // ncDeviceOpen:1013 Failed to find booted device after boot
        || modelName == "face-detection-0105"  // get_element_type() must be called on a node with exactly one output
        || modelName == "face-detection-0106"  // get_element_type() must be called on a node with exactly one output
        || modelName == "person-vehicle-bike-detection-2004"  // 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
        )
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    if (targetId == DNN_TARGET_OPENCL && (false
        || modelName == "face-detection-0106"  // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
        )
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    if (targetId == DNN_TARGET_OPENCL_FP16 && (false
        || modelName == "face-detection-0106"  // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
        )
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

#if INF_ENGINE_VER_MAJOR_GE(2020020000)
    if (targetId == DNN_TARGET_MYRIAD && backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (modelName == "person-detection-retail-0013")  // IRv10
            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    }
#endif

#if INF_ENGINE_VER_MAJOR_EQ(2020040000)
    if (targetId == DNN_TARGET_MYRIAD && modelName == "person-detection-retail-0002")  // IRv5, OpenVINO 2020.4 regression
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
    else
        FAIL() << "Unknown backendId";

    bool isFP16 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD);

    const std::string modelPath = getOpenVINOModel(modelName, isFP16);
    ASSERT_FALSE(modelPath.empty()) << modelName;

    std::string xmlPath = findDataFile(modelPath + ".xml", false);
    std::string binPath = findDataFile(modelPath + ".bin", false);

    std::map<std::string, cv::Mat> inputsMap;
    std::map<std::string, cv::Mat> ieOutputsMap, cvOutputsMap;
    // Single Myriad device cannot be shared across multiple processes.
    if (targetId == DNN_TARGET_MYRIAD)
        resetMyriadDevice();
    if (targetId == DNN_TARGET_HDDL)
        releaseHDDLPlugin();
    EXPECT_NO_THROW(runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap)) << "runIE";
    if (targetId == DNN_TARGET_MYRIAD)
        resetMyriadDevice();
    EXPECT_NO_THROW(runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap)) << "runCV";

    double eps = 0;
#if INF_ENGINE_VER_MAJOR_GE(2020010000)
    if (targetId == DNN_TARGET_CPU && checkHardwareSupport(CV_CPU_AVX_512F))
        eps = 1e-5;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021030000)
    if (targetId == DNN_TARGET_CPU && modelName == "face-detection-0105")
        eps = 2e-4;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021040000)
    if (targetId == DNN_TARGET_CPU && modelName == "person-vehicle-bike-detection-2004")
        eps = 1e-6;
#endif

    EXPECT_EQ(ieOutputsMap.size(), cvOutputsMap.size());
    for (auto& srcIt : ieOutputsMap)
    {
        auto dstIt = cvOutputsMap.find(srcIt.first);
        CV_Assert(dstIt != cvOutputsMap.end());
        double normInf = cvtest::norm(srcIt.second, dstIt->second, cv::NORM_INF);
        EXPECT_LE(normInf, eps) << "output=" << srcIt.first;
    }
}
INSTANTIATE_TEST_CASE_P(/**/,
    DNNTestOpenVINO,
    Combine(dnnBackendsAndTargetsIE(),
            testing::ValuesIn(getOpenVINOTestModelsList())
    )
);
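// Sanity check for the high-level cv::dnn::Model API: Model::predict() on a test image is
// compared (via normAssert) against a plain Net forward pass over a 62x62 blob built from
// the same image.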
typedef TestWithParam<Target> DNNTestHighLevelAPI;
TEST_P(DNNTestHighLevelAPI, predict)
{
    initDLDTDataPath();

    Target target = (dnn::Target)(int)GetParam();
    bool isFP16 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD);

    const std::string modelName = "age-gender-recognition-retail-0013";
    const std::string modelPath = getOpenVINOModel(modelName, isFP16);
    ASSERT_FALSE(modelPath.empty()) << modelName;

    std::string xmlPath = findDataFile(modelPath + ".xml");
    std::string binPath = findDataFile(modelPath + ".bin");

    Model model(xmlPath, binPath);
    Mat frame = imread(findDataFile("dnn/googlenet_1.png"));
    std::vector<Mat> outs;
    model.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
    model.setPreferableTarget(target);
    model.predict(frame, outs);

    Net net = readNet(xmlPath, binPath);
    Mat input = blobFromImage(frame, 1.0, Size(62, 62));
    net.setInput(input);
    net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(target);

    std::vector<String> outNames = net.getUnconnectedOutLayersNames();
    std::vector<Mat> refs;
    net.forward(refs, outNames);

    CV_Assert(refs.size() == outs.size());
    for (int i = 0; i < refs.size(); ++i)
        normAssert(outs[i], refs[i]);
}
INSTANTIATE_TEST_CASE_P(/**/,
    DNNTestHighLevelAPI, testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
);
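// Typical invocation of just these tests (illustrative; the binary name and path depend on
// the local build, "opencv_test_dnn" is the usual target for the dnn module tests):
//   ./bin/opencv_test_dnn --gtest_filter='*DNNTestOpenVINO*:*DNNTestHighLevelAPI*'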
}}
#endif // HAVE_INF_ENGINE