test_halide_layers.cpp 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978
  1. // This file is part of OpenCV project.
  2. // It is subject to the license terms in the LICENSE file found in the top-level directory
  3. // of this distribution and at http://opencv.org/license.html.
  4. //
  5. // Copyright (C) 2017-2019, Intel Corporation, all rights reserved.
  6. // Third party copyrights are property of their respective owners.
  7. // This tests doesn't require any external data. They just compare outputs of
  8. // layers using different computation backends. Input and parameters are random.
  9. #include "test_precomp.hpp"
  10. namespace opencv_test { namespace {
  11. using namespace cv;
  12. using namespace cv::dnn;
  13. using namespace testing;
  14. static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true, double l1 = 0.0, double lInf = 0.0)
  15. {
  16. DNNTestLayer::checkBackend(backendId, targetId);
  17. if (randInput)
  18. randu(input, -1.0f, 1.0f);
  19. net.setInput(input);
  20. net.setPreferableBackend(DNN_BACKEND_OPENCV);
  21. Mat outputDefault = net.forward().clone();
  22. net.setPreferableBackend(backendId);
  23. net.setPreferableTarget(targetId);
  24. Mat outputHalide = net.forward().clone();
  25. if (skipCheck)
  26. return;
  27. double default_l1, default_lInf;
  28. DNNTestLayer::getDefaultThresholds(backendId, targetId, &default_l1, &default_lInf);
  29. if (l1 == 0.0)
  30. l1 = default_l1;
  31. if (lInf == 0.0)
  32. lInf = default_lInf;
  33. normAssert(outputDefault, outputHalide, "", l1, lInf);
  34. if (cvtest::debugLevel > 0 || testing::Test::HasFailure())
  35. {
  36. std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
  37. std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
  38. std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
  39. }
  40. }
  41. static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false, double l1 = 0.0, double lInf = 0.0)
  42. {
  43. Net net;
  44. net.addLayerToPrev(params.name, params.type, params);
  45. test(input, net, backendId, targetId, skipCheck, true, l1, lInf);
  46. }
// Generates the (backend, target) pairs exercised by every test below.
// The boolean arguments select which backend families are included --
// presumably (IE, Halide, CPU-only); confirm against dnnBackendsAndTargets'
// declaration in the test framework.
static inline testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsWithHalide()
{
    return dnnBackendsAndTargets(true, true, false); // OpenCV/CPU is used as reference
}
// Fixture for tests parameterized only by the (backend, target) pair.
class Test_Halide_layers : public DNNTestLayer {};
  52. ////////////////////////////////////////////////////////////////////////////////
  53. // Padding
  54. ////////////////////////////////////////////////////////////////////////////////
  55. TEST_P(Test_Halide_layers, Padding)
  56. {
  57. static const int kNumRuns = 10;
  58. std::vector<int> paddings(8);
  59. cv::RNG& rng = cv::theRNG();
  60. for (int t = 0; t < kNumRuns; ++t)
  61. {
  62. for (int i = 0; i < paddings.size(); ++i)
  63. paddings[i] = rng(5);
  64. LayerParams lp;
  65. lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
  66. lp.type = "Padding";
  67. lp.name = "testLayer";
  68. int sz[] = {1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10)};
  69. Mat input(4, &sz[0], CV_32F);
  70. test(lp, input, backend, target);
  71. }
  72. }
  73. ////////////////////////////////////////////////////////////////////////////////
  74. // Convolution
  75. ////////////////////////////////////////////////////////////////////////////////
// Parameters: Vec3i = (input channels, output channels, group), then input
// size, kernel, stride, pad, dilation, bias flag, and (backend, target).
typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Size, bool, tuple<Backend, Target> > > Convolution;
TEST_P(Convolution, Accuracy)
{
    int inChannels = get<0>(GetParam())[0];
    int outChannels = get<0>(GetParam())[1];
    int group = get<0>(GetParam())[2];
    Size inSize = get<1>(GetParam());
    Size kernel = get<2>(GetParam());
    Size stride = get<3>(GetParam());
    Size pad = get<4>(GetParam());
    Size dilation = get<5>(GetParam());
    bool hasBias = get<6>(GetParam());
    Backend backendId = get<0>(get<7>(GetParam()));
    Target targetId = get<1>(get<7>(GetParam()));
    // NOTE(review): skipCheck is always false here, so the throw at the end
    // of this test is currently unreachable.
    bool skipCheck = false;
    // Weights blob: {out channels, in channels / group, kH, kW}.
    int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width};
    Mat weights(4, &sz[0], CV_32F);
    randu(weights, -1.0f, 1.0f);
    LayerParams lp;
    lp.set("kernel_w", kernel.width);
    lp.set("kernel_h", kernel.height);
    lp.set("pad_w", pad.width);
    lp.set("pad_h", pad.height);
    lp.set("stride_w", stride.width);
    lp.set("stride_h", stride.height);
    lp.set("dilation_w", dilation.width);
    lp.set("dilation_h", dilation.height);
    lp.set("num_output", outChannels);
    lp.set("group", group);
    lp.set("bias_term", hasBias);
    lp.type = "Convolution";
    lp.name = "testLayer";
    lp.blobs.push_back(weights);
    if (hasBias)
    {
        // Optional second blob: one bias value per output channel.
        Mat bias(1, outChannels, CV_32F);
        randu(bias, -1.0f, 1.0f);
        lp.blobs.push_back(bias);
    }
    int inpSz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &inpSz[0], CV_32F);
    test(lp, input, backendId, targetId, skipCheck);
    if (skipCheck)
        throw SkipTestException("Skip checks in unstable test");
}
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Convolution, Combine(
/*in channels, out channels, group*/
             Values(Vec3i(6, 4, 1), Vec3i(6, 9, 1),
                    Vec3i(6, 4, 2), Vec3i(6, 9, 3)),
/*in size*/  Values(Size(5, 6)),
/*kernel*/   Values(Size(3, 1), Size(1, 3)),
/*stride*/   Values(Size(1, 1), Size(2, 2)),
/*pad*/      Values(Size(1, 0), Size(0, 1)),
/*dilation*/ Values(Size(1, 1), Size(2, 2)),
/*has bias*/ Bool(),
             dnnBackendsAndTargetsWithHalide()
));
  133. ////////////////////////////////////////////////////////////////////////////////
  134. // Deconvolution
  135. ////////////////////////////////////////////////////////////////////////////////
// Parameters: Vec3i = (input channels, output channels, group); the Vec4i
// packs (stride.w, stride.h, adjusted-pad.w, adjusted-pad.h) into one value.
typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Vec4i, bool, tuple<Backend, Target> > > Deconvolution;
TEST_P(Deconvolution, Accuracy)
{
    int inChannels = get<0>(GetParam())[0];
    int outChannels = get<0>(GetParam())[1];
    int group = get<0>(GetParam())[2];
    Size inSize = get<1>(GetParam());
    Size kernel = get<2>(GetParam());
    Size pad = get<3>(GetParam());
    Size dilation = get<4>(GetParam());
    Size stride = Size(get<5>(GetParam())[0], get<5>(GetParam())[1]);
    Size adjPad = Size(get<5>(GetParam())[2], get<5>(GetParam())[3]);
    bool hasBias = get<6>(GetParam());
    Backend backendId = get<0>(get<7>(GetParam()));
    Target targetId = get<1>(get<7>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    // Skip one known-bad parameter combination on Myriad-X with IE >= 2019R1.
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
        && inChannels == 6 && outChannels == 4 && group == 1
        && kernel == Size(1, 3) && pad == Size(1, 0)
        && stride == Size(1, 1) && dilation == Size(1, 1))
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
    if (targetId == DNN_TARGET_CUDA_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
    // Deconvolution weights layout swaps the channel axes relative to
    // Convolution: {in channels, out channels / group, kH, kW}.
    int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width};
    Mat weights(4, &sz[0], CV_32F);
    randu(weights, -1.0f, 1.0f);
    LayerParams lp;
    lp.set("kernel_w", kernel.width);
    lp.set("kernel_h", kernel.height);
    lp.set("pad_w", pad.width);
    lp.set("pad_h", pad.height);
    lp.set("stride_w", stride.width);
    lp.set("stride_h", stride.height);
    lp.set("dilation_w", dilation.width);
    lp.set("dilation_h", dilation.height);
    lp.set("adj_w", adjPad.width);
    lp.set("adj_h", adjPad.height);
    lp.set("num_output", outChannels);
    lp.set("group", group);
    lp.set("bias_term", hasBias);
    lp.type = "Deconvolution";
    lp.name = "testLayer";
    lp.blobs.push_back(weights);
    if (hasBias)
    {
        Mat bias(1, outChannels, CV_32F);
        randu(bias, -1.0f, 1.0f);
        lp.blobs.push_back(bias);
    }
    int inpSz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &inpSz[0], CV_32F);
    test(lp, input, backendId, targetId);
}
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Deconvolution, Combine(
/*in channels, out channels, group*/
             Values(Vec3i(6, 4, 1), Vec3i(6, 9, 3)),
/*in size*/  Values(Size(5, 6)),
/*kernel*/   Values(Size(3, 1), Size(1, 3)),
/*pad*/      Values(Size(1, 0), Size(0, 1)),
/*dilation*/ Values(Size(1, 1)),
/*stride, adj. pad*/ Values(Vec4i(1,1, 0,0), Vec4i(2,2, 1,0), Vec4i(1,2, 0,1)),
/*has bias*/ Bool(),
             dnnBackendsAndTargetsWithHalide()
));
  202. ////////////////////////////////////////////////////////////////////////////////
  203. // LRN
  204. ////////////////////////////////////////////////////////////////////////////////
// Parameters: Vec3i = (channels, width, height), local size, Vec3f =
// (alpha, beta, bias), norm_by_size flag, norm-region name, backend/target.
typedef TestWithParam<tuple<Vec3i, int, Vec3f, bool, std::string, tuple<Backend, Target> > > LRN;
TEST_P(LRN, Accuracy)
{
    int inChannels = get<0>(GetParam())[0];
    Size inSize = Size(get<0>(GetParam())[1], get<0>(GetParam())[2]);
    int localSize = get<1>(GetParam());
    float alpha = get<2>(GetParam())[0];
    float beta = get<2>(GetParam())[1];
    float bias = get<2>(GetParam())[2];
    bool normBySize = get<3>(GetParam());
    std::string nrmType = get<4>(GetParam());
    Backend backendId = get<0>(get<5>(GetParam()));
    Target targetId = get<1>(get<5>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
    // Known Myriad failure for across-channel LRN on 5-wide/5-tall inputs.
    if ((inSize.width == 5 || inSize.height == 5) && targetId == DNN_TARGET_MYRIAD &&
        nrmType == "ACROSS_CHANNELS")
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
    LayerParams lp;
    lp.set("norm_region", nrmType);
    lp.set("local_size", localSize);
    lp.set("alpha", alpha);
    lp.set("beta", beta);
    lp.set("bias", bias);
    lp.set("norm_by_size", normBySize);
    lp.type = "LRN";
    lp.name = "testLayer";
    int sz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &sz[0], CV_32F);
    double l1 = 0.0, lInf = 0.0;
    // The OpenCL kernels use the native_ math functions which have
    // implementation defined accuracy, so we use relaxed thresholds. See
    // https://github.com/opencv/opencv/issues/9821 for more details.
    if (targetId == DNN_TARGET_OPENCL)
    {
        l1 = 0.01;
        lInf = 0.01;
    }
    test(lp, input, backendId, targetId, false, l1, lInf);
}
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(
/*input ch,w,h*/ Values(Vec3i(6, 5, 8), Vec3i(7, 11, 6)),
/*local size*/   Values(3, 5),
                 Values(Vec3f(0.9f, 1.0f, 1.1f), Vec3f(0.9f, 1.1f, 1.0f),
/*alpha, beta, bias*/   Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
                        Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
/*norm_by_size*/ Bool(),
/*norm_type*/    Values("ACROSS_CHANNELS", "WITHIN_CHANNEL"),
                 dnnBackendsAndTargetsWithHalide()
));
  255. ////////////////////////////////////////////////////////////////////////////////
  256. // Average pooling
  257. ////////////////////////////////////////////////////////////////////////////////
  258. typedef TestWithParam<tuple<int, Size, Size, Size, tuple<Backend, Target> > > AvePooling;
  259. TEST_P(AvePooling, Accuracy)
  260. {
  261. int inChannels = get<0>(GetParam());
  262. Size outSize = get<1>(GetParam());; // Input size will be computed from parameters.
  263. Size kernel = get<2>(GetParam());
  264. Size stride = get<3>(GetParam());
  265. Backend backendId = get<0>(get<4>(GetParam()));
  266. Target targetId = get<1>(get<4>(GetParam()));
  267. #if defined(INF_ENGINE_RELEASE)
  268. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
  269. && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
  270. && kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
  271. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
  272. #endif
  273. const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
  274. const int inHeight = (outSize.height - 1) * stride.height + kernel.height;
  275. LayerParams lp;
  276. lp.set("pool", "ave");
  277. lp.set("kernel_w", kernel.width);
  278. lp.set("kernel_h", kernel.height);
  279. lp.set("stride_w", stride.width);
  280. lp.set("stride_h", stride.height);
  281. lp.type = "Pooling";
  282. lp.name = "testLayer";
  283. int sz[] = {1, inChannels, inHeight, inWidth};
  284. Mat input(4, &sz[0], CV_32F);
  285. test(lp, input, backendId, targetId);
  286. }
  287. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, AvePooling, Combine(
  288. /*in channels*/ Values(3, 4),
  289. /*out size*/ Values(Size(1, 1), Size(2, 2), Size(3, 2), Size(4, 7)),
  290. /*kernel*/ Values(Size(1, 1), Size(2, 2), Size(3, 3), Size(3, 2)),
  291. /*stride*/ Values(Size(1, 1), Size(2, 2), Size(3, 2)),
  292. dnnBackendsAndTargetsWithHalide()
  293. ));
  294. ////////////////////////////////////////////////////////////////////////////////
  295. // Maximum pooling
  296. ////////////////////////////////////////////////////////////////////////////////
// Parameters: channels, input size, kernel, stride, pad, backend/target.
typedef TestWithParam<tuple<int, Size, Size, Size, Size, tuple<Backend, Target> > > MaxPooling;
TEST_P(MaxPooling, Accuracy)
{
    int inChannels = get<0>(GetParam());
    Size inSize = get<1>(GetParam());
    Size kernel = get<2>(GetParam());
    Size stride = get<3>(GetParam());
    Size pad = get<4>(GetParam());
    Backend backendId = get<0>(get<5>(GetParam()));
    Target targetId = get<1>(get<5>(GetParam()));
    // The four blocks below skip parameter combinations known to fail on
    // specific Inference Engine releases / VPU hardware.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
            && inSize == Size(7, 6) && kernel == Size(3, 2)
            && (stride == Size(1, 1) || stride == Size(2, 2))
            && (pad == Size(0, 1) || pad == Size(1, 1))
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
            && (kernel == Size(2, 2) || kernel == Size(3, 2))
            && stride == Size(1, 1) && (pad == Size(0, 0) || pad == Size(0, 1))
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
            && (stride == Size(1, 1) || stride == Size(2, 2))
            && (pad == Size(0, 1) || pad == Size(1, 1))
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    LayerParams lp;
    lp.set("pool", "max");
    lp.set("kernel_w", kernel.width);
    lp.set("kernel_h", kernel.height);
    lp.set("stride_w", stride.width);
    lp.set("stride_h", stride.height);
    lp.set("pad_w", pad.width);
    lp.set("pad_h", pad.height);
    lp.type = "Pooling";
    lp.name = "testLayer";
    int sz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &sz[0], CV_32F);
    test(lp, input, backendId, targetId);
}
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, MaxPooling, Combine(
/*in channels*/ Values(3, 4),
/*in size*/     Values(Size(5, 5), Size(7, 6)),
/*kernel*/      Values(Size(2, 2), Size(3, 3), Size(3, 2)),
/*stride*/      Values(Size(1, 1), Size(2, 2), Size(3, 2)),
/*pad*/         Values(Size(0, 0), Size(1, 1), Size(0, 1)),
                dnnBackendsAndTargetsWithHalide()
));
  356. ////////////////////////////////////////////////////////////////////////////////
  357. // Fully-connected
  358. ////////////////////////////////////////////////////////////////////////////////
// Parameters: channels, input size, output channels, bias flag, backend/target.
typedef TestWithParam<tuple<int, Size, int, bool, tuple<Backend, Target> > > FullyConnected;
TEST_P(FullyConnected, Accuracy)
{
    int inChannels = get<0>(GetParam());
    Size inSize = get<1>(GetParam());
    int outChannels = get<2>(GetParam());
    bool hasBias = get<3>(GetParam());
    Backend backendId = get<0>(get<4>(GetParam()));
    Target targetId = get<1>(get<4>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
    if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
         backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && (targetId == DNN_TARGET_OPENCL_FP16 ||
       (targetId == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X))) {
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
    }
#endif
    // Weight matrix: outChannels rows over the flattened C*H*W input.
    Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
    randu(weights, -1.0f, 1.0f);
    // NOTE(review): the bias blob is pushed unconditionally; the "bias_term"
    // flag set below presumably controls whether the layer uses it -- confirm
    // against the InnerProduct layer implementation.
    Mat bias(1, outChannels, CV_32F);
    randu(bias, -1.0f, 1.0f);
    LayerParams lp;
    lp.set("num_output", outChannels);
    lp.set("bias_term", hasBias);
    lp.blobs.push_back(weights);
    lp.blobs.push_back(bias);
    lp.type = "InnerProduct";
    lp.name = "testLayer";
    int sz[] = {1, inChannels, inSize.height, inSize.width};
    Mat input(4, &sz[0], CV_32F);
    // Relaxed tolerances for FP16-class targets.
    double l1 = 0.0;
    double lInf = 0.0;
#if defined(INF_ENGINE_RELEASE)
    if (targetId == DNN_TARGET_MYRIAD)
    {
        l1 = 0.015;
        lInf = 0.025;
    }
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 0.01;
    }
#endif
    if (targetId == DNN_TARGET_CUDA_FP16)
        l1 = 0.015;
    test(lp, input, backendId, targetId, false, l1, lInf);
}
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, FullyConnected, Combine(
/*in channels*/  Values(3, 4),
/*in size*/      Values(Size(5, 4), Size(4, 5), Size(1, 1)),
/*out channels*/ Values(3, 4),
/*has bias*/     Bool(),
                 dnnBackendsAndTargetsWithHalide()
));
  413. ////////////////////////////////////////////////////////////////////////////////
  414. // SoftMax
  415. ////////////////////////////////////////////////////////////////////////////////
  416. typedef TestWithParam<tuple<int, tuple<Backend, Target> > > SoftMax;
  417. TEST_P(SoftMax, Accuracy)
  418. {
  419. int inChannels = get<0>(GetParam());
  420. Backend backendId = get<0>(get<1>(GetParam()));
  421. Target targetId = get<1>(get<1>(GetParam()));
  422. LayerParams lp;
  423. lp.type = "Softmax";
  424. lp.name = "testLayer";
  425. int sz[] = {1, inChannels, 1, 1};
  426. Mat input(4, &sz[0], CV_32F);
  427. test(lp, input, backendId, targetId);
  428. }
  429. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, SoftMax, Combine(
  430. Values(3, 4, 5, 1024),
  431. dnnBackendsAndTargetsWithHalide()
  432. ));
  433. //////////////////////////////////////////////////////////////////////////////
  434. // Max pooling - unpooling
  435. //////////////////////////////////////////////////////////////////////////////
// Chains a max-pooling layer into a MaxUnpool layer, wiring BOTH pooling
// outputs into the unpool layer (output #1 is the pooling layer's second
// output -- presumably the max-element indices; confirm in PoolingLayer docs).
TEST_P(Test_Halide_layers, MaxPoolUnpool)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    // 2x2 max pooling, stride 2, no padding.
    LayerParams pool;
    pool.set("pool", "max");
    pool.set("kernel_w", 2);
    pool.set("kernel_h", 2);
    pool.set("stride_w", 2);
    pool.set("stride_h", 2);
    pool.set("pad_w", 0);
    pool.set("pad_h", 0);
    pool.type = "Pooling";
    pool.name = "testPool";
    // Matching unpooling parameters.
    LayerParams unpool;
    unpool.set("pool_k_w", 2);
    unpool.set("pool_k_h", 2);
    unpool.set("pool_stride_w", 2);
    unpool.set("pool_stride_h", 2);
    unpool.set("pool_pad_w", 0);
    unpool.set("pool_pad_h", 0);
    unpool.type = "MaxUnpool";
    unpool.name = "testUnpool";
    Net net;
    int poolId = net.addLayer(pool.name, pool.type, pool);
    net.connect(0, 0, poolId, 0);
    int unpoolId = net.addLayer(unpool.name, unpool.type, unpool);
    net.connect(poolId, 0, unpoolId, 0);  // pooled values
    net.connect(poolId, 1, unpoolId, 1);  // second pooling output
    int sz[] = {1, 1, 4, 4};
    Mat input(4, &sz[0], CV_32F);
    test(input, net, backend, target);
}
  471. ////////////////////////////////////////////////////////////////////////////////
  472. // AvePooling + in-place layers
  473. ////////////////////////////////////////////////////////////////////////////////
  474. static const int kNumChannels = 3;
  475. void testInPlaceActivation(LayerParams& lp, Backend backendId, Target targetId, double l1 = 0.0, double lInf = 0.0)
  476. {
  477. EXPECT_FALSE(lp.name.empty());
  478. LayerParams pool;
  479. pool.set("pool", "ave");
  480. pool.set("kernel_w", 2);
  481. pool.set("kernel_h", 2);
  482. pool.set("stride_w", 2);
  483. pool.set("stride_h", 2);
  484. pool.type = "Pooling";
  485. pool.name = "ave_pool";
  486. Net net;
  487. int poolId = net.addLayer(pool.name, pool.type, pool);
  488. net.connect(0, 0, poolId, 0);
  489. net.addLayerToPrev(lp.name, lp.type, lp);
  490. int sz[] = {1, kNumChannels, 10, 10};
  491. Mat input(4, &sz[0], CV_32F);
  492. test(input, net, backendId, targetId, false, true, l1, lInf);
  493. }
  494. typedef TestWithParam<tuple<bool, bool, float, tuple<Backend, Target> > > BatchNorm;
  495. TEST_P(BatchNorm, Accuracy)
  496. {
  497. bool hasWeights = get<0>(GetParam());
  498. bool hasBias = get<1>(GetParam());
  499. float epsilon = get<2>(GetParam());
  500. Backend backendId = get<0>(get<3>(GetParam()));
  501. Target targetId = get<1>(get<3>(GetParam()));
  502. LayerParams lp;
  503. lp.set("has_weight", hasWeights);
  504. lp.set("has_bias", hasBias);
  505. lp.set("eps", epsilon);
  506. lp.type = "BatchNorm";
  507. lp.name = "testLayer";
  508. lp.blobs.reserve(4);
  509. for (int i = 0; i < 3; ++i)
  510. lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
  511. if (hasBias || hasWeights)
  512. lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
  513. for (int i = 0; i < lp.blobs.size(); ++i)
  514. randu(lp.blobs[i], 0.0f, 1.0f);
  515. testInPlaceActivation(lp, backendId, targetId);
  516. }
  517. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, BatchNorm, Combine(
  518. /*has weights*/ Bool(),
  519. /*has bias*/ Bool(),
  520. /*epsilon*/ Values(1e-3f, 1e-5f),
  521. dnnBackendsAndTargetsWithHalide()
  522. ));
  523. typedef TestWithParam<tuple<float, tuple<Backend, Target> > > ReLU;
  524. TEST_P(ReLU, Accuracy)
  525. {
  526. float negativeSlope = get<0>(GetParam());
  527. Backend backendId = get<0>(get<1>(GetParam()));
  528. Target targetId = get<1>(get<1>(GetParam()));
  529. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
  530. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD && negativeSlope < 0)
  531. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  532. #endif
  533. LayerParams lp;
  534. lp.set("negative_slope", negativeSlope);
  535. lp.type = "ReLU";
  536. lp.name = "testLayer";
  537. testInPlaceActivation(lp, backendId, targetId);
  538. }
  539. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, ReLU, Combine(
  540. /*negative slope*/ Values(2.0f, 0.3f, -0.1f, 0.0f),
  541. dnnBackendsAndTargetsWithHalide()
  542. ));
  543. typedef TestWithParam<tuple<std::string, tuple<Backend, Target> > > NoParamActivation;
  544. TEST_P(NoParamActivation, Accuracy)
  545. {
  546. Backend backendId = get<0>(get<1>(GetParam()));
  547. Target targetId = get<1>(get<1>(GetParam()));
  548. LayerParams lp;
  549. lp.type = get<0>(GetParam());
  550. lp.name = "testLayer";
  551. testInPlaceActivation(lp, backendId, targetId);
  552. }
  553. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(
  554. /*type*/ Values("TanH", "Sigmoid", "AbsVal", "BNLL", "Swish", "Mish"),
  555. dnnBackendsAndTargetsWithHalide()
  556. ));
  557. typedef TestWithParam<tuple<Vec3f, tuple<Backend, Target> > > Power;
  558. TEST_P(Power, Accuracy)
  559. {
  560. float power = get<0>(GetParam())[0];
  561. float scale = get<0>(GetParam())[1];
  562. float shift = get<0>(GetParam())[2];
  563. Backend backendId = get<0>(get<1>(GetParam()));
  564. Target targetId = get<1>(get<1>(GetParam()));
  565. LayerParams lp;
  566. lp.set("power", power);
  567. lp.set("scale", scale);
  568. lp.set("shift", shift);
  569. lp.type = "Power";
  570. lp.name = "testLayer";
  571. testInPlaceActivation(lp, backendId, targetId);
  572. }
  573. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Power, Combine(
  574. /*power, scale, shift*/ Values(Vec3f(0.9f, 1.0f, 1.1f), Vec3f(0.9f, 1.1f, 1.0f),
  575. Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
  576. Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
  577. dnnBackendsAndTargetsWithHalide()
  578. ));
  579. typedef TestWithParam<tuple<Vec3f, tuple<Backend, Target> > > Exp;
  580. TEST_P(Exp, Accuracy)
  581. {
  582. float base = get<0>(GetParam())[0];
  583. float scale = get<0>(GetParam())[1];
  584. float shift = get<0>(GetParam())[2];
  585. Backend backendId = get<0>(get<1>(GetParam()));
  586. Target targetId = get<1>(get<1>(GetParam()));
  587. LayerParams lp;
  588. lp.set("base", base);
  589. lp.set("scale", scale);
  590. lp.set("shift", shift);
  591. lp.type = "Exp";
  592. lp.name = "testLayer";
  593. testInPlaceActivation(lp, backendId, targetId);
  594. }
  595. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Exp, Combine(
  596. /*base, scale, shift*/ Values(Vec3f(0.9f, -1.0f, 1.1f), Vec3f(0.9f, 1.1f, -1.0f),
  597. Vec3f(-1.0f, 0.9f, 1.1f), Vec3f(-1.0f, 1.1f, 0.9f),
  598. Vec3f(1.1f, 0.9f, -1.0f), Vec3f(1.1f, -1.0f, 0.9f)),
  599. dnnBackendsAndTargetsWithHalide()
  600. ));
  601. TEST_P(Test_Halide_layers, ChannelsPReLU)
  602. {
  603. LayerParams lp;
  604. lp.type = "ChannelsPReLU";
  605. lp.name = "testLayer";
  606. lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
  607. randu(lp.blobs[0], -1.0f, 1.0f);
  608. testInPlaceActivation(lp, backend, target);
  609. }
  610. typedef TestWithParam<tuple<bool, tuple<Backend, Target> > > Scale;
  611. TEST_P(Scale, Accuracy)
  612. {
  613. bool hasBias = get<0>(GetParam());
  614. Backend backendId = get<0>(get<1>(GetParam()));
  615. Target targetId = get<1>(get<1>(GetParam()));
  616. LayerParams lp;
  617. lp.set("bias_term", hasBias);
  618. lp.type = "Scale";
  619. lp.name = "testLayer";
  620. lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
  621. randu(lp.blobs[0], -1.0f, 1.0f);
  622. if (hasBias)
  623. {
  624. lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
  625. randu(lp.blobs[1], -1.0f, 1.0f);
  626. }
  627. testInPlaceActivation(lp, backendId, targetId);
  628. }
  629. INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Scale, Combine(
  630. Bool(),
  631. dnnBackendsAndTargetsWithHalide()
  632. ));
  633. ////////////////////////////////////////////////////////////////////////////////
  634. // Concat layer
  635. ////////////////////////////////////////////////////////////////////////////////
//
// input --- conv --- concat --- output
//       `--- conv ----^ ^ ^
//       `---- ... ------' '
//       `-----------------'
  641. typedef TestWithParam<tuple<Vec3i, Vec3i, tuple<Backend, Target> > > Concat;
  642. TEST_P(Concat, Accuracy)
  643. {
  644. Vec3i inSize = get<0>(GetParam());
  645. Vec3i numChannels = get<1>(GetParam());
  646. Backend backendId = get<0>(get<2>(GetParam()));
  647. Target targetId = get<1>(get<2>(GetParam()));
  648. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
  649. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
  650. && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
  651. )
  652. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION); // crash
  653. #endif
  654. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
  655. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_CPU
  656. && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
  657. )
  658. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION); // TODO: IE_CPU
  659. #endif
  660. Net net;
  661. std::vector<int> convLayerIds;
  662. convLayerIds.reserve(numChannels.channels);
  663. for (int i = 0, n = numChannels.channels; i < n; ++i)
  664. {
  665. if (!numChannels[i])
  666. break;
  667. int sz[] = {numChannels[i], inSize[0], 1, 1};
  668. Mat weights(4, &sz[0], CV_32F);
  669. randu(weights, -1.0f, 1.0f);
  670. LayerParams convParam;
  671. convParam.set("kernel_w", 1);
  672. convParam.set("kernel_h", 1);
  673. convParam.set("num_output", numChannels[i]);
  674. convParam.set("bias_term", false);
  675. convParam.type = "Convolution";
  676. std::ostringstream ss;
  677. ss << "convLayer" << i;
  678. convParam.name = ss.str();
  679. convParam.blobs.push_back(weights);
  680. int layerId = net.addLayer(convParam.name, convParam.type, convParam);
  681. convLayerIds.push_back(layerId);
  682. net.connect(0, 0, layerId, 0);
  683. }
  684. LayerParams concatParam;
  685. concatParam.type = "Concat";
  686. concatParam.name = "testLayer";
  687. int concatId = net.addLayer(concatParam.name, concatParam.type, concatParam);
  688. net.connect(0, 0, concatId, 0);
  689. for (int i = 0; i < convLayerIds.size(); ++i)
  690. {
  691. net.connect(convLayerIds[i], 0, concatId, i + 1);
  692. }
  693. int sz[] = {1, inSize[0], inSize[1], inSize[2]};
  694. Mat input(4, &sz[0], CV_32F);
  695. test(input, net, backendId, targetId);
  696. }
// Concat combinations: each non-zero entry of the channels Vec3i adds one
// convolution branch, so 1..3 branches are exercised per input size.
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Concat, Combine(
/*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
/*channels*/ Values(Vec3i(2, 0, 0), Vec3i(3, 4, 0), Vec3i(1, 6, 2)),
dnnBackendsAndTargetsWithHalide()
));
  702. ////////////////////////////////////////////////////////////////////////////////
  703. // Element-wise layers
  704. ////////////////////////////////////////////////////////////////////////////////
  705. //
// input --- conv --- eltwise --- output
//      `--- conv ----^   ^   ^
//      `---- ... --------'   '
//      `---------------------'
  710. typedef TestWithParam<tuple<Vec3i, std::string, int, bool, tuple<Backend, Target> > > Eltwise;
  711. TEST_P(Eltwise, Accuracy)
  712. {
  713. Vec3i inSize = get<0>(GetParam());
  714. std::string op = get<1>(GetParam());
  715. int numConv = get<2>(GetParam());
  716. bool weighted = get<3>(GetParam());
  717. Backend backendId = get<0>(get<4>(GetParam()));
  718. Target targetId = get<1>(get<4>(GetParam()));
  719. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
  720. // accuracy
  721. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL &&
  722. inSize == Vec3i(1, 4, 5) && op == "sum" && numConv == 1 && !weighted)
  723. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  724. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL &&
  725. inSize == Vec3i(2, 8, 6) && op == "sum" && numConv == 1 && !weighted)
  726. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  727. #endif
  728. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
  729. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD &&
  730. inSize == Vec3i(1, 4, 5))
  731. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  732. #endif
  733. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000) && INF_ENGINE_VER_MAJOR_LT(2021040000)
  734. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && numConv > 1)
  735. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  736. #endif
  737. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
  738. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_OPENCL &&
  739. op == "sum" && numConv == 1 && !weighted)
  740. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
  741. #endif
  742. #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
  743. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && numConv > 1)
  744. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  745. #endif
  746. bool convInputShift = 1;
  747. int numEltwiseInputs = numConv;
  748. if (op == "div")
  749. {
  750. numConv = 1;
  751. convInputShift = 0; // first input is convolution
  752. }
  753. Net net;
  754. std::vector<int> convLayerIds(numConv);
  755. for (int i = 0; i < numConv; ++i)
  756. {
  757. int sz[] = {inSize[0], inSize[0], 1, 1};
  758. Mat weights(4, &sz[0], CV_32F);
  759. randu(weights, -1.0f, 1.0f);
  760. LayerParams convParam;
  761. convParam.set("kernel_w", 1);
  762. convParam.set("kernel_h", 1);
  763. convParam.set("num_output", inSize[0]);
  764. convParam.set("bias_term", false);
  765. convParam.type = "Convolution";
  766. std::ostringstream ss;
  767. ss << "convLayer" << i;
  768. convParam.name = ss.str();
  769. convParam.blobs.push_back(weights);
  770. convLayerIds[i] = net.addLayer(convParam.name, convParam.type, convParam);
  771. net.connect(0, 0, convLayerIds[i], 0);
  772. }
  773. LayerParams eltwiseParam;
  774. eltwiseParam.set("operation", op);
  775. if (op == "sum" && weighted)
  776. {
  777. RNG& rng = cv::theRNG();
  778. std::vector<float> coeff(1 + numConv);
  779. for (int i = 0; i < coeff.size(); ++i)
  780. {
  781. coeff[i] = rng.uniform(-2.0f, 2.0f);
  782. }
  783. eltwiseParam.set("coeff", DictValue::arrayReal<float*>(&coeff[0], coeff.size()));
  784. }
  785. eltwiseParam.type = "Eltwise";
  786. eltwiseParam.name = "testLayer";
  787. int eltwiseId = net.addLayer(eltwiseParam.name, eltwiseParam.type, eltwiseParam);
  788. if (convInputShift == 1)
  789. net.connect(0, 0, eltwiseId, 0);
  790. for (int i = 0; i < numConv; ++i)
  791. {
  792. net.connect(convLayerIds[i], 0, eltwiseId, i + convInputShift);
  793. }
  794. if (convInputShift == 0)
  795. net.connect(0, 0, eltwiseId, numConv);
  796. for (int i = numConv; i < numEltwiseInputs; ++i)
  797. {
  798. net.connect(0, 0, eltwiseId, i + 1);
  799. }
  800. int sz[] = {1, inSize[0], inSize[1], inSize[2]};
  801. Mat input(4, &sz[0], CV_32F);
  802. if (op == "div")
  803. randu(input, 1.0f, 1.0f); // ensure no divisor value has absouluate value of less than 0.5
  804. test(input, net, backendId, targetId, /*skipCheck*/false, (op == "div") ? false : true);
  805. }
// Cartesian product of input sizes, element-wise operations, branch counts and
// the weighted flag (coefficients are only applied for "sum").
INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Eltwise, Combine(
/*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
/*operation*/ Values("prod", "sum", "div", "max", "min"),
/*num convs*/ Values(1, 2, 3),
/*weighted(for sum only)*/ Bool(),
dnnBackendsAndTargetsWithHalide()
));
  813. ////////////////////////////////////////////////////////////////////////////
  814. // Mixed backends
  815. ////////////////////////////////////////////////////////////////////////////
  816. #ifdef HAVE_HALIDE
  817. TEST(MixedBackends_Halide_Default_Halide, Accuracy)
  818. {
  819. // Just a layer that supports Halide backend.
  820. LayerParams lrn;
  821. lrn.type = "LRN";
  822. lrn.name = "testLRN";
  823. // Some of layers that doesn't supports Halide backend yet.
  824. LayerParams mvn;
  825. mvn.type = "MVN";
  826. mvn.name = "testMVN";
  827. // Halide layer again.
  828. LayerParams lrn2;
  829. lrn2.type = "LRN";
  830. lrn2.name = "testLRN2";
  831. Net net;
  832. int lrnId = net.addLayer(lrn.name, lrn.type, lrn);
  833. net.connect(0, 0, lrnId, 0);
  834. net.addLayerToPrev(mvn.name, mvn.type, mvn);
  835. net.addLayerToPrev(lrn2.name, lrn2.type, lrn2);
  836. int sz[] = {4, 3, 5, 6};
  837. Mat input(4, &sz[0], CV_32F);
  838. randu(input, -1.0f, 1.0f);
  839. net.setInput(input);
  840. net.setPreferableBackend(DNN_BACKEND_OPENCV);
  841. Mat outputDefault = net.forward().clone();
  842. net.setPreferableBackend(DNN_BACKEND_HALIDE);
  843. net.setInput(input);
  844. Mat outputHalide = net.forward().clone();
  845. normAssert(outputDefault, outputHalide);
  846. net.setPreferableTarget(DNN_TARGET_OPENCL);
  847. net.setInput(input);
  848. outputHalide = net.forward().clone();
  849. normAssert(outputDefault, outputHalide);
  850. }
  851. #endif // HAVE_HALIDE
// Instantiate the shared Test_Halide_layers fixture over every Halide-enabled
// backend/target combination.
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_Halide_layers, dnnBackendsAndTargetsWithHalide());
  853. }} // namespace