// test_gpumat.cpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "test_precomp.hpp"

#ifdef HAVE_CUDA

#include "opencv2/core/cuda.hpp"
#include "opencv2/ts/cuda_test.hpp"

namespace opencv_test { namespace {
  47. ////////////////////////////////////////////////////////////////////////////////
  48. // SetTo
  49. PARAM_TEST_CASE(GpuMat_SetTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
  50. {
  51. cv::cuda::DeviceInfo devInfo;
  52. cv::Size size;
  53. int type;
  54. bool useRoi;
  55. virtual void SetUp()
  56. {
  57. devInfo = GET_PARAM(0);
  58. size = GET_PARAM(1);
  59. type = GET_PARAM(2);
  60. useRoi = GET_PARAM(3);
  61. cv::cuda::setDevice(devInfo.deviceID());
  62. }
  63. };
  64. CUDA_TEST_P(GpuMat_SetTo, Zero)
  65. {
  66. cv::Scalar zero = cv::Scalar::all(0);
  67. cv::cuda::GpuMat mat = createMat(size, type, useRoi);
  68. mat.setTo(zero);
  69. EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
  70. }
  71. CUDA_TEST_P(GpuMat_SetTo, SameVal)
  72. {
  73. cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));
  74. if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  75. {
  76. try
  77. {
  78. cv::cuda::GpuMat mat = createMat(size, type, useRoi);
  79. mat.setTo(val);
  80. }
  81. catch (const cv::Exception& e)
  82. {
  83. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  84. }
  85. }
  86. else
  87. {
  88. cv::cuda::GpuMat mat = createMat(size, type, useRoi);
  89. mat.setTo(val);
  90. EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
  91. }
  92. }
  93. CUDA_TEST_P(GpuMat_SetTo, DifferentVal)
  94. {
  95. cv::Scalar val = randomScalar(0.0, 255.0);
  96. if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  97. {
  98. try
  99. {
  100. cv::cuda::GpuMat mat = createMat(size, type, useRoi);
  101. mat.setTo(val);
  102. }
  103. catch (const cv::Exception& e)
  104. {
  105. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  106. }
  107. }
  108. else
  109. {
  110. cv::cuda::GpuMat mat = createMat(size, type, useRoi);
  111. mat.setTo(val);
  112. EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
  113. }
  114. }
  115. CUDA_TEST_P(GpuMat_SetTo, Masked)
  116. {
  117. cv::Scalar val = randomScalar(0.0, 255.0);
  118. cv::Mat mat_gold = randomMat(size, type);
  119. cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
  120. if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  121. {
  122. try
  123. {
  124. cv::cuda::GpuMat mat = createMat(size, type, useRoi);
  125. mat.setTo(val, loadMat(mask));
  126. }
  127. catch (const cv::Exception& e)
  128. {
  129. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  130. }
  131. }
  132. else
  133. {
  134. cv::cuda::GpuMat mat = loadMat(mat_gold, useRoi);
  135. mat.setTo(val, loadMat(mask, useRoi));
  136. mat_gold.setTo(val, mask);
  137. EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
  138. }
  139. }
  140. INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_SetTo, testing::Combine(
  141. ALL_DEVICES,
  142. DIFFERENT_SIZES,
  143. ALL_TYPES,
  144. WHOLE_SUBMAT));
  145. ////////////////////////////////////////////////////////////////////////////////
  146. // CopyTo
  147. PARAM_TEST_CASE(GpuMat_CopyTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
  148. {
  149. cv::cuda::DeviceInfo devInfo;
  150. cv::Size size;
  151. int type;
  152. bool useRoi;
  153. virtual void SetUp()
  154. {
  155. devInfo = GET_PARAM(0);
  156. size = GET_PARAM(1);
  157. type = GET_PARAM(2);
  158. useRoi = GET_PARAM(3);
  159. cv::cuda::setDevice(devInfo.deviceID());
  160. }
  161. };
  162. CUDA_TEST_P(GpuMat_CopyTo, WithOutMask)
  163. {
  164. cv::Mat src = randomMat(size, type);
  165. cv::cuda::GpuMat d_src = loadMat(src, useRoi);
  166. cv::cuda::GpuMat dst = createMat(size, type, useRoi);
  167. d_src.copyTo(dst);
  168. EXPECT_MAT_NEAR(src, dst, 0.0);
  169. }
  170. CUDA_TEST_P(GpuMat_CopyTo, Masked)
  171. {
  172. cv::Mat src = randomMat(size, type);
  173. cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);
  174. if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  175. {
  176. try
  177. {
  178. cv::cuda::GpuMat d_src = loadMat(src);
  179. cv::cuda::GpuMat dst;
  180. d_src.copyTo(dst, loadMat(mask, useRoi));
  181. }
  182. catch (const cv::Exception& e)
  183. {
  184. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  185. }
  186. }
  187. else
  188. {
  189. cv::cuda::GpuMat d_src = loadMat(src, useRoi);
  190. cv::cuda::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
  191. d_src.copyTo(dst, loadMat(mask, useRoi));
  192. cv::Mat dst_gold = cv::Mat::zeros(size, type);
  193. src.copyTo(dst_gold, mask);
  194. EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
  195. }
  196. }
  197. INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_CopyTo, testing::Combine(
  198. ALL_DEVICES,
  199. DIFFERENT_SIZES,
  200. ALL_TYPES,
  201. WHOLE_SUBMAT));
  202. ////////////////////////////////////////////////////////////////////////////////
  203. // ConvertTo
  204. PARAM_TEST_CASE(GpuMat_ConvertTo, cv::cuda::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
  205. {
  206. cv::cuda::DeviceInfo devInfo;
  207. cv::Size size;
  208. int depth1;
  209. int depth2;
  210. bool useRoi;
  211. virtual void SetUp()
  212. {
  213. devInfo = GET_PARAM(0);
  214. size = GET_PARAM(1);
  215. depth1 = GET_PARAM(2);
  216. depth2 = GET_PARAM(3);
  217. useRoi = GET_PARAM(4);
  218. cv::cuda::setDevice(devInfo.deviceID());
  219. }
  220. };
  221. CUDA_TEST_P(GpuMat_ConvertTo, WithOutScaling)
  222. {
  223. cv::Mat src = randomMat(size, depth1);
  224. if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  225. {
  226. try
  227. {
  228. cv::cuda::GpuMat d_src = loadMat(src);
  229. cv::cuda::GpuMat dst;
  230. d_src.convertTo(dst, depth2);
  231. }
  232. catch (const cv::Exception& e)
  233. {
  234. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  235. }
  236. }
  237. else
  238. {
  239. cv::cuda::GpuMat d_src = loadMat(src, useRoi);
  240. cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
  241. d_src.convertTo(dst, depth2);
  242. cv::Mat dst_gold;
  243. src.convertTo(dst_gold, depth2);
  244. EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 1.0 : 1e-4);
  245. }
  246. }
  247. CUDA_TEST_P(GpuMat_ConvertTo, WithScaling)
  248. {
  249. cv::Mat src = randomMat(size, depth1);
  250. double a = randomDouble(0.0, 1.0);
  251. double b = randomDouble(-10.0, 10.0);
  252. if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  253. {
  254. try
  255. {
  256. cv::cuda::GpuMat d_src = loadMat(src);
  257. cv::cuda::GpuMat dst;
  258. d_src.convertTo(dst, depth2, a, b);
  259. }
  260. catch (const cv::Exception& e)
  261. {
  262. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  263. }
  264. }
  265. else
  266. {
  267. cv::cuda::GpuMat d_src = loadMat(src, useRoi);
  268. cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
  269. d_src.convertTo(dst, depth2, a, b);
  270. cv::Mat dst_gold;
  271. src.convertTo(dst_gold, depth2, a, b);
  272. EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 1.0 : 1e-4);
  273. }
  274. }
  275. CUDA_TEST_P(GpuMat_ConvertTo, InplaceWithOutScaling)
  276. {
  277. cv::Mat src = randomMat(size, depth1);
  278. if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  279. {
  280. try
  281. {
  282. cv::cuda::GpuMat d_srcDst = loadMat(src);
  283. d_srcDst.convertTo(d_srcDst, depth2);
  284. }
  285. catch (const cv::Exception& e)
  286. {
  287. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  288. }
  289. }
  290. else
  291. {
  292. cv::cuda::GpuMat d_srcDst = loadMat(src, useRoi);
  293. d_srcDst.convertTo(d_srcDst, depth2);
  294. cv::Mat dst_gold;
  295. src.convertTo(dst_gold, depth2);
  296. EXPECT_MAT_NEAR(dst_gold, d_srcDst, depth2 < CV_32F ? 1.0 : 1e-4);
  297. }
  298. }
  299. CUDA_TEST_P(GpuMat_ConvertTo, InplaceWithScaling)
  300. {
  301. cv::Mat src = randomMat(size, depth1);
  302. double a = randomDouble(0.0, 1.0);
  303. double b = randomDouble(-10.0, 10.0);
  304. if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
  305. {
  306. try
  307. {
  308. cv::cuda::GpuMat d_srcDst = loadMat(src);
  309. d_srcDst.convertTo(d_srcDst, depth2, a, b);
  310. }
  311. catch (const cv::Exception& e)
  312. {
  313. ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
  314. }
  315. }
  316. else
  317. {
  318. cv::cuda::GpuMat d_srcDst = loadMat(src, useRoi);
  319. d_srcDst.convertTo(d_srcDst, depth2, a, b);
  320. cv::Mat dst_gold;
  321. src.convertTo(dst_gold, depth2, a, b);
  322. EXPECT_MAT_NEAR(dst_gold, d_srcDst, depth2 < CV_32F ? 1.0 : 1e-4);
  323. }
  324. }
  325. INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_ConvertTo, testing::Combine(
  326. ALL_DEVICES,
  327. DIFFERENT_SIZES,
  328. ALL_DEPTH,
  329. ALL_DEPTH,
  330. WHOLE_SUBMAT));
  331. ////////////////////////////////////////////////////////////////////////////////
  332. // locateROI
  333. PARAM_TEST_CASE(GpuMat_LocateROI, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
  334. {
  335. cv::cuda::DeviceInfo devInfo;
  336. cv::Size size;
  337. int depth;
  338. bool useRoi;
  339. virtual void SetUp()
  340. {
  341. devInfo = GET_PARAM(0);
  342. size = GET_PARAM(1);
  343. depth = GET_PARAM(2);
  344. useRoi = GET_PARAM(3);
  345. cv::cuda::setDevice(devInfo.deviceID());
  346. }
  347. };
  348. CUDA_TEST_P(GpuMat_LocateROI, locateROI)
  349. {
  350. Point ofsGold;
  351. Size wholeSizeGold;
  352. GpuMat src = createMat(size, depth, wholeSizeGold, ofsGold, useRoi);
  353. Point ofs;
  354. Size wholeSize;
  355. src.locateROI(wholeSize, ofs);
  356. ASSERT_TRUE(ofs == ofsGold && wholeSize == wholeSizeGold);
  357. GpuMat srcPtr(src.size(), src.type(), src.data, src.step);
  358. src.locateROI(wholeSize, ofs);
  359. ASSERT_TRUE(ofs == ofsGold && wholeSize == wholeSizeGold);
  360. }
  361. INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_LocateROI, testing::Combine(
  362. ALL_DEVICES,
  363. DIFFERENT_SIZES,
  364. ALL_DEPTH,
  365. WHOLE_SUBMAT));
  366. ////////////////////////////////////////////////////////////////////////////////
  367. // ensureSizeIsEnough
  368. struct EnsureSizeIsEnough : testing::TestWithParam<cv::cuda::DeviceInfo>
  369. {
  370. virtual void SetUp()
  371. {
  372. cv::cuda::DeviceInfo devInfo = GetParam();
  373. cv::cuda::setDevice(devInfo.deviceID());
  374. }
  375. };
  376. CUDA_TEST_P(EnsureSizeIsEnough, BufferReuse)
  377. {
  378. cv::cuda::GpuMat buffer(100, 100, CV_8U);
  379. cv::cuda::GpuMat old = buffer;
  380. // don't reallocate memory
  381. cv::cuda::ensureSizeIsEnough(10, 20, CV_8U, buffer);
  382. EXPECT_EQ(10, buffer.rows);
  383. EXPECT_EQ(20, buffer.cols);
  384. EXPECT_EQ(CV_8UC1, buffer.type());
  385. EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
  386. // don't reallocate memory
  387. cv::cuda::ensureSizeIsEnough(20, 30, CV_8U, buffer);
  388. EXPECT_EQ(20, buffer.rows);
  389. EXPECT_EQ(30, buffer.cols);
  390. EXPECT_EQ(CV_8UC1, buffer.type());
  391. EXPECT_EQ(reinterpret_cast<intptr_t>(old.data), reinterpret_cast<intptr_t>(buffer.data));
  392. }
  393. INSTANTIATE_TEST_CASE_P(CUDA, EnsureSizeIsEnough, ALL_DEVICES);
  394. ////////////////////////////////////////////////////////////////////////////////
  395. // createContinuous
  396. struct CreateContinuous : testing::TestWithParam<cv::cuda::DeviceInfo>
  397. {
  398. virtual void SetUp()
  399. {
  400. cv::cuda::DeviceInfo devInfo = GetParam();
  401. cv::cuda::setDevice(devInfo.deviceID());
  402. }
  403. };
  404. CUDA_TEST_P(CreateContinuous, BufferReuse)
  405. {
  406. cv::cuda::GpuMat buffer;
  407. cv::cuda::createContinuous(100, 100, CV_8UC1, buffer);
  408. EXPECT_EQ(100, buffer.rows);
  409. EXPECT_EQ(100, buffer.cols);
  410. EXPECT_EQ(CV_8UC1, buffer.type());
  411. EXPECT_TRUE(buffer.isContinuous());
  412. EXPECT_EQ(buffer.cols * sizeof(uchar), buffer.step);
  413. cv::cuda::createContinuous(10, 1000, CV_8UC1, buffer);
  414. EXPECT_EQ(10, buffer.rows);
  415. EXPECT_EQ(1000, buffer.cols);
  416. EXPECT_EQ(CV_8UC1, buffer.type());
  417. EXPECT_TRUE(buffer.isContinuous());
  418. EXPECT_EQ(buffer.cols * sizeof(uchar), buffer.step);
  419. cv::cuda::createContinuous(10, 10, CV_8UC1, buffer);
  420. EXPECT_EQ(10, buffer.rows);
  421. EXPECT_EQ(10, buffer.cols);
  422. EXPECT_EQ(CV_8UC1, buffer.type());
  423. EXPECT_TRUE(buffer.isContinuous());
  424. EXPECT_EQ(buffer.cols * sizeof(uchar), buffer.step);
  425. cv::cuda::createContinuous(100, 100, CV_8UC1, buffer);
  426. EXPECT_EQ(100, buffer.rows);
  427. EXPECT_EQ(100, buffer.cols);
  428. EXPECT_EQ(CV_8UC1, buffer.type());
  429. EXPECT_TRUE(buffer.isContinuous());
  430. EXPECT_EQ(buffer.cols * sizeof(uchar), buffer.step);
  431. }
  432. INSTANTIATE_TEST_CASE_P(CUDA, CreateContinuous, ALL_DEVICES);
  433. }} // namespace
  434. #endif // HAVE_CUDA