/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//               For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"

#ifdef HAVE_CUDA

#include <cuda_runtime_api.h>

namespace opencv_test { namespace {

/////////////////////////////////////////////////////////////////////////////////////////////////
// FAST

namespace
{
    IMPLEMENT_PARAM_CLASS(FAST_Threshold, int)
    IMPLEMENT_PARAM_CLASS(FAST_NonmaxSuppression, bool)
}

PARAM_TEST_CASE(FAST, cv::cuda::DeviceInfo, FAST_Threshold, FAST_NonmaxSuppression)
{
    cv::cuda::DeviceInfo devInfo;
    int threshold;
    bool nonmaxSuppression;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        threshold = GET_PARAM(1);
        nonmaxSuppression = GET_PARAM(2);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};
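
// Compare CUDA FAST keypoints with the CPU cv::FAST reference computed with the
// same threshold and non-max suppression settings; the two sets must match exactly.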
CUDA_TEST_P(FAST, Accuracy)
{
    cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
    ASSERT_FALSE(image.empty());

    cv::Ptr<cv::cuda::FastFeatureDetector> fast = cv::cuda::FastFeatureDetector::create(threshold, nonmaxSuppression);

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        throw SkipTestException("CUDA device doesn't support global atomics");
    }
    else
    {
        std::vector<cv::KeyPoint> keypoints;
        fast->detect(loadMat(image), keypoints);

        std::vector<cv::KeyPoint> keypoints_gold;
        cv::FAST(image, keypoints_gold, threshold, nonmaxSuppression);

        ASSERT_KEYPOINTS_EQ(keypoints_gold, keypoints);
    }
}
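
// Loop body for cv::parallel_for_: each worker thread gets its own CUDA stream and
// its own detector instance, so the two detectAsync() calls can run concurrently.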
class FastAsyncParallelLoopBody : public cv::ParallelLoopBody
{
public:
    FastAsyncParallelLoopBody(cv::cuda::HostMem& src, cv::cuda::GpuMat* d_kpts, cv::Ptr<cv::cuda::FastFeatureDetector>* d_fast)
        : src_(src), kpts_(d_kpts), fast_(d_fast) {}

    ~FastAsyncParallelLoopBody() {}

    void operator()(const cv::Range& r) const
    {
        for (int i = r.start; i < r.end; i++)
        {
            cv::cuda::Stream stream;
            cv::cuda::GpuMat d_src_(src_.rows, src_.cols, CV_8UC1);
            d_src_.upload(src_, stream); // asynchronous upload on the per-thread stream
            fast_[i]->detectAsync(d_src_, kpts_[i], noArray(), stream);
        }
    }

protected:
    cv::cuda::HostMem src_;
    cv::cuda::GpuMat* kpts_;
    cv::Ptr<cv::cuda::FastFeatureDetector>* fast_;
};
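
// Launch two detectors from parallel host threads, wait for all device work with
// cudaDeviceSynchronize(), then check both asynchronous results against the CPU reference.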
CUDA_TEST_P(FAST, Async)
{
    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        throw SkipTestException("CUDA device doesn't support global atomics");
    }
    else
    {
        cv::Mat image_ = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
        ASSERT_FALSE(image_.empty());
        cv::cuda::HostMem image(image_);

        cv::cuda::GpuMat d_keypoints[2];
        cv::Ptr<cv::cuda::FastFeatureDetector> d_fast[2];
        d_fast[0] = cv::cuda::FastFeatureDetector::create(threshold, nonmaxSuppression);
        d_fast[1] = cv::cuda::FastFeatureDetector::create(threshold, nonmaxSuppression);

        cv::parallel_for_(cv::Range(0, 2), FastAsyncParallelLoopBody(image, d_keypoints, d_fast));
        cudaDeviceSynchronize();

        std::vector<cv::KeyPoint> keypoints[2];
        d_fast[0]->convert(d_keypoints[0], keypoints[0]);
        d_fast[1]->convert(d_keypoints[1], keypoints[1]);

        std::vector<cv::KeyPoint> keypoints_gold;
        cv::FAST(image, keypoints_gold, threshold, nonmaxSuppression);

        ASSERT_KEYPOINTS_EQ(keypoints_gold, keypoints[0]);
        ASSERT_KEYPOINTS_EQ(keypoints_gold, keypoints[1]);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Features2D, FAST, testing::Combine(
    ALL_DEVICES,
    testing::Values(FAST_Threshold(25), FAST_Threshold(50)),
    testing::Values(FAST_NonmaxSuppression(false), FAST_NonmaxSuppression(true))));

/////////////////////////////////////////////////////////////////////////////////////////////////
// ORB

namespace
{
    IMPLEMENT_PARAM_CLASS(ORB_FeaturesCount, int)
    IMPLEMENT_PARAM_CLASS(ORB_ScaleFactor, float)
    IMPLEMENT_PARAM_CLASS(ORB_LevelsCount, int)
    IMPLEMENT_PARAM_CLASS(ORB_EdgeThreshold, int)
    IMPLEMENT_PARAM_CLASS(ORB_firstLevel, int)
    IMPLEMENT_PARAM_CLASS(ORB_WTA_K, int)
    IMPLEMENT_PARAM_CLASS(ORB_PatchSize, int)
    IMPLEMENT_PARAM_CLASS(ORB_BlurForDescriptor, bool)
}

PARAM_TEST_CASE(ORB, cv::cuda::DeviceInfo, ORB_FeaturesCount, ORB_ScaleFactor, ORB_LevelsCount, ORB_EdgeThreshold, ORB_firstLevel, ORB_WTA_K, cv::ORB::ScoreType, ORB_PatchSize, ORB_BlurForDescriptor)
{
    cv::cuda::DeviceInfo devInfo;
    int nFeatures;
    float scaleFactor;
    int nLevels;
    int edgeThreshold;
    int firstLevel;
    int WTA_K;
    cv::ORB::ScoreType scoreType;
    int patchSize;
    bool blurForDescriptor;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        nFeatures = GET_PARAM(1);
        scaleFactor = GET_PARAM(2);
        nLevels = GET_PARAM(3);
        edgeThreshold = GET_PARAM(4);
        firstLevel = GET_PARAM(5);
        WTA_K = GET_PARAM(6);
        scoreType = GET_PARAM(7);
        patchSize = GET_PARAM(8);
        blurForDescriptor = GET_PARAM(9);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};
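
// CUDA ORB is not expected to reproduce CPU cv::ORB exactly. Instead, the descriptors
// from both implementations are cross-matched with a Hamming-norm BFMatcher and the
// test requires more than 35% of the keypoints to agree.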
CUDA_TEST_P(ORB, Accuracy)
{
    cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
    ASSERT_FALSE(image.empty());

    cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
    mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));

    cv::Ptr<cv::cuda::ORB> orb =
        cv::cuda::ORB::create(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel,
                              WTA_K, scoreType, patchSize, 20, blurForDescriptor);

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        try
        {
            std::vector<cv::KeyPoint> keypoints;
            cv::cuda::GpuMat descriptors;
            orb->detectAndComputeAsync(loadMat(image), loadMat(mask), rawOut(keypoints), descriptors);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        std::vector<cv::KeyPoint> keypoints;
        cv::cuda::GpuMat descriptors;
        orb->detectAndCompute(loadMat(image), loadMat(mask), keypoints, descriptors);

        cv::Ptr<cv::ORB> orb_gold = cv::ORB::create(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize);

        std::vector<cv::KeyPoint> keypoints_gold;
        cv::Mat descriptors_gold;
        orb_gold->detectAndCompute(image, mask, keypoints_gold, descriptors_gold);

        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector<cv::DMatch> matches;
        matcher.match(descriptors_gold, cv::Mat(descriptors), matches);

        int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints, matches);
        double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();

        EXPECT_GT(matchedRatio, 0.35);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Features2D, ORB, testing::Combine(
    ALL_DEVICES,
    testing::Values(ORB_FeaturesCount(1000)),
    testing::Values(ORB_ScaleFactor(1.2f)),
    testing::Values(ORB_LevelsCount(4), ORB_LevelsCount(8)),
    testing::Values(ORB_EdgeThreshold(31)),
    testing::Values(ORB_firstLevel(0)),
    testing::Values(ORB_WTA_K(2), ORB_WTA_K(3), ORB_WTA_K(4)),
    testing::Values(cv::ORB::HARRIS_SCORE),
    testing::Values(ORB_PatchSize(31), ORB_PatchSize(29)),
    testing::Values(ORB_BlurForDescriptor(false), ORB_BlurForDescriptor(true))));

/////////////////////////////////////////////////////////////////////////////////////////////////
// BruteForceMatcher

namespace
{
    IMPLEMENT_PARAM_CLASS(DescriptorSize, int)
    IMPLEMENT_PARAM_CLASS(UseMask, bool)
}
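
// The fixture below builds synthetic descriptors with a known nearest-neighbor
// structure: each of the queryDescCount query descriptors is copied into the train
// set countFactor times, and copy c of query q (train row q * countFactor + c) is
// perturbed by an amount drawn from [c / countFactor, (c + 1) / countFactor).
// The k-th nearest train descriptor of query q is therefore row q * countFactor + k,
// which is exactly what the tests assert.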
PARAM_TEST_CASE(BruteForceMatcher, cv::cuda::DeviceInfo, NormCode, DescriptorSize, UseMask)
{
    cv::cuda::DeviceInfo devInfo;
    int normCode;
    int dim;
    bool useMask;

    int queryDescCount;
    int countFactor;

    cv::Mat query, train;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        normCode = GET_PARAM(1);
        dim = GET_PARAM(2);
        useMask = GET_PARAM(3);

        cv::cuda::setDevice(devInfo.deviceID());

        queryDescCount = 300; // must be even, because some tests split the train data in two
        countFactor = 4; // do not change it

        cv::RNG& rng = cvtest::TS::ptr()->get_rng();

        cv::Mat queryBuf, trainBuf;

        // Generate query descriptors randomly.
        // Descriptor vector elements are integer values.
        queryBuf.create(queryDescCount, dim, CV_32SC1);
        rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
        queryBuf.convertTo(queryBuf, CV_32FC1);

        // Generate train descriptors as follows: copy each query descriptor
        // into the train set countFactor times, and perturb one element of
        // each copy by an amount that grows with the copy index. All
        // perturbations lie in (0.f, 1.f).
        trainBuf.create(queryDescCount * countFactor, dim, CV_32FC1);
        float step = 1.f / countFactor;
        for (int qIdx = 0; qIdx < queryDescCount; qIdx++)
        {
            cv::Mat queryDescriptor = queryBuf.row(qIdx);
            for (int c = 0; c < countFactor; c++)
            {
                int tIdx = qIdx * countFactor + c;
                cv::Mat trainDescriptor = trainBuf.row(tIdx);
                queryDescriptor.copyTo(trainDescriptor);
                int elem = rng(dim);
                float diff = rng.uniform(step * c, step * (c + 1));
                trainDescriptor.at<float>(0, elem) += diff;
            }
        }

        queryBuf.convertTo(query, CV_32F);
        trainBuf.convertTo(train, CV_32F);
    }
};

CUDA_TEST_P(BruteForceMatcher, Match_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    cv::cuda::GpuMat mask;
    if (useMask)
    {
        mask.create(query.rows, train.rows, CV_8UC1);
        mask.setTo(cv::Scalar::all(1));
    }

    std::vector<cv::DMatch> matches;
    matcher->match(loadMat(query), loadMat(train), matches, mask);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        cv::DMatch match = matches[i];
        if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor) || (match.imgIdx != 0))
            badCount++;
    }

    ASSERT_EQ(0, badCount);
}
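
// The *_Collection tests split the train set across two add() calls, so matches from
// the second half must report imgIdx == 1. When masks are used, the exact copy of every
// query descriptor (the first of its countFactor train copies) is masked out, which
// shifts the expected trainIdx by one.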
CUDA_TEST_P(BruteForceMatcher, Match_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test incremental addition of train descriptors
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    std::vector<cv::DMatch> matches;
    if (useMask)
        matcher->match(cv::cuda::GpuMat(query), matches, masks);
    else
        matcher->match(cv::cuda::GpuMat(query), matches);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = useMask ? 1 : 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        cv::DMatch match = matches[i];
        if ((int)i < queryDescCount / 2)
        {
            bool validQueryIdx = (match.queryIdx == (int)i);
            bool validTrainIdx = (match.trainIdx == (int)i * countFactor + shift);
            bool validImgIdx = (match.imgIdx == 0);
            if (!validQueryIdx || !validTrainIdx || !validImgIdx)
                badCount++;
        }
        else
        {
            bool validQueryIdx = (match.queryIdx == (int)i);
            bool validTrainIdx = (match.trainIdx == ((int)i - queryDescCount / 2) * countFactor + shift);
            bool validImgIdx = (match.imgIdx == 1);
            if (!validQueryIdx || !validTrainIdx || !validImgIdx)
                badCount++;
        }
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 2;

    cv::cuda::GpuMat mask;
    if (useMask)
    {
        mask.create(query.rows, train.rows, CV_8UC1);
        mask.setTo(cv::Scalar::all(1));
    }

    std::vector< std::vector<cv::DMatch> > matches;
    matcher->knnMatch(loadMat(query), loadMat(train), matches, knn, mask);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k) || (match.imgIdx != 0))
                    localBadCount++;
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 3;

    cv::cuda::GpuMat mask;
    if (useMask)
    {
        mask.create(query.rows, train.rows, CV_8UC1);
        mask.setTo(cv::Scalar::all(1));
    }

    std::vector< std::vector<cv::DMatch> > matches;
    matcher->knnMatch(loadMat(query), loadMat(train), matches, knn, mask);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k) || (match.imgIdx != 0))
                    localBadCount++;
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 2;

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test incremental addition of train descriptors
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    std::vector< std::vector<cv::DMatch> > matches;
    if (useMask)
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn, masks);
    else
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = useMask ? 1 : 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((int)i < queryDescCount / 2)
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0))
                        localBadCount++;
                }
                else
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1))
                        localBadCount++;
                }
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}

CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int knn = 3;

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test incremental addition of train descriptors
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    std::vector< std::vector<cv::DMatch> > matches;
    if (useMask)
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn, masks);
    else
        matcher->knnMatch(cv::cuda::GpuMat(query), matches, knn);

    ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

    int badCount = 0;
    int shift = useMask ? 1 : 0;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if ((int)matches[i].size() != knn)
            badCount++;
        else
        {
            int localBadCount = 0;
            for (int k = 0; k < knn; k++)
            {
                cv::DMatch match = matches[i][k];
                if ((int)i < queryDescCount / 2)
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0))
                        localBadCount++;
                }
                else
                {
                    if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1))
                        localBadCount++;
                }
            }
            badCount += localBadCount > 0 ? 1 : 0;
        }
    }

    ASSERT_EQ(0, badCount);
}
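
// radius = 1 / countFactor admits only the first copy of each query descriptor
// (its perturbation is below 1 / countFactor), so exactly one match per query is expected.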
CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Single)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const float radius = 1.f / countFactor;

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        try
        {
            std::vector< std::vector<cv::DMatch> > matches;
            matcher->radiusMatch(loadMat(query), loadMat(train), matches, radius);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat mask;
        if (useMask)
        {
            mask.create(query.rows, train.rows, CV_8UC1);
            mask.setTo(cv::Scalar::all(1));
        }

        std::vector< std::vector<cv::DMatch> > matches;
        matcher->radiusMatch(loadMat(query), loadMat(train), matches, radius, mask);

        ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

        int badCount = 0;
        for (size_t i = 0; i < matches.size(); i++)
        {
            if ((int)matches[i].size() != 1)
                badCount++;
            else
            {
                cv::DMatch match = matches[i][0];
                if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor) || (match.imgIdx != 0))
                    badCount++;
            }
        }

        ASSERT_EQ(0, badCount);
    }
}
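
// Widening the radius to n / countFactor admits the n closest copies of each query
// descriptor; with masks the first copy is excluded, leaving n - 1 expected matches.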
CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Collection)
{
    cv::Ptr<cv::cuda::DescriptorMatcher> matcher =
        cv::cuda::DescriptorMatcher::createBFMatcher(normCode);

    const int n = 3;
    const float radius = 1.f / countFactor * n;

    cv::cuda::GpuMat d_train(train);

    // call add() twice to test incremental addition of train descriptors
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
    matcher->add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));

    // prepare masks (make the first nearest match illegal)
    std::vector<cv::cuda::GpuMat> masks(2);
    for (int mi = 0; mi < 2; mi++)
    {
        masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
        for (int di = 0; di < queryDescCount / 2; di++)
            masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
    }

    if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
    {
        try
        {
            std::vector< std::vector<cv::DMatch> > matches;
            matcher->radiusMatch(cv::cuda::GpuMat(query), matches, radius, masks);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        std::vector< std::vector<cv::DMatch> > matches;
        if (useMask)
            matcher->radiusMatch(cv::cuda::GpuMat(query), matches, radius, masks);
        else
            matcher->radiusMatch(cv::cuda::GpuMat(query), matches, radius);

        ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());

        int badCount = 0;
        int shift = useMask ? 1 : 0;
        int needMatchCount = useMask ? n - 1 : n;
        for (size_t i = 0; i < matches.size(); i++)
        {
            if ((int)matches[i].size() != needMatchCount)
                badCount++;
            else
            {
                int localBadCount = 0;
                for (int k = 0; k < needMatchCount; k++)
                {
                    cv::DMatch match = matches[i][k];
                    if ((int)i < queryDescCount / 2)
                    {
                        if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0))
                            localBadCount++;
                    }
                    else
                    {
                        if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1))
                            localBadCount++;
                    }
                }
                badCount += localBadCount > 0 ? 1 : 0;
            }
        }

        ASSERT_EQ(0, badCount);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Features2D, BruteForceMatcher, testing::Combine(
    ALL_DEVICES,
    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2)),
    testing::Values(DescriptorSize(57), DescriptorSize(64), DescriptorSize(83), DescriptorSize(128), DescriptorSize(179), DescriptorSize(256), DescriptorSize(304)),
    testing::Values(UseMask(false), UseMask(true))));

}} // namespace

#endif // HAVE_CUDA