// TestHaarCascadeApplication.cpp
  1. /*M///////////////////////////////////////////////////////////////////////////////////////
  2. //
  3. // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
  4. //
  5. // By downloading, copying, installing or using the software you agree to this license.
  6. // If you do not agree to this license, do not download, install,
  7. // copy or use the software.
  8. //
  9. //
  10. // License Agreement
  11. // For Open Source Computer Vision Library
  12. //
  13. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
  14. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
  15. // Third party copyrights are property of their respective owners.
  16. //
  17. // Redistribution and use in source and binary forms, with or without modification,
  18. // are permitted provided that the following conditions are met:
  19. //
  20. // * Redistribution's of source code must retain the above copyright notice,
  21. // this list of conditions and the following disclaimer.
  22. //
  23. // * Redistribution's in binary form must reproduce the above copyright notice,
  24. // this list of conditions and the following disclaimer in the documentation
  25. // and/or other materials provided with the distribution.
  26. //
  27. // * The name of the copyright holders may not be used to endorse or promote products
  28. // derived from this software without specific prior written permission.
  29. //
  30. // This software is provided by the copyright holders and contributors "as is" and
  31. // any express or implied warranties, including, but not limited to, the implied
  32. // warranties of merchantability and fitness for a particular purpose are disclaimed.
  33. // In no event shall the Intel Corporation or contributors be liable for any direct,
  34. // indirect, incidental, special, exemplary, or consequential damages
  35. // (including, but not limited to, procurement of substitute goods or services;
  36. // loss of use, data, or profits; or business interruption) however caused
  37. // and on any theory of liability, whether in contract, strict liability,
  38. // or tort (including negligence or otherwise) arising in any way out of
  39. // the use of this software, even if advised of the possibility of such damage.
  40. //
  41. //M*/
  42. #include "test_precomp.hpp"
namespace
{
// RAII guard that switches the x87 FPU to single-precision (24-bit mantissa)
// rounding for its lifetime and restores the previous control word on
// destruction. Presumably used so host-side float arithmetic bit-matches the
// device results compared in process() — confirm against the NCV host path.
// Reference: http://www.christian-seiler.de/projekte/fpmath/
class FpuControl
{
public:
    FpuControl();
    ~FpuControl();

private:
    // Saved and modified FPU control words. Only declared on platforms where
    // the x87 control word is reachable: GCC-style toolchains excluding
    // Apple/ARM/AArch64/PowerPC64, and 32-bit Windows builds.
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__) && !defined(__powerpc64__)
    fpu_control_t fpu_oldcw, fpu_cw;
#elif defined(_WIN32) && !defined(_WIN64)
    unsigned int fpu_oldcw, fpu_cw;
#endif
};

FpuControl::FpuControl()
{
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__) && !defined(__powerpc64__)
    // Save the current control word, clear all precision bits, then select
    // single-precision mode.
    _FPU_GETCW(fpu_oldcw);
    fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;
    _FPU_SETCW(fpu_cw);
#elif defined(_WIN32) && !defined(_WIN64)
    // MSVC equivalent: read the current state, remember it, then set the
    // precision-control field to 24 bits (_PC_24).
    _controlfp_s(&fpu_cw, 0, 0);
    fpu_oldcw = fpu_cw;
    _controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
#endif
}

FpuControl::~FpuControl()
{
    // Restore the control word that was active before construction.
#if defined(__GNUC__) && !defined(__APPLE__) && !defined(__arm__) && !defined(__aarch64__) && !defined(__powerpc64__)
    _FPU_SETCW(fpu_oldcw);
#elif defined(_WIN32) && !defined(_WIN64)
    _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
#endif
}
}
// Constructs the test case; only stores the parameters — no allocation or
// cascade loading happens until process().
//   src_         : provider of 8-bit source image data
//   cascadeName_ : path/name of the Haar cascade file to load
//   width_, height_ : dimensions of the image the test will process
TestHaarCascadeApplication::TestHaarCascadeApplication(std::string testName_, NCVTestSourceProvider<Ncv8u> &src_,
                                                       std::string cascadeName_, Ncv32u width_, Ncv32u height_)
    :
    NCVTestProvider(testName_),
    src(src_),
    cascadeName(cascadeName_),
    width(width_),
    height(height_)
{
}
  89. bool TestHaarCascadeApplication::toString(std::ofstream &strOut)
  90. {
  91. strOut << "cascadeName=" << cascadeName << std::endl;
  92. strOut << "width=" << width << std::endl;
  93. strOut << "height=" << height << std::endl;
  94. return true;
  95. }
// No per-test initialization is needed; all resources are allocated inside
// process(). Always succeeds.
bool TestHaarCascadeApplication::init()
{
    return true;
}
// Runs one Haar-cascade application pass on the same image with both the
// host (CPU) implementation and the device (CUDA) implementation, then
// compares the two resulting pixel masks bit-to-bit. Returns true only when
// the detection counts match and every (sorted) mask entry is identical;
// returns false on any allocation, CUDA, or NCV-status failure along the way.
bool TestHaarCascadeApplication::process()
{
    NCVStatus ncvStat;
    bool rcode = false;

    // Query the cascade file for its element counts, then allocate matching
    // host (h_) and device (d_) vectors for stages, nodes, and features.
    Ncv32u numStages, numNodes, numFeatures;
    ncvStat = ncvHaarGetClassifierSize(this->cascadeName, numStages, numNodes, numFeatures);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    NCVVectorAlloc<HaarStage64> h_HaarStages(*this->allocatorCPU.get(), numStages);
    ncvAssertReturn(h_HaarStages.isMemAllocated(), false);
    NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(*this->allocatorCPU.get(), numNodes);
    ncvAssertReturn(h_HaarNodes.isMemAllocated(), false);
    NCVVectorAlloc<HaarFeature64> h_HaarFeatures(*this->allocatorCPU.get(), numFeatures);
    ncvAssertReturn(h_HaarFeatures.isMemAllocated(), false);

    NCVVectorAlloc<HaarStage64> d_HaarStages(*this->allocatorGPU.get(), numStages);
    ncvAssertReturn(d_HaarStages.isMemAllocated(), false);
    NCVVectorAlloc<HaarClassifierNode128> d_HaarNodes(*this->allocatorGPU.get(), numNodes);
    ncvAssertReturn(d_HaarNodes.isMemAllocated(), false);
    NCVVectorAlloc<HaarFeature64> d_HaarFeatures(*this->allocatorGPU.get(), numFeatures);
    ncvAssertReturn(d_HaarFeatures.isMemAllocated(), false);

    // Placeholder descriptor values; presumably overwritten by
    // ncvHaarLoadFromFile_host below — confirm against the NCV loader. The
    // 1x1 ClassifierSize keeps the searchRoi computation below valid even
    // when loading is skipped during the allocator counting pass.
    HaarClassifierCascadeDescriptor haar;
    haar.ClassifierSize.width = haar.ClassifierSize.height = 1;
    haar.bNeedsTiltedII = false;
    haar.NumClassifierRootNodes = numNodes;
    haar.NumClassifierTotalNodes = numNodes;
    haar.NumFeatures = numFeatures;
    haar.NumStages = numStages;

    // When the GPU allocator is only counting memory requirements, skip the
    // actual data transfers and computations between SKIP_COND markers.
    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());

    NCV_SKIP_COND_BEGIN

    // Load the cascade on the host and mirror it to the device buffers.
    ncvStat = ncvHaarLoadFromFile_host(this->cascadeName, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    ncvAssertReturn(NCV_SUCCESS == h_HaarStages.copySolid(d_HaarStages, 0), false);
    ncvAssertReturn(NCV_SUCCESS == h_HaarNodes.copySolid(d_HaarNodes, 0), false);
    ncvAssertReturn(NCV_SUCCESS == h_HaarFeatures.copySolid(d_HaarFeatures, 0), false);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);

    NCV_SKIP_COND_END

    // Integral images are one pixel larger than the source in each dimension;
    // the valid search area shrinks by the classifier window size.
    NcvSize32s srcRoi, srcIIRoi, searchRoi;
    srcRoi.width = this->width;
    srcRoi.height = this->height;
    srcIIRoi.width = srcRoi.width + 1;
    srcIIRoi.height = srcRoi.height + 1;
    searchRoi.width = srcIIRoi.width - haar.ClassifierSize.width;
    searchRoi.height = srcIIRoi.height - haar.ClassifierSize.height;
    if (searchRoi.width <= 0 || searchRoi.height <= 0)
    {
        // Image smaller than the classifier window — nothing to search.
        return false;
    }
    NcvSize32u searchRoiU(searchRoi.width, searchRoi.height);

    // Source image buffers (device and host).
    NCVMatrixAlloc<Ncv8u> d_img(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_img.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv8u> h_img(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_img.isMemAllocated(), false);

    // Integral and squared-integral images ((w+1) x (h+1)).
    Ncv32u integralWidth = this->width + 1;
    Ncv32u integralHeight = this->height + 1;

    NCVMatrixAlloc<Ncv32u> d_integralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(d_integralImage.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32u> h_integralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(h_integralImage.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv64u> h_sqIntegralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);
    ncvAssertReturn(h_sqIntegralImage.isMemAllocated(), false);

    // Per-pixel stddev and candidate-mask buffers, plus hypothesis vectors.
    NCVMatrixAlloc<Ncv32f> d_rectStdDev(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_rectStdDev.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32u> d_pixelMask(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_pixelMask.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32f> h_rectStdDev(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_rectStdDev.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32u> h_pixelMask(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_pixelMask.isMemAllocated(), false);

    NCVVectorAlloc<NcvRect32u> d_hypotheses(*this->allocatorGPU.get(), this->width * this->height);
    ncvAssertReturn(d_hypotheses.isMemAllocated(), false);
    NCVVectorAlloc<NcvRect32u> h_hypotheses(*this->allocatorCPU.get(), this->width * this->height);
    ncvAssertReturn(h_hypotheses.isMemAllocated(), false);

    // One shared scratch buffer sized for the larger of the two NPP-ST
    // integral computations.
    NCVStatus nppStat;
    Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
    nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &szTmpBufIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
    nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(this->width, this->height), &szTmpBufSqIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);
    NCVVectorAlloc<Ncv8u> d_tmpIIbuf(*this->allocatorGPU.get(), std::max(szTmpBufIntegral, szTmpBufSqIntegral));
    ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), false);

    Ncv32u detectionsOnThisScale_d = 0;
    Ncv32u detectionsOnThisScale_h = 0;

    NCV_SKIP_COND_BEGIN

    // Fill the host image from the test source and upload it to the device.
    ncvAssertReturn(this->src.fill(h_img), false);
    ncvStat = h_img.copySolid(d_img, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
    ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);

    // Compute integral and squared-integral images on the device.
    nppStat = nppiStIntegral_8u32u_C1R(d_img.ptr(), d_img.pitch(),
                                       d_integralImage.ptr(), d_integralImage.pitch(),
                                       NcvSize32u(d_img.width(), d_img.height()),
                                       d_tmpIIbuf.ptr(), szTmpBufIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);

    nppStat = nppiStSqrIntegral_8u64u_C1R(d_img.ptr(), d_img.pitch(),
                                          d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
                                          NcvSize32u(d_img.width(), d_img.height()),
                                          d_tmpIIbuf.ptr(), szTmpBufSqIntegral, this->devProp);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);

    // Per-window standard deviation over the classifier window, inset by
    // HAAR_STDDEV_BORDER on each side.
    const NcvRect32u rect(
        HAAR_STDDEV_BORDER,
        HAAR_STDDEV_BORDER,
        haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
        haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
    nppStat = nppiStRectStdDev_32f_C1R(
        d_integralImage.ptr(), d_integralImage.pitch(),
        d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
        d_rectStdDev.ptr(), d_rectStdDev.pitch(),
        NcvSize32u(searchRoi.width, searchRoi.height), rect,
        1.0f, true);
    ncvAssertReturn(nppStat == NPPST_SUCCESS, false);

    // Download the device-computed integral image and stddev so the host
    // cascade runs on exactly the same inputs as the device cascade.
    ncvStat = d_integralImage.copySolid(h_integralImage, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
    ncvStat = d_rectStdDev.copySolid(h_rectStdDev, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    // Seed the host pixel mask: inside the search ROI each element encodes
    // its own (row << 16 | col) coordinate; everything else (including the
    // stride padding) is marked invalid.
    for (Ncv32u i=0; i<searchRoiU.height; i++)
    {
        for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
        {
            if (j<searchRoiU.width)
            {
                h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = (i << 16) | j;
            }
            else
            {
                h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
            }
        }
    }

    ncvAssertReturn(cudaSuccess == cudaStreamSynchronize(0), false);

    {
        // Run the host-side cascade under single-precision FPU rounding (see
        // FpuControl above); the guard restores the FPU state at scope exit.
        FpuControl fpu;
        CV_UNUSED(fpu);

        ncvStat = ncvApplyHaarClassifierCascade_host(
            h_integralImage, h_rectStdDev, h_pixelMask,
            detectionsOnThisScale_h,
            haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,
            searchRoiU, 1, 1.0f);
        ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
    }
    NCV_SKIP_COND_END

    // Run the device-side cascade. Note: executed unconditionally (outside
    // the skip-cond region), presumably so the GPU allocator can count its
    // internal allocations — confirm against ncvApplyHaarClassifierCascade_device.
    int devId;
    ncvAssertCUDAReturn(cudaGetDevice(&devId), false);
    cudaDeviceProp _devProp;
    ncvAssertCUDAReturn(cudaGetDeviceProperties(&_devProp, devId), false);

    ncvStat = ncvApplyHaarClassifierCascade_device(
        d_integralImage, d_rectStdDev, d_pixelMask,
        detectionsOnThisScale_d,
        haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
        searchRoiU, 1, 1.0f,
        *this->allocatorGPU.get(), *this->allocatorCPU.get(),
        _devProp, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    NCVMatrixAlloc<Ncv32u> h_pixelMask_d(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_pixelMask_d.isMemAllocated(), false);

    //bit-to-bit check
    bool bLoopVirgin = true;

    NCV_SKIP_COND_BEGIN

    // Download the device mask; the counts must agree, and after sorting the
    // device-side detections, each entry must equal the host-side entry.
    ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);

    if (detectionsOnThisScale_d != detectionsOnThisScale_h)
    {
        bLoopVirgin = false;
    }
    else
    {
        std::sort(h_pixelMask_d.ptr(), h_pixelMask_d.ptr() + detectionsOnThisScale_d);
        for (Ncv32u i=0; i<detectionsOnThisScale_d && bLoopVirgin; i++)
        {
            if (h_pixelMask.ptr()[i] != h_pixelMask_d.ptr()[i])
            {
                bLoopVirgin = false;
            }
        }
    }

    NCV_SKIP_COND_END

    if (bLoopVirgin)
    {
        rcode = true;
    }

    return rcode;
}
// No per-test cleanup is needed; all buffers are RAII-owned locals of
// process(). Always succeeds.
bool TestHaarCascadeApplication::deinit()
{
    return true;
}