cudalegacy.hpp 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290
  1. /*M///////////////////////////////////////////////////////////////////////////////////////
  2. //
  3. // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
  4. //
  5. // By downloading, copying, installing or using the software you agree to this license.
  6. // If you do not agree to this license, do not download, install,
  7. // copy or use the software.
  8. //
  9. //
  10. // License Agreement
  11. // For Open Source Computer Vision Library
  12. //
  13. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
  14. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
  15. // Third party copyrights are property of their respective owners.
  16. //
  17. // Redistribution and use in source and binary forms, with or without modification,
  18. // are permitted provided that the following conditions are met:
  19. //
  20. // * Redistribution's of source code must retain the above copyright notice,
  21. // this list of conditions and the following disclaimer.
  22. //
  23. // * Redistribution's in binary form must reproduce the above copyright notice,
  24. // this list of conditions and the following disclaimer in the documentation
  25. // and/or other materials provided with the distribution.
  26. //
  27. // * The name of the copyright holders may not be used to endorse or promote products
  28. // derived from this software without specific prior written permission.
  29. //
  30. // This software is provided by the copyright holders and contributors "as is" and
  31. // any express or implied warranties, including, but not limited to, the implied
  32. // warranties of merchantability and fitness for a particular purpose are disclaimed.
  33. // In no event shall the Intel Corporation or contributors be liable for any direct,
  34. // indirect, incidental, special, exemplary, or consequential damages
  35. // (including, but not limited to, procurement of substitute goods or services;
  36. // loss of use, data, or profits; or business interruption) however caused
  37. // and on any theory of liability, whether in contract, strict liability,
  38. // or tort (including negligence or otherwise) arising in any way out of
  39. // the use of this software, even if advised of the possibility of such damage.
  40. //
  41. //M*/
  42. #ifndef OPENCV_CUDALEGACY_HPP
  43. #define OPENCV_CUDALEGACY_HPP
  44. #include "opencv2/core/cuda.hpp"
  45. #include "opencv2/cudalegacy/NCV.hpp"
  46. #include "opencv2/cudalegacy/NPP_staging.hpp"
  47. #include "opencv2/cudalegacy/NCVPyramid.hpp"
  48. #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp"
  49. #include "opencv2/cudalegacy/NCVBroxOpticalFlow.hpp"
  50. #include "opencv2/video/background_segm.hpp"
  51. /**
  52. @addtogroup cuda
  53. @{
  54. @defgroup cudalegacy Legacy support
  55. @}
  56. */
  57. namespace cv { namespace cuda {
  58. //! @addtogroup cudalegacy
  59. //! @{
  60. //
  61. // ImagePyramid
  62. //
/** @brief GPU image pyramid.

Holds a multi-layer pyramid built from a source image (see createImagePyramid) and
lets the caller retrieve a layer scaled to an arbitrary requested size.
*/
class CV_EXPORTS ImagePyramid : public Algorithm
{
public:
    /** @brief Retrieves the pyramid layer matching the requested size.
    @param outImg Destination image (GPU memory).
    @param outRoi Requested size of the returned layer.
    @param stream Stream for the asynchronous version.
    */
    virtual void getLayer(OutputArray outImg, Size outRoi, Stream& stream = Stream::Null()) const = 0;
};
  68. CV_EXPORTS Ptr<ImagePyramid> createImagePyramid(InputArray img, int nLayers = -1, Stream& stream = Stream::Null());
  69. //
  70. // GMG
  71. //
  72. /** @brief Background/Foreground Segmentation Algorithm.
  73. The class discriminates between foreground and background pixels by building and maintaining a model
  74. of the background. Any pixel which does not fit this model is then deemed to be foreground. The
  75. class implements algorithm described in @cite Gold2012 .
  76. */
class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor
{
public:
    using cv::BackgroundSubtractor::apply;

    /** @brief Updates the background model and computes the foreground mask.
    @param image Next video frame (GPU memory).
    @param fgmask Output foreground mask.
    @param learningRate Learning rate for the model update.
    @param stream Stream for the asynchronous version.
    */
    virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0;

    //! Maximum number of features (color samples) retained in the per-pixel histogram model.
    virtual int getMaxFeatures() const = 0;
    virtual void setMaxFeatures(int maxFeatures) = 0;

    //! Learning rate used when apply() is called without an explicit rate.
    virtual double getDefaultLearningRate() const = 0;
    virtual void setDefaultLearningRate(double lr) = 0;

    //! Number of frames used to initialize the background model.
    virtual int getNumFrames() const = 0;
    virtual void setNumFrames(int nframes) = 0;

    //! Number of discrete quantization levels per color channel.
    virtual int getQuantizationLevels() const = 0;
    virtual void setQuantizationLevels(int nlevels) = 0;

    //! Prior probability that any given pixel is background.
    virtual double getBackgroundPrior() const = 0;
    virtual void setBackgroundPrior(double bgprior) = 0;

    //! Radius of the smoothing (morphological) filter applied to the raw mask.
    virtual int getSmoothingRadius() const = 0;
    virtual void setSmoothingRadius(int radius) = 0;

    //! Value above which a pixel is classified as foreground.
    virtual double getDecisionThreshold() const = 0;
    virtual void setDecisionThreshold(double thresh) = 0;

    //! Whether the background model keeps being updated after initialization.
    virtual bool getUpdateBackgroundModel() const = 0;
    virtual void setUpdateBackgroundModel(bool update) = 0;

    //! Minimum value taken on by pixel channels (used for quantization range).
    virtual double getMinVal() const = 0;
    virtual void setMinVal(double val) = 0;

    //! Maximum value taken on by pixel channels (used for quantization range).
    virtual double getMaxVal() const = 0;
    virtual void setMaxVal(double val) = 0;
};
/** @brief Creates GMG Background Subtractor.

@param initializationFrames Number of frames of video to use to initialize histograms.
@param decisionThreshold Value above which pixel is determined to be FG.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorGMG>
    createBackgroundSubtractorGMG(int initializationFrames = 120, double decisionThreshold = 0.8);
  109. //
  110. // FGD
  111. //
  112. /** @brief The class discriminates between foreground and background pixels by building and maintaining a model
  113. of the background.
  114. Any pixel which does not fit this model is then deemed to be foreground. The class implements
  115. algorithm described in @cite FGD2003 .
  116. @sa BackgroundSubtractor
  117. */
class CV_EXPORTS BackgroundSubtractorFGD : public cv::BackgroundSubtractor
{
public:
    /** @brief Returns the output foreground regions calculated by findContours.

    @param foreground_regions Output array (CPU memory) of contours describing the
    foreground blobs found in the most recently processed frame.
    */
    virtual void getForegroundRegions(OutputArrayOfArrays foreground_regions) = 0;
};
/** @brief Parameters of the FGD background-subtraction algorithm (see @cite FGD2003).

Pass an instance to createBackgroundSubtractorFGD; the default constructor fills
in default values for all fields.
*/
struct CV_EXPORTS FGDParams
{
    int Lc;   //!< Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
    int N1c;  //!< Number of color vectors used to model normal background color variation at a given pixel.
    int N2c;  //!< Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
              //!< Used to allow the first N1c vectors to adapt over time to changing background.

    int Lcc;  //!< Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
    int N1cc; //!< Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
    int N2cc; //!< Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
              //!< Used to allow the first N1cc vectors to adapt over time to changing background.

    bool is_obj_without_holes; //!< If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
    int perform_morphing;      //!< Number of erode-dilate-erode foreground-blob cleanup iterations.
                               //!< These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.

    float alpha1; //!< How quickly we forget old background pixel values seen. Typically set to 0.1.
    float alpha2; //!< "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
    float alpha3; //!< Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.

    float delta;   //!< Affects color and color co-occurrence quantization, typically set to 2.
    float T;       //!< A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
    float minArea; //!< Discard foreground blobs whose bounding box is smaller than this threshold.

    //! Default constructor: initializes all fields to default values.
    FGDParams();
};
/** @brief Creates FGD Background Subtractor.

@param params Algorithm's parameters. See @cite FGD2003 for explanation.
*/
CV_EXPORTS Ptr<cuda::BackgroundSubtractorFGD>
    createBackgroundSubtractorFGD(const FGDParams& params = FGDParams());
  153. //
  154. // Optical flow
  155. //
/** @brief Calculates optical flow for 2 images using the block matching algorithm.

@param prev First frame (GPU memory).
@param curr Second frame; must match prev in size and type.
@param block_size Size of the matched blocks.
@param shift_size Step between neighboring blocks.
@param max_range Maximum block displacement searched in each direction.
@param use_previous If true, velx/vely on input are used as an initial guess.
@param velx Output horizontal component of the flow.
@param vely Output vertical component of the flow.
@param buf Temporary working buffer, reusable between calls.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr,
                                  Size block_size, Size shift_size, Size max_range, bool use_previous,
                                  GpuMat& velx, GpuMat& vely, GpuMat& buf,
                                  Stream& stream = Stream::Null());
/** @brief Fast block-matching optical flow.

Functor computing dense optical flow between two frames by block matching.
Internal buffers are members so repeated invocations reuse GPU allocations.
*/
class CV_EXPORTS FastOpticalFlowBM
{
public:
    /** @brief Computes the flow from I0 to I1.
    @param I0 First frame (GPU memory).
    @param I1 Second frame; must match I0 in size and type.
    @param flowx Output horizontal component of the flow.
    @param flowy Output vertical component of the flow.
    @param search_window Side of the search window around each block.
    @param block_window Side of the matched block.
    @param s Stream for the asynchronous version.
    */
    void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null());

private:
    GpuMat buffer;      // scratch space reused between calls
    GpuMat extended_I0; // border-extended copy of I0
    GpuMat extended_I1; // border-extended copy of I1
};
/** @brief Interpolates frames (images) using provided optical flow (displacement field).

@param frame0 First frame (32-bit floating point images, single channel).
@param frame1 Second frame. Must have the same type and size as frame0 .
@param fu Forward horizontal displacement.
@param fv Forward vertical displacement.
@param bu Backward horizontal displacement.
@param bv Backward vertical displacement.
@param pos New frame position (fractional position between frame0 and frame1).
@param newFrame Output image.
@param buf Temporary buffer, will have width x 6\*height size, CV_32FC1 type and contain 6
GpuMat: occlusion masks for first frame, occlusion masks for second, interpolated forward
horizontal flow, interpolated forward vertical flow, interpolated backward horizontal flow,
interpolated backward vertical flow.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
                                  const GpuMat& fu, const GpuMat& fv,
                                  const GpuMat& bu, const GpuMat& bv,
                                  float pos, GpuMat& newFrame, GpuMat& buf,
                                  Stream& stream = Stream::Null());
  190. CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
  191. //
  192. // Labeling
  193. //
/** @brief Performs labeling via graph cuts of a 2D regular 4-connected graph.
@param terminals Terminal (source/sink) capacities per node.
@param leftTransp,rightTransp Transposed horizontal edge capacities.
@param top,bottom Vertical edge capacities.
@param labels Output label image.
@param buf Temporary working buffer, reusable between calls.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels,
                         GpuMat& buf, Stream& stream = Stream::Null());
/** @brief Performs labeling via graph cuts of a 2D regular 8-connected graph.
@param terminals Terminal (source/sink) capacities per node.
@param leftTransp,rightTransp Transposed horizontal edge capacities.
@param top,topLeft,topRight,bottom,bottomLeft,bottomRight Vertical and diagonal edge capacities.
@param labels Output label image.
@param buf Temporary working buffer, reusable between calls.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
                         GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight,
                         GpuMat& labels,
                         GpuMat& buf, Stream& stream = Stream::Null());
/** @brief Computes the connectivity mask for generalized flood-fill components labeling.
@param image Source image (GPU memory).
@param mask Output connectivity mask, consumed by labelComponents.
@param lo,hi Lower/upper brightness (color) difference bounds between neighboring pixels
considered connected.
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null());
/** @brief Performs connected components labeling.
@param mask Connectivity mask produced by connectivityMask.
@param components Output label image.
@param flags Reserved; default 0 (meaning of nonzero values not visible in this header).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null());
  206. //
  207. // Calib3d
  208. //
/** @brief Rigidly transforms (rotates and translates) a set of 3D points on the GPU.
@param src Source points (GPU memory).
@param rvec Rotation vector (presumably Rodrigues form, as in cv::projectPoints — confirm in implementation).
@param tvec Translation vector.
@param dst Output transformed points (GPU memory).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
                                GpuMat& dst, Stream& stream = Stream::Null());
/** @brief Projects 3D points onto the image plane on the GPU (CUDA counterpart of cv::projectPoints).
@param src Source 3D points (GPU memory).
@param rvec Rotation vector (presumably Rodrigues form, as in cv::projectPoints — confirm in implementation).
@param tvec Translation vector.
@param camera_mat 3x3 matrix of intrinsic camera parameters.
@param dist_coef Distortion coefficients. See undistortPoints for details.
@param dst Output projected 2D points (GPU memory).
@param stream Stream for the asynchronous version.
*/
CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
                              const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
                              Stream& stream = Stream::Null());
/** @brief Finds the object pose from 3D-2D point correspondences.

@param object Single-row matrix of object points.
@param image Single-row matrix of image points.
@param camera_mat 3x3 matrix of intrinsic camera parameters.
@param dist_coef Distortion coefficients. See undistortPoints for details.
@param rvec Output 3D rotation vector.
@param tvec Output 3D translation vector.
@param use_extrinsic_guess Flag to indicate that the function must use rvec and tvec as an
initial transformation guess. It is not supported for now.
@param num_iters Maximum number of RANSAC iterations.
@param max_dist Euclidean distance threshold to detect whether point is inlier or not.
@param min_inlier_count Flag to indicate that the function must stop if greater or equal number
of inliers is achieved. It is not supported for now.
@param inliers Output vector of inlier indices; pass NULL if not needed.
*/
CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
                               const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
                               int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
                               std::vector<int>* inliers=NULL);
  233. //! @}
  234. }}
  235. #endif /* OPENCV_CUDALEGACY_HPP */