/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.

License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)

Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_ARUCO_HPP__
#define __OPENCV_ARUCO_HPP__

#include <opencv2/core.hpp>
#include <vector>
#include "opencv2/aruco/dictionary.hpp"
/**
 * @defgroup aruco ArUco Marker Detection
 * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers).
 * These markers are useful for easy, fast and robust camera pose estimation.
 *
 * The main functionalities are:
 * - Detection of markers in an image
 * - Pose estimation from a single marker or from a board/set of markers
 * - Detection of ChArUco boards for high subpixel accuracy
 * - Camera calibration from both ArUco boards and ChArUco boards
 * - Detection of ChArUco diamond markers
 *
 * The samples directory includes easy examples of how to use the module.
 *
 * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014.
 *
 * Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method.
 *
 * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
 * "Automatic generation and detection of highly reliable fiducial markers under occlusion".
 * Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
 *
 * @sa http://www.uco.es/investiga/grupos/ava/node/26
 *
 * This module was originally developed by Sergio Garrido-Jurado as a project
 * for Google Summer of Code 2015 (GSoC 15).
 */
namespace cv {
namespace aruco {

//! @addtogroup aruco
//! @{

enum CornerRefineMethod {
    CORNER_REFINE_NONE,     ///< Tag and corners detection based on the ArUco approach
    CORNER_REFINE_SUBPIX,   ///< ArUco approach and refine the corners locations using corner subpixel accuracy
    CORNER_REFINE_CONTOUR,  ///< ArUco approach and refine the corners locations using the contour-points line fitting
    CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros
};
/**
 * @brief Parameters for the detectMarker process:
 * - adaptiveThreshWinSizeMin: minimum window size for adaptive thresholding before finding
 *   contours (default 3).
 * - adaptiveThreshWinSizeMax: maximum window size for adaptive thresholding before finding
 *   contours (default 23).
 * - adaptiveThreshWinSizeStep: increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax
 *   during the thresholding (default 10).
 * - adaptiveThreshConstant: constant for adaptive thresholding before finding contours (default 7).
 * - minMarkerPerimeterRate: determines the minimum perimeter for a marker contour to be detected. This
 *   is defined as a rate with respect to the maximum dimension of the input image (default 0.03).
 * - maxMarkerPerimeterRate: determines the maximum perimeter for a marker contour to be detected. This
 *   is defined as a rate with respect to the maximum dimension of the input image (default 4.0).
 * - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to
 *   determine which contours are squares (default 0.03).
 * - minCornerDistanceRate: minimum distance between corners for detected markers relative to their
 *   perimeter (default 0.05).
 * - minDistanceToBorder: minimum distance of any corner to the image border for detected markers
 *   (in pixels) (default 3).
 * - minMarkerDistanceRate: minimum mean distance between two marker corners to be considered
 *   similar, so that the smaller one is removed. The rate is relative to the smaller perimeter
 *   of the two markers (default 0.05).
 * - cornerRefinementMethod: corner refinement method (CORNER_REFINE_NONE, no refinement;
 *   CORNER_REFINE_SUBPIX, do subpixel refinement; CORNER_REFINE_CONTOUR, use contour points;
 *   CORNER_REFINE_APRILTAG, use the AprilTag 2 approach) (default CORNER_REFINE_NONE).
 * - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5).
 * - cornerRefinementMaxIterations: maximum number of iterations for the stop criteria of the corner
 *   refinement process (default 30).
 * - cornerRefinementMinAccuracy: minimum error for the stop criteria of the corner refinement
 *   process (default 0.1).
 * - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1).
 * - perspectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker
 *   when removing the perspective (default 4).
 * - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not
 *   considered for the determination of the cell bit. Represents the rate with respect to the total
 *   size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13).
 * - maxErroneousBitsInBorderRate: maximum number of accepted erroneous bits in the border (i.e.
 *   number of allowed white bits in the border). Represented as a rate with respect to the total
 *   number of bits per marker (default 0.35).
 * - minOtsuStdDev: minimum standard deviation in pixel values during the decoding step to
 *   apply Otsu thresholding (otherwise, all the bits are set to 0 or 1 depending on whether the mean
 *   is higher than 128 or not) (default 5.0).
 * - errorCorrectionRate: error correction rate with respect to the maximum error correction capability
 *   of each dictionary (default 0.6).
 * - aprilTagMinClusterPixels: reject quads containing too few pixels (default 5).
 * - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad (default 10).
 * - aprilTagCriticalRad: reject quads where pairs of edges have angles that are close to straight or close to
 *   180 degrees. Zero means that no quads are rejected. (In radians) (default 10*PI/180).
 * - aprilTagMaxLineFitMse: when fitting lines to the contours, the maximum mean squared error
 *   allowed. This is useful in rejecting contours that are far from being quad shaped; rejecting
 *   these quads "early" saves expensive decoding processing (default 10.0).
 * - aprilTagMinWhiteBlackDiff: when building the model of black & white pixels, an extra check is added that
 *   the white model must be (overall) brighter than the black model, by at least this amount (in pixel values, [0,255]) (default 5).
 * - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images (default 0).
 * - aprilTagQuadDecimate: detection of quads can be done on a lower-resolution image, improving speed at a
 *   cost of pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still
 *   done at full resolution (default 0.0).
 * - aprilTagQuadSigma: what Gaussian blur should be applied to the segmented image (used for quad detection)?
 *   The parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8) (default 0.0).
 * - detectInvertedMarker: check for white (inverted) markers. To generate a "white" marker, just
 *   invert a normal marker by using a tilde, ~markerImage (default false).
 * - useAruco3Detection: enables the new and faster Aruco detection strategy. The most important observation from the authors of
 *   Romero-Ramirez et al.: "Speeded up detection of squared fiducial markers" (2018) is that the binary
 *   code of a marker can be reliably detected if the canonical image (that is used to extract the binary code)
 *   has a size of minSideLengthCanonicalImg (in practice tau_c = 16-32 pixels).
 *   Link to article: https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers
 *   In addition, very small markers are barely useful for pose estimation, so we can define a minimum marker size that we
 *   still want to be able to detect (e.g. 50x50 pixels).
 *   To decouple this from the initial image size, the authors propose to resize the input image
 *   to (I_w_r, I_h_r) = (tau_c / tau_dot_i) * (I_w, I_h), with tau_dot_i = tau_c + max(I_w, I_h) * tau_i.
 *   Here tau_i (parameter: minMarkerLengthRatioOriginalImg) is a ratio in the range [0,1].
 *   If we set it to 0, the smallest marker we can detect has a side length of tau_c.
 *   If we set it to 1, the marker would have to fill the entire image to be detected.
 *   For a FullHD video, a good value to start with is 0.1.
 * - minSideLengthCanonicalImg: minimum side length of a marker in the canonical image.
 *   The latter is the binarized image in which contours are searched.
 *   So all contours with a size smaller than minSideLengthCanonicalImg*minSideLengthCanonicalImg will be omitted from the search.
 * - minMarkerLengthRatioOriginalImg: range [0,1], eq. (2) from the paper.
 *   The parameter tau_i has a direct influence on the processing speed.
 */
struct CV_EXPORTS_W DetectorParameters {
    DetectorParameters();

    CV_WRAP static Ptr<DetectorParameters> create();
    CV_WRAP static bool readDetectorParameters(const FileNode& fn, Ptr<DetectorParameters>& params);

    CV_PROP_RW int adaptiveThreshWinSizeMin;
    CV_PROP_RW int adaptiveThreshWinSizeMax;
    CV_PROP_RW int adaptiveThreshWinSizeStep;
    CV_PROP_RW double adaptiveThreshConstant;
    CV_PROP_RW double minMarkerPerimeterRate;
    CV_PROP_RW double maxMarkerPerimeterRate;
    CV_PROP_RW double polygonalApproxAccuracyRate;
    CV_PROP_RW double minCornerDistanceRate;
    CV_PROP_RW int minDistanceToBorder;
    CV_PROP_RW double minMarkerDistanceRate;
    CV_PROP_RW int cornerRefinementMethod;
    CV_PROP_RW int cornerRefinementWinSize;
    CV_PROP_RW int cornerRefinementMaxIterations;
    CV_PROP_RW double cornerRefinementMinAccuracy;
    CV_PROP_RW int markerBorderBits;
    CV_PROP_RW int perspectiveRemovePixelPerCell;
    CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell;
    CV_PROP_RW double maxErroneousBitsInBorderRate;
    CV_PROP_RW double minOtsuStdDev;
    CV_PROP_RW double errorCorrectionRate;

    // April :: User-configurable parameters.
    CV_PROP_RW float aprilTagQuadDecimate;
    CV_PROP_RW float aprilTagQuadSigma;

    // April :: Internal variables
    CV_PROP_RW int aprilTagMinClusterPixels;
    CV_PROP_RW int aprilTagMaxNmaxima;
    CV_PROP_RW float aprilTagCriticalRad;
    CV_PROP_RW float aprilTagMaxLineFitMse;
    CV_PROP_RW int aprilTagMinWhiteBlackDiff;
    CV_PROP_RW int aprilTagDeglitch;

    // to detect white (inverted) markers
    CV_PROP_RW bool detectInvertedMarker;

    // New Aruco functionality proposed in the paper:
    // Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018)
    CV_PROP_RW bool useAruco3Detection;
    CV_PROP_RW int minSideLengthCanonicalImg;
    CV_PROP_RW float minMarkerLengthRatioOriginalImg;
};
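/* Usage sketch (illustrative, not part of the original header): create a DetectorParameters
 * object and tune a few fields before detection. The chosen values are arbitrary examples,
 * not recommendations.
 *
 *     #include <opencv2/aruco.hpp>
 *
 *     cv::Ptr<cv::aruco::DetectorParameters> makeParams()
 *     {
 *         cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
 *         params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX; // enable subpixel corner refinement
 *         params->adaptiveThreshWinSizeMax = 33;                            // widen the adaptive threshold window range
 *         params->errorCorrectionRate = 0.5;                                // be stricter about bit errors
 *         return params;
 *     }
 */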
/**
 * @brief Basic marker detection
 *
 * @param image input image
 * @param dictionary indicates the type of markers that will be searched
 * @param corners vector of detected marker corners. For each marker, its four corners
 * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers,
 * the dimensions of this array are Nx4. The order of the corners is clockwise.
 * @param ids vector of identifiers of the detected markers. The identifier is of type int
 * (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
 * The identifiers have the same order as the markers in the imgPoints array.
 * @param parameters marker detection parameters
 * @param rejectedImgPoints contains the imgPoints of those squares whose inner code does not have a
 * correct codification. Useful for debugging purposes.
 * @param cameraMatrix optional input 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
 * @param distCoeff optional vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 *
 * Performs marker detection in the input image. Only markers included in the specified dictionary
 * are searched. For each detected marker, it returns the 2D position of its corners in the image
 * and its corresponding identifier.
 * Note that this function does not perform pose estimation.
 * @sa estimatePoseSingleMarkers, estimatePoseBoard
 */
CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr<Dictionary> &dictionary, OutputArrayOfArrays corners,
                                OutputArray ids, const Ptr<DetectorParameters> &parameters = DetectorParameters::create(),
                                OutputArrayOfArrays rejectedImgPoints = noArray(), InputArray cameraMatrix = noArray(), InputArray distCoeff = noArray());
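/* Usage sketch (illustrative, not part of the original header): basic detection on a single
 * image. The file name "image.jpg" and the dictionary choice DICT_6X6_250 are arbitrary
 * assumptions.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <opencv2/imgcodecs.hpp>
 *     #include <vector>
 *
 *     int main()
 *     {
 *         cv::Mat image = cv::imread("image.jpg");                        // hypothetical input image
 *         cv::Ptr<cv::aruco::Dictionary> dictionary =
 *             cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
 *         cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
 *
 *         std::vector<std::vector<cv::Point2f> > corners, rejected;
 *         std::vector<int> ids;
 *         cv::aruco::detectMarkers(image, dictionary, corners, ids, params, rejected);
 *         // corners[i] holds the four clockwise corners of the marker with identifier ids[i]
 *         return 0;
 *     }
 */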
/**
 * @brief Pose estimation for single markers
 *
 * @param corners vector of already detected marker corners. For each marker, its four corners
 * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers,
 * the dimensions of this array should be Nx4. The order of the corners should be clockwise.
 * @sa detectMarkers
 * @param markerLength the length of the markers' side. The returned translation vectors will
 * be in the same unit. Normally, the unit is meters.
 * @param cameraMatrix input 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
 * @param distCoeffs vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Vec3d>).
 * Each element in rvecs corresponds to the specific marker in imgPoints.
 * @param tvecs array of output translation vectors (e.g. std::vector<cv::Vec3d>).
 * Each element in tvecs corresponds to the specific marker in imgPoints.
 * @param _objPoints array of object points of all the marker corners
 *
 * This function receives the detected markers and returns their pose estimation with respect to
 * the camera individually. So for each marker, one rotation and one translation vector are returned.
 * The returned transformation is the one that transforms points from each marker coordinate system
 * to the camera coordinate system.
 * The marker coordinate system is centered on the middle of the marker, with the Z axis
 * perpendicular to the marker plane.
 * The coordinates of the four corners of the marker in its own coordinate system are:
 * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
 * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
 */
CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength,
                                            InputArray cameraMatrix, InputArray distCoeffs,
                                            OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray());
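/* Usage sketch (illustrative, not part of the original header): estimate the pose of each
 * detected marker and draw its axes. Assumes corners come from detectMarkers above and that
 * cameraMatrix/distCoeffs were obtained from a previous calibration; the 0.05 m marker length
 * is an arbitrary example. cv::drawFrameAxes is the calib3d replacement for the deprecated
 * drawAxis declared further below.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <opencv2/calib3d.hpp>   // cv::drawFrameAxes
 *     #include <vector>
 *
 *     void estimateAndDraw(cv::Mat &image,
 *                          const std::vector<std::vector<cv::Point2f> > &corners,
 *                          const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs)
 *     {
 *         std::vector<cv::Vec3d> rvecs, tvecs;
 *         cv::aruco::estimatePoseSingleMarkers(corners, 0.05f, cameraMatrix, distCoeffs, rvecs, tvecs);
 *         for (size_t i = 0; i < rvecs.size(); i++)
 *             cv::drawFrameAxes(image, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.03f);
 *     }
 */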
/**
 * @brief Board of markers
 *
 * A board is a set of markers in the 3D space with a common coordinate system.
 * The common form of a board of markers is a planar (2D) board, however any 3D layout can be used.
 * A Board object is composed of:
 * - The object points of the marker corners, i.e. their coordinates with respect to the board system.
 * - The dictionary which indicates the type of markers of the board
 * - The identifiers of all the markers in the board.
 */
class CV_EXPORTS_W Board {
public:
    /**
     * @brief Provide a way to create a Board by passing the necessary data. Especially needed in Python.
     *
     * @param objPoints array of object points of all the marker corners in the board
     * @param dictionary the dictionary of markers employed for this board
     * @param ids vector of the identifiers of the markers in the board
     */
    CV_WRAP static Ptr<Board> create(InputArrayOfArrays objPoints, const Ptr<Dictionary> &dictionary, InputArray ids);

    /**
     * @brief Set ids vector
     *
     * @param ids vector of the identifiers of the markers in the board (should be the same size
     * as objPoints)
     *
     * Recommended way to set the ids vector, which will fail if the size of ids does not match the size
     * of objPoints.
     */
    CV_WRAP void setIds(InputArray ids);

    /// array of object points of all the marker corners in the board
    /// each marker includes its 4 corners in CCW order. For M markers, the size is Mx4.
    CV_PROP std::vector< std::vector< Point3f > > objPoints;

    /// the dictionary of markers employed for this board
    CV_PROP Ptr<Dictionary> dictionary;

    /// vector of the identifiers of the markers in the board (same size as objPoints)
    /// The identifiers refer to the board dictionary
    CV_PROP_RW std::vector< int > ids;
};
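/* Usage sketch (illustrative, not part of the original header): build a custom Board containing
 * a single marker whose corners are expressed in a user-defined board coordinate system. The
 * side length, marker id 0 and corner ordering (mirroring the single-marker corner coordinates
 * documented for estimatePoseSingleMarkers above) are assumptions for illustration.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <vector>
 *
 *     cv::Ptr<cv::aruco::Board> makeSingleMarkerBoard(const cv::Ptr<cv::aruco::Dictionary> &dictionary)
 *     {
 *         const float L = 0.04f;  // marker side length in meters (arbitrary)
 *         std::vector<std::vector<cv::Point3f> > objPoints(1);
 *         objPoints[0].push_back(cv::Point3f(-L / 2,  L / 2, 0));
 *         objPoints[0].push_back(cv::Point3f( L / 2,  L / 2, 0));
 *         objPoints[0].push_back(cv::Point3f( L / 2, -L / 2, 0));
 *         objPoints[0].push_back(cv::Point3f(-L / 2, -L / 2, 0));
 *         std::vector<int> ids(1, 0);  // this board uses marker id 0 of the dictionary
 *         return cv::aruco::Board::create(objPoints, dictionary, ids);
 *     }
 */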
/**
 * @brief Planar board with grid arrangement of markers
 *
 * The most common type of board. All markers are placed in the same plane in a grid arrangement.
 * The board can be drawn using the drawPlanarBoard() function (@sa drawPlanarBoard)
 */
class CV_EXPORTS_W GridBoard : public Board {
public:
    /**
     * @brief Draw a GridBoard
     *
     * @param outSize size of the output image in pixels.
     * @param img output image with the board. The size of this image will be outSize
     * and the board will be on the center, keeping the board proportions.
     * @param marginSize minimum margins (in pixels) of the board in the output image
     * @param borderBits width of the marker borders.
     *
     * This function returns the image of the GridBoard, ready to be printed.
     */
    CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);

    /**
     * @brief Create a GridBoard object
     *
     * @param markersX number of markers in the X direction
     * @param markersY number of markers in the Y direction
     * @param markerLength marker side length (normally in meters)
     * @param markerSeparation separation between two markers (same unit as markerLength)
     * @param dictionary dictionary of markers indicating the type of markers
     * @param firstMarker id of the first marker in the dictionary to use on the board.
     * @return the output GridBoard object
     *
     * This function creates a GridBoard object given the number of markers in each direction and
     * the marker size and marker separation.
     */
    CV_WRAP static Ptr<GridBoard> create(int markersX, int markersY, float markerLength,
                                         float markerSeparation, const Ptr<Dictionary> &dictionary, int firstMarker = 0);

    /// get the grid size as (markersX, markersY)
    CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); }

    /// get the marker side length
    CV_WRAP float getMarkerLength() const { return _markerLength; }

    /// get the separation between markers
    CV_WRAP float getMarkerSeparation() const { return _markerSeparation; }

private:
    // number of markers in X and Y directions
    int _markersX, _markersY;

    // marker side length (normally in meters)
    float _markerLength;

    // separation between markers in the grid
    float _markerSeparation;
};
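/* Usage sketch (illustrative, not part of the original header): create a 5x7 grid board and
 * render it to an image ready for printing. The board dimensions, output size and the file
 * name "board.png" are arbitrary assumptions.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <opencv2/imgcodecs.hpp>
 *
 *     int main()
 *     {
 *         cv::Ptr<cv::aruco::Dictionary> dictionary =
 *             cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
 *         cv::Ptr<cv::aruco::GridBoard> board =
 *             cv::aruco::GridBoard::create(5, 7, 0.04f, 0.01f, dictionary);  // 4 cm markers, 1 cm gaps
 *         cv::Mat boardImage;
 *         board->draw(cv::Size(600, 800), boardImage, 10, 1);                // 10 px margin, 1-bit border
 *         cv::imwrite("board.png", boardImage);
 *         return 0;
 *     }
 */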
/**
 * @brief Pose estimation for a board of markers
 *
 * @param corners vector of already detected marker corners. For each marker, its four corners
 * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the
 * dimensions of this array should be Nx4. The order of the corners should be clockwise.
 * @param ids list of identifiers for each marker in corners
 * @param board layout of markers in the board. The layout is composed of the marker identifiers
 * and the positions of each marker corner in the board reference system.
 * @param cameraMatrix input 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
 * @param distCoeffs vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
 * (see cv::Rodrigues). Used as initial guess if not empty.
 * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
 * Used as initial guess if not empty.
 * @param useExtrinsicGuess defines whether the initial guess for \b rvec and \b tvec will be used or not.
 *
 * This function receives the detected markers and returns the pose of a marker board composed
 * of those markers.
 * A board of markers has a single world coordinate system which is defined by the board layout.
 * The returned transformation is the one that transforms points from the board coordinate system
 * to the camera coordinate system.
 * Input markers that are not included in the board layout are ignored.
 * The function returns the number of markers from the input employed for the board pose estimation.
 * Note that a return value of 0 means the pose has not been estimated.
 */
CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board,
                                   InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec,
                                   InputOutputArray tvec, bool useExtrinsicGuess = false);
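/* Usage sketch (illustrative, not part of the original header): estimate the board pose from
 * the markers detected in one frame. Assumes corners/ids come from detectMarkers, board comes
 * from GridBoard::create or Board::create, and the camera parameters are already calibrated.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <vector>
 *
 *     bool poseFromBoard(const std::vector<std::vector<cv::Point2f> > &corners,
 *                        const std::vector<int> &ids,
 *                        const cv::Ptr<cv::aruco::Board> &board,
 *                        const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs,
 *                        cv::Vec3d &rvec, cv::Vec3d &tvec)
 *     {
 *         int used = cv::aruco::estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs, rvec, tvec);
 *         return used > 0;  // 0 means the pose could not be estimated
 *     }
 */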
/**
 * @brief Refine not-detected markers based on the already detected markers and the board layout
 *
 * @param image input image
 * @param board layout of markers in the board.
 * @param detectedCorners vector of already detected marker corners.
 * @param detectedIds vector of already detected marker identifiers.
 * @param rejectedCorners vector of rejected candidates during the marker detection process.
 * @param cameraMatrix optional input 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
 * @param distCoeffs optional vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 * @param minRepDistance minimum distance between the corners of the rejected candidate and the
 * reprojected marker in order to consider it as a correspondence.
 * @param errorCorrectionRate rate of allowed erroneous bits with respect to the error correction
 * capability of the used dictionary. -1 ignores the error correction step.
 * @param checkAllOrders consider the four possible corner orders in the rejectedCorners array.
 * If it is set to false, only the provided corner order is considered (default true).
 * @param recoveredIdxs Optional array to return the indices of the recovered candidates in the
 * original rejectedCorners array.
 * @param parameters marker detection parameters
 *
 * This function tries to find markers that were not detected in the basic detectMarkers function.
 * First, based on the currently detected markers and the board layout, the function interpolates
 * the position of the missing markers. Then it tries to find correspondences between the reprojected
 * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate
 * parameters.
 * If camera parameters and distortion coefficients are provided, missing markers are reprojected
 * using the projectPoints function. If not, missing marker projections are interpolated using a global
 * homography, and all the marker corners in the board must have the same Z coordinate.
 */
CV_EXPORTS_W void refineDetectedMarkers(
    InputArray image, const Ptr<Board> &board, InputOutputArrayOfArrays detectedCorners,
    InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
    InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
    float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true,
    OutputArray recoveredIdxs = noArray(), const Ptr<DetectorParameters> &parameters = DetectorParameters::create());
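/* Usage sketch (illustrative, not part of the original header): after a first detection pass,
 * try to recover board markers that were initially rejected. Assumes image, board, camera
 * parameters, and the vectors filled by detectMarkers (including the rejected candidates) are
 * available from earlier steps.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <vector>
 *
 *     void recoverMissing(const cv::Mat &image, const cv::Ptr<cv::aruco::Board> &board,
 *                         std::vector<std::vector<cv::Point2f> > &corners, std::vector<int> &ids,
 *                         std::vector<std::vector<cv::Point2f> > &rejected,
 *                         const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs)
 *     {
 *         // corners and ids are extended in place with any recovered markers
 *         cv::aruco::refineDetectedMarkers(image, board, corners, ids, rejected,
 *                                          cameraMatrix, distCoeffs);
 *     }
 */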
/**
 * @brief Draw detected markers in an image
 *
 * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
 * altered.
 * @param corners positions of marker corners on the input image
 * (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the dimensions of
 * this array should be Nx4. The order of the corners should be clockwise.
 * @param ids vector of identifiers for the markers in corners.
 * Optional, if not provided, ids are not painted.
 * @param borderColor color of marker borders. The remaining colors (text color and first corner color)
 * are calculated based on this one to improve visualization.
 *
 * Given an array of detected marker corners and its corresponding ids, this function draws
 * the markers in the image. The marker borders are painted and, if provided, the marker identifiers.
 * Useful for debugging purposes.
 */
CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners,
                                      InputArray ids = noArray(),
                                      Scalar borderColor = Scalar(0, 255, 0));
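/* Usage sketch (illustrative, not part of the original header): overlay the detection result
 * on a copy of the input image for inspection. Assumes corners/ids come from detectMarkers;
 * the output file name "detections.png" is hypothetical.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <opencv2/imgcodecs.hpp>
 *     #include <vector>
 *
 *     void visualize(const cv::Mat &image,
 *                    const std::vector<std::vector<cv::Point2f> > &corners,
 *                    const std::vector<int> &ids)
 *     {
 *         cv::Mat outputImage = image.clone();                      // keep the original untouched
 *         cv::aruco::drawDetectedMarkers(outputImage, corners, ids);
 *         cv::imwrite("detections.png", outputImage);
 *     }
 */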
/**
 * @brief Draw coordinate system axes from pose estimation
 *
 * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
 * altered.
 * @param cameraMatrix input 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
 * @param distCoeffs vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 * @param rvec rotation vector of the coordinate system that will be drawn (@sa Rodrigues).
 * @param tvec translation vector of the coordinate system that will be drawn.
 * @param length length of the painted axes in the same unit as tvec (usually in meters)
 *
 * Given the pose estimation of a marker or board, this function draws the axes of the world
 * coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.
 *
 * @deprecated use cv::drawFrameAxes
 */
CV_EXPORTS_W void drawAxis(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
                           InputArray rvec, InputArray tvec, float length);
/**
 * @brief Draw a canonical marker image
 *
 * @param dictionary dictionary of markers indicating the type of markers
 * @param id identifier of the marker that will be returned. It has to be a valid id
 * in the specified dictionary.
 * @param sidePixels size of the image in pixels
 * @param img output image with the marker
 * @param borderBits width of the marker border.
 *
 * This function returns a marker image in its canonical form (i.e. ready to be printed)
 */
CV_EXPORTS_W void drawMarker(const Ptr<Dictionary> &dictionary, int id, int sidePixels, OutputArray img,
                             int borderBits = 1);
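/* Usage sketch (illustrative, not part of the original header): generate a single printable
 * marker image. The marker id 23, the 200 px side and the file name "marker23.png" are
 * arbitrary assumptions.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <opencv2/imgcodecs.hpp>
 *
 *     int main()
 *     {
 *         cv::Ptr<cv::aruco::Dictionary> dictionary =
 *             cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
 *         cv::Mat markerImage;
 *         cv::aruco::drawMarker(dictionary, 23, 200, markerImage, 1);  // id 23, 200x200 px, 1-bit border
 *         cv::imwrite("marker23.png", markerImage);
 *         return 0;
 *     }
 */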
/**
 * @brief Draw a planar board
 * @sa _drawPlanarBoardImpl
 *
 * @param board layout of the board that will be drawn. The board should be planar;
 * the z coordinate is ignored
 * @param outSize size of the output image in pixels.
 * @param img output image with the board. The size of this image will be outSize
 * and the board will be on the center, keeping the board proportions.
 * @param marginSize minimum margins (in pixels) of the board in the output image
 * @param borderBits width of the marker borders.
 *
 * This function returns the image of a planar board, ready to be printed. It assumes
 * the specified Board layout is planar by ignoring the z coordinates of the object points.
 */
CV_EXPORTS_W void drawPlanarBoard(const Ptr<Board> &board, Size outSize, OutputArray img,
                                  int marginSize = 0, int borderBits = 1);

/**
 * @brief Implementation of drawPlanarBoard that accepts a raw Board pointer.
 */
void _drawPlanarBoardImpl(Board *board, Size outSize, OutputArray img,
                          int marginSize = 0, int borderBits = 1);
/**
 * @brief Calibrate a camera using ArUco markers
 *
 * @param corners vector of detected marker corners in all frames.
 * The corners should have the same format returned by detectMarkers (see #detectMarkers).
 * @param ids list of identifiers for each marker in corners
 * @param counter number of markers in each frame so that corners and ids can be split
 * @param board Marker Board layout
 * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
 * @param cameraMatrix Output 3x3 floating-point camera matrix
 * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CALIB_USE_INTRINSIC_GUESS
 * and/or CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
 * initialized before calling the function.
 * @param distCoeffs Output vector of distortion coefficients
 * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
 * @param rvecs Output vector of rotation vectors (see Rodrigues) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
 * k-th translation vector (see the next output parameter description) brings the board pattern
 * from the model coordinate space (in which object points are specified) to the world coordinate
 * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
 * @param tvecs Output vector of translation vectors estimated for each pattern view.
 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
 * Order of deviation values:
 * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.
 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
 * Order of deviation values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is the number of pattern views,
 * \f$R_i, T_i\f$ are concatenated 1x3 vectors.
 * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
 * @param flags Different flags for the calibration process (see #calibrateCamera for details).
 * @param criteria Termination criteria for the iterative optimization algorithm.
 *
 * This function calibrates a camera using an ArUco Board. The function receives a list of
 * detected markers from several views of the Board. The process is similar to the chessboard
 * calibration in calibrateCamera(). The function returns the final re-projection error.
 */
CV_EXPORTS_AS(calibrateCameraArucoExtended) double calibrateCameraAruco(
    InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
    Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
    OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
    OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics,
    OutputArray perViewErrors, int flags = 0,
    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
/** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
 */
CV_EXPORTS_W double calibrateCameraAruco(
    InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
    Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
    OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,
    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
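/* Usage sketch (illustrative, not part of the original header): calibrate from markers detected
 * in several frames. Assumes allCorners/allIds are the per-frame detections concatenated over
 * all frames and markerCounterPerFrame holds how many markers each frame contributed, matching
 * the corners/ids/counter description above; all names are hypothetical.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <vector>
 *
 *     double calibrate(const std::vector<std::vector<cv::Point2f> > &allCorners,
 *                      const std::vector<int> &allIds,
 *                      const std::vector<int> &markerCounterPerFrame,
 *                      const cv::Ptr<cv::aruco::Board> &board, cv::Size imageSize,
 *                      cv::Mat &cameraMatrix, cv::Mat &distCoeffs)
 *     {
 *         std::vector<cv::Mat> rvecs, tvecs;
 *         // returns the final re-projection error
 *         return cv::aruco::calibrateCameraAruco(allCorners, allIds, markerCounterPerFrame, board,
 *                                                imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
 *     }
 */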
/**
 * @brief Given a board configuration and a set of detected markers, returns the corresponding
 * image points and object points to call solvePnP
 *
 * @param board Marker board layout.
 * @param detectedCorners List of detected marker corners of the board.
 * @param detectedIds List of identifiers for each marker.
 * @param objPoints Vector of vectors of board marker points in the board coordinate space.
 * @param imgPoints Vector of vectors of the projections of board marker corner points.
 */
CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArrayOfArrays detectedCorners,
                                               InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints);
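/* Usage sketch (illustrative, not part of the original header): gather the 2D/3D
 * correspondences for a board and solve the pose directly with cv::solvePnP. Assumes
 * detectedCorners/detectedIds come from detectMarkers and that the camera is calibrated.
 *
 *     #include <opencv2/aruco.hpp>
 *     #include <opencv2/calib3d.hpp>   // cv::solvePnP
 *     #include <vector>
 *
 *     bool boardPoseViaPnP(const cv::Ptr<cv::aruco::Board> &board,
 *                          const std::vector<std::vector<cv::Point2f> > &detectedCorners,
 *                          const std::vector<int> &detectedIds,
 *                          const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs,
 *                          cv::Mat &rvec, cv::Mat &tvec)
 *     {
 *         std::vector<cv::Point3f> objPoints;
 *         std::vector<cv::Point2f> imgPoints;
 *         cv::aruco::getBoardObjectAndImagePoints(board, detectedCorners, detectedIds, objPoints, imgPoints);
 *         if (objPoints.empty())
 *             return false;  // none of the detected markers belongs to the board
 *         return cv::solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec);
 *     }
 */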
//! @}

} // namespace aruco
} // namespace cv

#endif