test_arithm.cpp 78 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550
  1. // This file is part of OpenCV project.
  2. // It is subject to the license terms in the LICENSE file found in the top-level directory
  3. // of this distribution and at http://opencv.org/license.html.
  4. #include "test_precomp.hpp"
  5. #include "ref_reduce_arg.impl.hpp"
  6. namespace opencv_test { namespace {
  7. const int ARITHM_NTESTS = 1000;
  8. const int ARITHM_RNG_SEED = -1;
  9. const int ARITHM_MAX_CHANNELS = 4;
  10. const int ARITHM_MAX_NDIMS = 4;
  11. const int ARITHM_MAX_SIZE_LOG = 10;
  12. struct BaseElemWiseOp
  13. {
  14. enum { FIX_ALPHA=1, FIX_BETA=2, FIX_GAMMA=4, REAL_GAMMA=8, SUPPORT_MASK=16, SCALAR_OUTPUT=32, SUPPORT_MULTICHANNELMASK=64 };
  15. BaseElemWiseOp(int _ninputs, int _flags, double _alpha, double _beta,
  16. Scalar _gamma=Scalar::all(0), int _context=1)
  17. : ninputs(_ninputs), flags(_flags), alpha(_alpha), beta(_beta), gamma(_gamma), context(_context) {}
  18. BaseElemWiseOp() { flags = 0; alpha = beta = 0; gamma = Scalar::all(0); ninputs = 0; context = 1; }
  19. virtual ~BaseElemWiseOp() {}
  20. virtual void op(const vector<Mat>&, Mat&, const Mat&) {}
  21. virtual void refop(const vector<Mat>&, Mat&, const Mat&) {}
  22. virtual void getValueRange(int depth, double& minval, double& maxval)
  23. {
  24. minval = depth < CV_32S ? cvtest::getMinVal(depth) : depth == CV_32S ? -1000000 : -1000.;
  25. maxval = depth < CV_32S ? cvtest::getMaxVal(depth) : depth == CV_32S ? 1000000 : 1000.;
  26. }
  27. virtual void getRandomSize(RNG& rng, vector<int>& size)
  28. {
  29. cvtest::randomSize(rng, 2, ARITHM_MAX_NDIMS, ARITHM_MAX_SIZE_LOG, size);
  30. }
  31. virtual int getRandomType(RNG& rng)
  32. {
  33. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1,
  34. ninputs > 1 ? ARITHM_MAX_CHANNELS : 4);
  35. }
  36. virtual double getMaxErr(int depth) { return depth < CV_32F ? 1 : depth == CV_32F ? 1e-5 : 1e-12; }
  37. virtual void generateScalars(int depth, RNG& rng)
  38. {
  39. const double m = 3.;
  40. if( !(flags & FIX_ALPHA) )
  41. {
  42. alpha = exp(rng.uniform(-0.5, 0.1)*m*2*CV_LOG2);
  43. alpha *= rng.uniform(0, 2) ? 1 : -1;
  44. }
  45. if( !(flags & FIX_BETA) )
  46. {
  47. beta = exp(rng.uniform(-0.5, 0.1)*m*2*CV_LOG2);
  48. beta *= rng.uniform(0, 2) ? 1 : -1;
  49. }
  50. if( !(flags & FIX_GAMMA) )
  51. {
  52. for( int i = 0; i < 4; i++ )
  53. {
  54. gamma[i] = exp(rng.uniform(-1, 6)*m*CV_LOG2);
  55. gamma[i] *= rng.uniform(0, 2) ? 1 : -1;
  56. }
  57. if( flags & REAL_GAMMA )
  58. gamma = Scalar::all(gamma[0]);
  59. }
  60. if( depth == CV_32F )
  61. {
  62. Mat fl, db;
  63. db = Mat(1, 1, CV_64F, &alpha);
  64. db.convertTo(fl, CV_32F);
  65. fl.convertTo(db, CV_64F);
  66. db = Mat(1, 1, CV_64F, &beta);
  67. db.convertTo(fl, CV_32F);
  68. fl.convertTo(db, CV_64F);
  69. db = Mat(1, 4, CV_64F, &gamma[0]);
  70. db.convertTo(fl, CV_32F);
  71. fl.convertTo(db, CV_64F);
  72. }
  73. }
  74. int ninputs;
  75. int flags;
  76. double alpha;
  77. double beta;
  78. Scalar gamma;
  79. int context;
  80. };
  81. struct BaseAddOp : public BaseElemWiseOp
  82. {
  83. BaseAddOp(int _ninputs, int _flags, double _alpha, double _beta, Scalar _gamma=Scalar::all(0))
  84. : BaseElemWiseOp(_ninputs, _flags, _alpha, _beta, _gamma) {}
  85. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  86. {
  87. Mat temp;
  88. if( !mask.empty() )
  89. {
  90. cvtest::add(src[0], alpha, src.size() > 1 ? src[1] : Mat(), beta, gamma, temp, src[0].type());
  91. cvtest::copy(temp, dst, mask);
  92. }
  93. else
  94. cvtest::add(src[0], alpha, src.size() > 1 ? src[1] : Mat(), beta, gamma, dst, src[0].type());
  95. }
  96. };
  97. struct AddOp : public BaseAddOp
  98. {
  99. AddOp() : BaseAddOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK, 1, 1, Scalar::all(0)) {}
  100. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  101. {
  102. if( mask.empty() )
  103. cv::add(src[0], src[1], dst);
  104. else
  105. cv::add(src[0], src[1], dst, mask);
  106. }
  107. };
  108. struct SubOp : public BaseAddOp
  109. {
  110. SubOp() : BaseAddOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK, 1, -1, Scalar::all(0)) {}
  111. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  112. {
  113. if( mask.empty() )
  114. cv::subtract(src[0], src[1], dst);
  115. else
  116. cv::subtract(src[0], src[1], dst, mask);
  117. }
  118. };
  119. struct AddSOp : public BaseAddOp
  120. {
  121. AddSOp() : BaseAddOp(1, FIX_ALPHA+FIX_BETA+SUPPORT_MASK, 1, 0, Scalar::all(0)) {}
  122. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  123. {
  124. if( mask.empty() )
  125. cv::add(src[0], gamma, dst);
  126. else
  127. cv::add(src[0], gamma, dst, mask);
  128. }
  129. };
  130. struct SubRSOp : public BaseAddOp
  131. {
  132. SubRSOp() : BaseAddOp(1, FIX_ALPHA+FIX_BETA+SUPPORT_MASK, -1, 0, Scalar::all(0)) {}
  133. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  134. {
  135. if( mask.empty() )
  136. cv::subtract(gamma, src[0], dst);
  137. else
  138. cv::subtract(gamma, src[0], dst, mask);
  139. }
  140. };
  141. struct ScaleAddOp : public BaseAddOp
  142. {
  143. ScaleAddOp() : BaseAddOp(2, FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  144. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  145. {
  146. cv::scaleAdd(src[0], alpha, src[1], dst);
  147. }
  148. double getMaxErr(int depth)
  149. {
  150. return depth <= CV_32S ? 2 : depth < CV_64F ? 1e-4 : 1e-12;
  151. }
  152. };
  153. struct AddWeightedOp : public BaseAddOp
  154. {
  155. AddWeightedOp() : BaseAddOp(2, REAL_GAMMA, 1, 1, Scalar::all(0)) {}
  156. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  157. {
  158. cv::addWeighted(src[0], alpha, src[1], beta, gamma[0], dst);
  159. }
  160. double getMaxErr(int depth)
  161. {
  162. return depth <= CV_32S ? 2 : depth < CV_64F ? 1e-5 : 1e-10;
  163. }
  164. };
  165. struct MulOp : public BaseElemWiseOp
  166. {
  167. MulOp() : BaseElemWiseOp(2, FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  168. void getValueRange(int depth, double& minval, double& maxval)
  169. {
  170. minval = depth < CV_32S ? cvtest::getMinVal(depth) : depth == CV_32S ? -1000000 : -1000.;
  171. maxval = depth < CV_32S ? cvtest::getMaxVal(depth) : depth == CV_32S ? 1000000 : 1000.;
  172. minval = std::max(minval, -30000.);
  173. maxval = std::min(maxval, 30000.);
  174. }
  175. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  176. {
  177. cv::multiply(src[0], src[1], dst, alpha);
  178. }
  179. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  180. {
  181. cvtest::multiply(src[0], src[1], dst, alpha);
  182. }
  183. double getMaxErr(int depth)
  184. {
  185. return depth <= CV_32S ? 2 : depth < CV_64F ? 1e-5 : 1e-12;
  186. }
  187. };
  188. struct DivOp : public BaseElemWiseOp
  189. {
  190. DivOp() : BaseElemWiseOp(2, FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  191. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  192. {
  193. cv::divide(src[0], src[1], dst, alpha);
  194. }
  195. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  196. {
  197. cvtest::divide(src[0], src[1], dst, alpha);
  198. }
  199. double getMaxErr(int depth)
  200. {
  201. return depth <= CV_32S ? 2 : depth < CV_64F ? 1e-5 : 1e-12;
  202. }
  203. };
  204. struct RecipOp : public BaseElemWiseOp
  205. {
  206. RecipOp() : BaseElemWiseOp(1, FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  207. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  208. {
  209. cv::divide(alpha, src[0], dst);
  210. }
  211. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  212. {
  213. cvtest::divide(Mat(), src[0], dst, alpha);
  214. }
  215. double getMaxErr(int depth)
  216. {
  217. return depth <= CV_32S ? 2 : depth < CV_64F ? 1e-5 : 1e-12;
  218. }
  219. };
  220. struct AbsDiffOp : public BaseAddOp
  221. {
  222. AbsDiffOp() : BaseAddOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, -1, Scalar::all(0)) {}
  223. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  224. {
  225. absdiff(src[0], src[1], dst);
  226. }
  227. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  228. {
  229. cvtest::add(src[0], 1, src[1], -1, Scalar::all(0), dst, src[0].type(), true);
  230. }
  231. };
  232. struct AbsDiffSOp : public BaseAddOp
  233. {
  234. AbsDiffSOp() : BaseAddOp(1, FIX_ALPHA+FIX_BETA, 1, 0, Scalar::all(0)) {}
  235. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  236. {
  237. absdiff(src[0], gamma, dst);
  238. }
  239. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  240. {
  241. cvtest::add(src[0], 1, Mat(), 0, -gamma, dst, src[0].type(), true);
  242. }
  243. };
  244. struct LogicOp : public BaseElemWiseOp
  245. {
  246. LogicOp(char _opcode) : BaseElemWiseOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK, 1, 1, Scalar::all(0)), opcode(_opcode) {}
  247. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  248. {
  249. if( opcode == '&' )
  250. cv::bitwise_and(src[0], src[1], dst, mask);
  251. else if( opcode == '|' )
  252. cv::bitwise_or(src[0], src[1], dst, mask);
  253. else
  254. cv::bitwise_xor(src[0], src[1], dst, mask);
  255. }
  256. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  257. {
  258. Mat temp;
  259. if( !mask.empty() )
  260. {
  261. cvtest::logicOp(src[0], src[1], temp, opcode);
  262. cvtest::copy(temp, dst, mask);
  263. }
  264. else
  265. cvtest::logicOp(src[0], src[1], dst, opcode);
  266. }
  267. double getMaxErr(int)
  268. {
  269. return 0;
  270. }
  271. char opcode;
  272. };
  273. struct LogicSOp : public BaseElemWiseOp
  274. {
  275. LogicSOp(char _opcode)
  276. : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+(_opcode != '~' ? SUPPORT_MASK : 0), 1, 1, Scalar::all(0)), opcode(_opcode) {}
  277. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  278. {
  279. if( opcode == '&' )
  280. cv::bitwise_and(src[0], gamma, dst, mask);
  281. else if( opcode == '|' )
  282. cv::bitwise_or(src[0], gamma, dst, mask);
  283. else if( opcode == '^' )
  284. cv::bitwise_xor(src[0], gamma, dst, mask);
  285. else
  286. cv::bitwise_not(src[0], dst);
  287. }
  288. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  289. {
  290. Mat temp;
  291. if( !mask.empty() )
  292. {
  293. cvtest::logicOp(src[0], gamma, temp, opcode);
  294. cvtest::copy(temp, dst, mask);
  295. }
  296. else
  297. cvtest::logicOp(src[0], gamma, dst, opcode);
  298. }
  299. double getMaxErr(int)
  300. {
  301. return 0;
  302. }
  303. char opcode;
  304. };
  305. struct MinOp : public BaseElemWiseOp
  306. {
  307. MinOp() : BaseElemWiseOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  308. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  309. {
  310. cv::min(src[0], src[1], dst);
  311. }
  312. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  313. {
  314. cvtest::min(src[0], src[1], dst);
  315. }
  316. double getMaxErr(int)
  317. {
  318. return 0;
  319. }
  320. };
  321. struct MaxOp : public BaseElemWiseOp
  322. {
  323. MaxOp() : BaseElemWiseOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  324. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  325. {
  326. cv::max(src[0], src[1], dst);
  327. }
  328. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  329. {
  330. cvtest::max(src[0], src[1], dst);
  331. }
  332. double getMaxErr(int)
  333. {
  334. return 0;
  335. }
  336. };
  337. struct MinSOp : public BaseElemWiseOp
  338. {
  339. MinSOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+REAL_GAMMA, 1, 1, Scalar::all(0)) {}
  340. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  341. {
  342. cv::min(src[0], gamma[0], dst);
  343. }
  344. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  345. {
  346. cvtest::min(src[0], gamma[0], dst);
  347. }
  348. double getMaxErr(int)
  349. {
  350. return 0;
  351. }
  352. };
  353. struct MaxSOp : public BaseElemWiseOp
  354. {
  355. MaxSOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+REAL_GAMMA, 1, 1, Scalar::all(0)) {}
  356. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  357. {
  358. cv::max(src[0], gamma[0], dst);
  359. }
  360. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  361. {
  362. cvtest::max(src[0], gamma[0], dst);
  363. }
  364. double getMaxErr(int)
  365. {
  366. return 0;
  367. }
  368. };
  369. struct CmpOp : public BaseElemWiseOp
  370. {
  371. CmpOp() : BaseElemWiseOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) { cmpop = 0; }
  372. void generateScalars(int depth, RNG& rng)
  373. {
  374. BaseElemWiseOp::generateScalars(depth, rng);
  375. cmpop = rng.uniform(0, 6);
  376. }
  377. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  378. {
  379. cv::compare(src[0], src[1], dst, cmpop);
  380. }
  381. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  382. {
  383. cvtest::compare(src[0], src[1], dst, cmpop);
  384. }
  385. int getRandomType(RNG& rng)
  386. {
  387. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 1);
  388. }
  389. double getMaxErr(int)
  390. {
  391. return 0;
  392. }
  393. int cmpop;
  394. };
  395. struct CmpSOp : public BaseElemWiseOp
  396. {
  397. CmpSOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+REAL_GAMMA, 1, 1, Scalar::all(0)) { cmpop = 0; }
  398. void generateScalars(int depth, RNG& rng)
  399. {
  400. BaseElemWiseOp::generateScalars(depth, rng);
  401. cmpop = rng.uniform(0, 6);
  402. if( depth < CV_32F )
  403. gamma[0] = cvRound(gamma[0]);
  404. }
  405. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  406. {
  407. cv::compare(src[0], gamma[0], dst, cmpop);
  408. }
  409. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  410. {
  411. cvtest::compare(src[0], gamma[0], dst, cmpop);
  412. }
  413. int getRandomType(RNG& rng)
  414. {
  415. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 1);
  416. }
  417. double getMaxErr(int)
  418. {
  419. return 0;
  420. }
  421. int cmpop;
  422. };
  423. struct CopyOp : public BaseElemWiseOp
  424. {
  425. CopyOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SUPPORT_MULTICHANNELMASK, 1, 1, Scalar::all(0)) { }
  426. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  427. {
  428. src[0].copyTo(dst, mask);
  429. }
  430. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  431. {
  432. cvtest::copy(src[0], dst, mask);
  433. }
  434. int getRandomType(RNG& rng)
  435. {
  436. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_16F, 1, ARITHM_MAX_CHANNELS);
  437. }
  438. double getMaxErr(int)
  439. {
  440. return 0;
  441. }
  442. };
  443. struct SetOp : public BaseElemWiseOp
  444. {
  445. SetOp() : BaseElemWiseOp(0, FIX_ALPHA+FIX_BETA+SUPPORT_MASK+SUPPORT_MULTICHANNELMASK, 1, 1, Scalar::all(0)) {}
  446. void op(const vector<Mat>&, Mat& dst, const Mat& mask)
  447. {
  448. dst.setTo(gamma, mask);
  449. }
  450. void refop(const vector<Mat>&, Mat& dst, const Mat& mask)
  451. {
  452. cvtest::set(dst, gamma, mask);
  453. }
  454. int getRandomType(RNG& rng)
  455. {
  456. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_16F, 1, ARITHM_MAX_CHANNELS);
  457. }
  458. double getMaxErr(int)
  459. {
  460. return 0;
  461. }
  462. };
  463. template<typename _Tp, typename _WTp> static void
  464. inRangeS_(const _Tp* src, const _WTp* a, const _WTp* b, uchar* dst, size_t total, int cn)
  465. {
  466. size_t i;
  467. int c;
  468. for( i = 0; i < total; i++ )
  469. {
  470. _Tp val = src[i*cn];
  471. dst[i] = (a[0] <= val && val <= b[0]) ? uchar(255) : 0;
  472. }
  473. for( c = 1; c < cn; c++ )
  474. {
  475. for( i = 0; i < total; i++ )
  476. {
  477. _Tp val = src[i*cn + c];
  478. dst[i] = a[c] <= val && val <= b[c] ? dst[i] : 0;
  479. }
  480. }
  481. }
  482. template<typename _Tp> static void inRange_(const _Tp* src, const _Tp* a, const _Tp* b, uchar* dst, size_t total, int cn)
  483. {
  484. size_t i;
  485. int c;
  486. for( i = 0; i < total; i++ )
  487. {
  488. _Tp val = src[i*cn];
  489. dst[i] = a[i*cn] <= val && val <= b[i*cn] ? 255 : 0;
  490. }
  491. for( c = 1; c < cn; c++ )
  492. {
  493. for( i = 0; i < total; i++ )
  494. {
  495. _Tp val = src[i*cn + c];
  496. dst[i] = a[i*cn + c] <= val && val <= b[i*cn + c] ? dst[i] : 0;
  497. }
  498. }
  499. }
  500. namespace reference {
  501. static void inRange(const Mat& src, const Mat& lb, const Mat& rb, Mat& dst)
  502. {
  503. CV_Assert( src.type() == lb.type() && src.type() == rb.type() &&
  504. src.size == lb.size && src.size == rb.size );
  505. dst.create( src.dims, &src.size[0], CV_8U );
  506. const Mat *arrays[]={&src, &lb, &rb, &dst, 0};
  507. Mat planes[4];
  508. NAryMatIterator it(arrays, planes);
  509. size_t total = planes[0].total();
  510. size_t i, nplanes = it.nplanes;
  511. int depth = src.depth(), cn = src.channels();
  512. for( i = 0; i < nplanes; i++, ++it )
  513. {
  514. const uchar* sptr = planes[0].ptr();
  515. const uchar* aptr = planes[1].ptr();
  516. const uchar* bptr = planes[2].ptr();
  517. uchar* dptr = planes[3].ptr();
  518. switch( depth )
  519. {
  520. case CV_8U:
  521. inRange_((const uchar*)sptr, (const uchar*)aptr, (const uchar*)bptr, dptr, total, cn);
  522. break;
  523. case CV_8S:
  524. inRange_((const schar*)sptr, (const schar*)aptr, (const schar*)bptr, dptr, total, cn);
  525. break;
  526. case CV_16U:
  527. inRange_((const ushort*)sptr, (const ushort*)aptr, (const ushort*)bptr, dptr, total, cn);
  528. break;
  529. case CV_16S:
  530. inRange_((const short*)sptr, (const short*)aptr, (const short*)bptr, dptr, total, cn);
  531. break;
  532. case CV_32S:
  533. inRange_((const int*)sptr, (const int*)aptr, (const int*)bptr, dptr, total, cn);
  534. break;
  535. case CV_32F:
  536. inRange_((const float*)sptr, (const float*)aptr, (const float*)bptr, dptr, total, cn);
  537. break;
  538. case CV_64F:
  539. inRange_((const double*)sptr, (const double*)aptr, (const double*)bptr, dptr, total, cn);
  540. break;
  541. default:
  542. CV_Error(CV_StsUnsupportedFormat, "");
  543. }
  544. }
  545. }
  546. static void inRangeS(const Mat& src, const Scalar& lb, const Scalar& rb, Mat& dst)
  547. {
  548. dst.create( src.dims, &src.size[0], CV_8U );
  549. const Mat *arrays[]={&src, &dst, 0};
  550. Mat planes[2];
  551. NAryMatIterator it(arrays, planes);
  552. size_t total = planes[0].total();
  553. size_t i, nplanes = it.nplanes;
  554. int depth = src.depth(), cn = src.channels();
  555. union { double d[4]; float f[4]; int i[4];} lbuf, rbuf;
  556. int wtype = CV_MAKETYPE(depth <= CV_32S ? CV_32S : depth, cn);
  557. scalarToRawData(lb, lbuf.d, wtype, cn);
  558. scalarToRawData(rb, rbuf.d, wtype, cn);
  559. for( i = 0; i < nplanes; i++, ++it )
  560. {
  561. const uchar* sptr = planes[0].ptr();
  562. uchar* dptr = planes[1].ptr();
  563. switch( depth )
  564. {
  565. case CV_8U:
  566. inRangeS_((const uchar*)sptr, lbuf.i, rbuf.i, dptr, total, cn);
  567. break;
  568. case CV_8S:
  569. inRangeS_((const schar*)sptr, lbuf.i, rbuf.i, dptr, total, cn);
  570. break;
  571. case CV_16U:
  572. inRangeS_((const ushort*)sptr, lbuf.i, rbuf.i, dptr, total, cn);
  573. break;
  574. case CV_16S:
  575. inRangeS_((const short*)sptr, lbuf.i, rbuf.i, dptr, total, cn);
  576. break;
  577. case CV_32S:
  578. inRangeS_((const int*)sptr, lbuf.i, rbuf.i, dptr, total, cn);
  579. break;
  580. case CV_32F:
  581. inRangeS_((const float*)sptr, lbuf.f, rbuf.f, dptr, total, cn);
  582. break;
  583. case CV_64F:
  584. inRangeS_((const double*)sptr, lbuf.d, rbuf.d, dptr, total, cn);
  585. break;
  586. default:
  587. CV_Error(CV_StsUnsupportedFormat, "");
  588. }
  589. }
  590. }
  591. } // namespace
  592. CVTEST_GUARD_SYMBOL(inRange);
  593. struct InRangeSOp : public BaseElemWiseOp
  594. {
  595. InRangeSOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA, 1, 1, Scalar::all(0)) {}
  596. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  597. {
  598. cv::inRange(src[0], gamma, gamma1, dst);
  599. }
  600. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  601. {
  602. reference::inRangeS(src[0], gamma, gamma1, dst);
  603. }
  604. double getMaxErr(int)
  605. {
  606. return 0;
  607. }
  608. void generateScalars(int depth, RNG& rng)
  609. {
  610. BaseElemWiseOp::generateScalars(depth, rng);
  611. Scalar temp = gamma;
  612. BaseElemWiseOp::generateScalars(depth, rng);
  613. for( int i = 0; i < 4; i++ )
  614. {
  615. gamma1[i] = std::max(gamma[i], temp[i]);
  616. gamma[i] = std::min(gamma[i], temp[i]);
  617. }
  618. }
  619. Scalar gamma1;
  620. };
  621. struct InRangeOp : public BaseElemWiseOp
  622. {
  623. InRangeOp() : BaseElemWiseOp(3, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  624. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  625. {
  626. Mat lb, rb;
  627. cvtest::min(src[1], src[2], lb);
  628. cvtest::max(src[1], src[2], rb);
  629. cv::inRange(src[0], lb, rb, dst);
  630. }
  631. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  632. {
  633. Mat lb, rb;
  634. cvtest::min(src[1], src[2], lb);
  635. cvtest::max(src[1], src[2], rb);
  636. reference::inRange(src[0], lb, rb, dst);
  637. }
  638. double getMaxErr(int)
  639. {
  640. return 0;
  641. }
  642. };
  643. struct ConvertScaleOp : public BaseElemWiseOp
  644. {
  645. ConvertScaleOp() : BaseElemWiseOp(1, FIX_BETA+REAL_GAMMA, 1, 1, Scalar::all(0)), ddepth(0) { }
  646. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  647. {
  648. src[0].convertTo(dst, ddepth, alpha, gamma[0]);
  649. }
  650. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  651. {
  652. cvtest::convert(src[0], dst, CV_MAKETYPE(ddepth, src[0].channels()), alpha, gamma[0]);
  653. }
  654. int getRandomType(RNG& rng)
  655. {
  656. int srctype = cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL, 1, ARITHM_MAX_CHANNELS);
  657. ddepth = cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL, 1, 1);
  658. return srctype;
  659. }
  660. double getMaxErr(int)
  661. {
  662. return ddepth <= CV_32S ? 2 : ddepth < CV_64F ? 1e-3 : 1e-12;
  663. }
  664. void generateScalars(int depth, RNG& rng)
  665. {
  666. if( rng.uniform(0, 2) )
  667. BaseElemWiseOp::generateScalars(depth, rng);
  668. else
  669. {
  670. alpha = 1;
  671. gamma = Scalar::all(0);
  672. }
  673. }
  674. int ddepth;
  675. };
  676. struct ConvertScaleFp16Op : public BaseElemWiseOp
  677. {
  678. ConvertScaleFp16Op() : BaseElemWiseOp(1, FIX_BETA+REAL_GAMMA, 1, 1, Scalar::all(0)), nextRange(0) { }
  679. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  680. {
  681. Mat m;
  682. convertFp16(src[0], m);
  683. convertFp16(m, dst);
  684. }
  685. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  686. {
  687. cvtest::copy(src[0], dst);
  688. }
  689. int getRandomType(RNG&)
  690. {
  691. // 0: FP32 -> FP16 -> FP32
  692. // 1: FP16 -> FP32 -> FP16
  693. int srctype = (nextRange & 1) == 0 ? CV_32F : CV_16S;
  694. return srctype;
  695. }
  696. void getValueRange(int, double& minval, double& maxval)
  697. {
  698. // 0: FP32 -> FP16 -> FP32
  699. // 1: FP16 -> FP32 -> FP16
  700. if( (nextRange & 1) == 0 )
  701. {
  702. // largest integer number that fp16 can express exactly
  703. maxval = 2048.f;
  704. minval = -maxval;
  705. }
  706. else
  707. {
  708. // 0: positive number range
  709. // 1: negative number range
  710. if( (nextRange & 2) == 0 )
  711. {
  712. minval = 0; // 0x0000 +0
  713. maxval = 31744; // 0x7C00 +Inf
  714. }
  715. else
  716. {
  717. minval = -32768; // 0x8000 -0
  718. maxval = -1024; // 0xFC00 -Inf
  719. }
  720. }
  721. }
  722. double getMaxErr(int)
  723. {
  724. return 0.5f;
  725. }
  726. void generateScalars(int, RNG& rng)
  727. {
  728. nextRange = rng.next();
  729. }
  730. int nextRange;
  731. };
  732. struct ConvertScaleAbsOp : public BaseElemWiseOp
  733. {
  734. ConvertScaleAbsOp() : BaseElemWiseOp(1, FIX_BETA+REAL_GAMMA, 1, 1, Scalar::all(0)) {}
  735. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  736. {
  737. cv::convertScaleAbs(src[0], dst, alpha, gamma[0]);
  738. }
  739. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  740. {
  741. cvtest::add(src[0], alpha, Mat(), 0, Scalar::all(gamma[0]), dst, CV_8UC(src[0].channels()), true);
  742. }
  743. int getRandomType(RNG& rng)
  744. {
  745. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL, 1,
  746. ninputs > 1 ? ARITHM_MAX_CHANNELS : 4);
  747. }
  748. double getMaxErr(int)
  749. {
  750. return 1;
  751. }
  752. void generateScalars(int depth, RNG& rng)
  753. {
  754. if( rng.uniform(0, 2) )
  755. BaseElemWiseOp::generateScalars(depth, rng);
  756. else
  757. {
  758. alpha = 1;
  759. gamma = Scalar::all(0);
  760. }
  761. }
  762. };
  763. namespace reference {
  764. static void flip(const Mat& src, Mat& dst, int flipcode)
  765. {
  766. CV_Assert(src.dims == 2);
  767. dst.create(src.size(), src.type());
  768. int i, j, k, esz = (int)src.elemSize(), width = src.cols*esz;
  769. for( i = 0; i < dst.rows; i++ )
  770. {
  771. const uchar* sptr = src.ptr(flipcode == 1 ? i : dst.rows - i - 1);
  772. uchar* dptr = dst.ptr(i);
  773. if( flipcode == 0 )
  774. memcpy(dptr, sptr, width);
  775. else
  776. {
  777. for( j = 0; j < width; j += esz )
  778. for( k = 0; k < esz; k++ )
  779. dptr[j + k] = sptr[width - j - esz + k];
  780. }
  781. }
  782. }
  783. static void setIdentity(Mat& dst, const Scalar& s)
  784. {
  785. CV_Assert( dst.dims == 2 && dst.channels() <= 4 );
  786. double buf[4];
  787. scalarToRawData(s, buf, dst.type(), 0);
  788. int i, k, esz = (int)dst.elemSize(), width = dst.cols*esz;
  789. for( i = 0; i < dst.rows; i++ )
  790. {
  791. uchar* dptr = dst.ptr(i);
  792. memset( dptr, 0, width );
  793. if( i < dst.cols )
  794. for( k = 0; k < esz; k++ )
  795. dptr[i*esz + k] = ((uchar*)buf)[k];
  796. }
  797. }
  798. } // namespace
  799. struct FlipOp : public BaseElemWiseOp
  800. {
  801. FlipOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) { flipcode = 0; }
  802. void getRandomSize(RNG& rng, vector<int>& size)
  803. {
  804. cvtest::randomSize(rng, 2, 2, ARITHM_MAX_SIZE_LOG, size);
  805. }
  806. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  807. {
  808. cv::flip(src[0], dst, flipcode);
  809. }
  810. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  811. {
  812. reference::flip(src[0], dst, flipcode);
  813. }
  814. void generateScalars(int, RNG& rng)
  815. {
  816. flipcode = rng.uniform(0, 3) - 1;
  817. }
  818. double getMaxErr(int)
  819. {
  820. return 0;
  821. }
  822. int flipcode;
  823. };
  824. struct TransposeOp : public BaseElemWiseOp
  825. {
  826. TransposeOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  827. void getRandomSize(RNG& rng, vector<int>& size)
  828. {
  829. cvtest::randomSize(rng, 2, 2, ARITHM_MAX_SIZE_LOG, size);
  830. }
  831. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  832. {
  833. cv::transpose(src[0], dst);
  834. }
  835. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  836. {
  837. cvtest::transpose(src[0], dst);
  838. }
  839. double getMaxErr(int)
  840. {
  841. return 0;
  842. }
  843. };
  844. struct SetIdentityOp : public BaseElemWiseOp
  845. {
  846. SetIdentityOp() : BaseElemWiseOp(0, FIX_ALPHA+FIX_BETA, 1, 1, Scalar::all(0)) {}
  847. void getRandomSize(RNG& rng, vector<int>& size)
  848. {
  849. cvtest::randomSize(rng, 2, 2, ARITHM_MAX_SIZE_LOG, size);
  850. }
  851. void op(const vector<Mat>&, Mat& dst, const Mat&)
  852. {
  853. cv::setIdentity(dst, gamma);
  854. }
  855. void refop(const vector<Mat>&, Mat& dst, const Mat&)
  856. {
  857. reference::setIdentity(dst, gamma);
  858. }
  859. double getMaxErr(int)
  860. {
  861. return 0;
  862. }
  863. };
  864. struct SetZeroOp : public BaseElemWiseOp
  865. {
  866. SetZeroOp() : BaseElemWiseOp(0, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  867. void op(const vector<Mat>&, Mat& dst, const Mat&)
  868. {
  869. dst = Scalar::all(0);
  870. }
  871. void refop(const vector<Mat>&, Mat& dst, const Mat&)
  872. {
  873. cvtest::set(dst, Scalar::all(0));
  874. }
  875. double getMaxErr(int)
  876. {
  877. return 0;
  878. }
  879. };
  880. namespace reference {
  881. static void exp(const Mat& src, Mat& dst)
  882. {
  883. dst.create( src.dims, &src.size[0], src.type() );
  884. const Mat *arrays[]={&src, &dst, 0};
  885. Mat planes[2];
  886. NAryMatIterator it(arrays, planes);
  887. size_t j, total = planes[0].total()*src.channels();
  888. size_t i, nplanes = it.nplanes;
  889. int depth = src.depth();
  890. for( i = 0; i < nplanes; i++, ++it )
  891. {
  892. const uchar* sptr = planes[0].ptr();
  893. uchar* dptr = planes[1].ptr();
  894. if( depth == CV_32F )
  895. {
  896. for( j = 0; j < total; j++ )
  897. ((float*)dptr)[j] = std::exp(((const float*)sptr)[j]);
  898. }
  899. else if( depth == CV_64F )
  900. {
  901. for( j = 0; j < total; j++ )
  902. ((double*)dptr)[j] = std::exp(((const double*)sptr)[j]);
  903. }
  904. }
  905. }
  906. static void log(const Mat& src, Mat& dst)
  907. {
  908. dst.create( src.dims, &src.size[0], src.type() );
  909. const Mat *arrays[]={&src, &dst, 0};
  910. Mat planes[2];
  911. NAryMatIterator it(arrays, planes);
  912. size_t j, total = planes[0].total()*src.channels();
  913. size_t i, nplanes = it.nplanes;
  914. int depth = src.depth();
  915. for( i = 0; i < nplanes; i++, ++it )
  916. {
  917. const uchar* sptr = planes[0].ptr();
  918. uchar* dptr = planes[1].ptr();
  919. if( depth == CV_32F )
  920. {
  921. for( j = 0; j < total; j++ )
  922. ((float*)dptr)[j] = (float)std::log(fabs(((const float*)sptr)[j]));
  923. }
  924. else if( depth == CV_64F )
  925. {
  926. for( j = 0; j < total; j++ )
  927. ((double*)dptr)[j] = std::log(fabs(((const double*)sptr)[j]));
  928. }
  929. }
  930. }
  931. } // namespace
  932. struct ExpOp : public BaseElemWiseOp
  933. {
  934. ExpOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  935. int getRandomType(RNG& rng)
  936. {
  937. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_FLT, 1, ARITHM_MAX_CHANNELS);
  938. }
  939. void getValueRange(int depth, double& minval, double& maxval)
  940. {
  941. maxval = depth == CV_32F ? 50 : 100;
  942. minval = -maxval;
  943. }
  944. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  945. {
  946. cv::exp(src[0], dst);
  947. }
  948. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  949. {
  950. reference::exp(src[0], dst);
  951. }
  952. double getMaxErr(int depth)
  953. {
  954. return depth == CV_32F ? 1e-5 : 1e-12;
  955. }
  956. };
  957. struct LogOp : public BaseElemWiseOp
  958. {
  959. LogOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)) {}
  960. int getRandomType(RNG& rng)
  961. {
  962. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_FLT, 1, ARITHM_MAX_CHANNELS);
  963. }
  964. void getValueRange(int depth, double& minval, double& maxval)
  965. {
  966. maxval = depth == CV_32F ? 50 : 100;
  967. minval = -maxval;
  968. }
  969. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  970. {
  971. Mat temp;
  972. reference::exp(src[0], temp);
  973. cv::log(temp, dst);
  974. }
  975. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  976. {
  977. Mat temp;
  978. reference::exp(src[0], temp);
  979. reference::log(temp, dst);
  980. }
  981. double getMaxErr(int depth)
  982. {
  983. return depth == CV_32F ? 1e-5 : 1e-12;
  984. }
  985. };
  986. namespace reference {
  987. static void cartToPolar(const Mat& mx, const Mat& my, Mat& mmag, Mat& mangle, bool angleInDegrees)
  988. {
  989. CV_Assert( (mx.type() == CV_32F || mx.type() == CV_64F) &&
  990. mx.type() == my.type() && mx.size == my.size );
  991. mmag.create( mx.dims, &mx.size[0], mx.type() );
  992. mangle.create( mx.dims, &mx.size[0], mx.type() );
  993. const Mat *arrays[]={&mx, &my, &mmag, &mangle, 0};
  994. Mat planes[4];
  995. NAryMatIterator it(arrays, planes);
  996. size_t j, total = planes[0].total();
  997. size_t i, nplanes = it.nplanes;
  998. int depth = mx.depth();
  999. double scale = angleInDegrees ? 180/CV_PI : 1;
  1000. for( i = 0; i < nplanes; i++, ++it )
  1001. {
  1002. if( depth == CV_32F )
  1003. {
  1004. const float* xptr = planes[0].ptr<float>();
  1005. const float* yptr = planes[1].ptr<float>();
  1006. float* mptr = planes[2].ptr<float>();
  1007. float* aptr = planes[3].ptr<float>();
  1008. for( j = 0; j < total; j++ )
  1009. {
  1010. mptr[j] = std::sqrt(xptr[j]*xptr[j] + yptr[j]*yptr[j]);
  1011. double a = atan2((double)yptr[j], (double)xptr[j]);
  1012. if( a < 0 ) a += CV_PI*2;
  1013. aptr[j] = (float)(a*scale);
  1014. }
  1015. }
  1016. else
  1017. {
  1018. const double* xptr = planes[0].ptr<double>();
  1019. const double* yptr = planes[1].ptr<double>();
  1020. double* mptr = planes[2].ptr<double>();
  1021. double* aptr = planes[3].ptr<double>();
  1022. for( j = 0; j < total; j++ )
  1023. {
  1024. mptr[j] = std::sqrt(xptr[j]*xptr[j] + yptr[j]*yptr[j]);
  1025. double a = atan2(yptr[j], xptr[j]);
  1026. if( a < 0 ) a += CV_PI*2;
  1027. aptr[j] = a*scale;
  1028. }
  1029. }
  1030. }
  1031. }
  1032. } // namespace
  1033. struct CartToPolarToCartOp : public BaseElemWiseOp
  1034. {
  1035. CartToPolarToCartOp() : BaseElemWiseOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0))
  1036. {
  1037. context = 3;
  1038. angleInDegrees = true;
  1039. }
  1040. int getRandomType(RNG& rng)
  1041. {
  1042. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_FLT, 1, 1);
  1043. }
  1044. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  1045. {
  1046. Mat mag, angle, x, y;
  1047. cv::cartToPolar(src[0], src[1], mag, angle, angleInDegrees);
  1048. cv::polarToCart(mag, angle, x, y, angleInDegrees);
  1049. Mat msrc[] = {mag, angle, x, y};
  1050. int pairs[] = {0, 0, 1, 1, 2, 2, 3, 3};
  1051. dst.create(src[0].dims, src[0].size, CV_MAKETYPE(src[0].depth(), 4));
  1052. cv::mixChannels(msrc, 4, &dst, 1, pairs, 4);
  1053. }
  1054. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  1055. {
  1056. Mat mag, angle;
  1057. reference::cartToPolar(src[0], src[1], mag, angle, angleInDegrees);
  1058. Mat msrc[] = {mag, angle, src[0], src[1]};
  1059. int pairs[] = {0, 0, 1, 1, 2, 2, 3, 3};
  1060. dst.create(src[0].dims, src[0].size, CV_MAKETYPE(src[0].depth(), 4));
  1061. cv::mixChannels(msrc, 4, &dst, 1, pairs, 4);
  1062. }
  1063. void generateScalars(int, RNG& rng)
  1064. {
  1065. angleInDegrees = rng.uniform(0, 2) != 0;
  1066. }
  1067. double getMaxErr(int)
  1068. {
  1069. return 1e-3;
  1070. }
  1071. bool angleInDegrees;
  1072. };
  1073. struct MeanOp : public BaseElemWiseOp
  1074. {
  1075. MeanOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
  1076. {
  1077. context = 3;
  1078. };
  1079. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1080. {
  1081. dst.create(1, 1, CV_64FC4);
  1082. dst.at<Scalar>(0,0) = cv::mean(src[0], mask);
  1083. }
  1084. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1085. {
  1086. dst.create(1, 1, CV_64FC4);
  1087. dst.at<Scalar>(0,0) = cvtest::mean(src[0], mask);
  1088. }
  1089. double getMaxErr(int)
  1090. {
  1091. return 1e-5;
  1092. }
  1093. };
  1094. struct SumOp : public BaseElemWiseOp
  1095. {
  1096. SumOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
  1097. {
  1098. context = 3;
  1099. };
  1100. void op(const vector<Mat>& src, Mat& dst, const Mat&)
  1101. {
  1102. dst.create(1, 1, CV_64FC4);
  1103. dst.at<Scalar>(0,0) = cv::sum(src[0]);
  1104. }
  1105. void refop(const vector<Mat>& src, Mat& dst, const Mat&)
  1106. {
  1107. dst.create(1, 1, CV_64FC4);
  1108. dst.at<Scalar>(0,0) = cvtest::mean(src[0])*(double)src[0].total();
  1109. }
  1110. double getMaxErr(int)
  1111. {
  1112. return 1e-5;
  1113. }
  1114. };
  1115. struct CountNonZeroOp : public BaseElemWiseOp
  1116. {
  1117. CountNonZeroOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SCALAR_OUTPUT+SUPPORT_MASK, 1, 1, Scalar::all(0))
  1118. {}
  1119. int getRandomType(RNG& rng)
  1120. {
  1121. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL, 1, 1);
  1122. }
  1123. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1124. {
  1125. Mat temp;
  1126. src[0].copyTo(temp);
  1127. if( !mask.empty() )
  1128. temp.setTo(Scalar::all(0), mask);
  1129. dst.create(1, 1, CV_32S);
  1130. dst.at<int>(0,0) = cv::countNonZero(temp);
  1131. }
  1132. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1133. {
  1134. Mat temp;
  1135. cvtest::compare(src[0], 0, temp, CMP_NE);
  1136. if( !mask.empty() )
  1137. cvtest::set(temp, Scalar::all(0), mask);
  1138. dst.create(1, 1, CV_32S);
  1139. dst.at<int>(0,0) = saturate_cast<int>(cvtest::mean(temp)[0]/255*temp.total());
  1140. }
  1141. double getMaxErr(int)
  1142. {
  1143. return 0;
  1144. }
  1145. };
  1146. struct MeanStdDevOp : public BaseElemWiseOp
  1147. {
  1148. Scalar sqmeanRef;
  1149. int cn;
  1150. MeanStdDevOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
  1151. {
  1152. cn = 0;
  1153. context = 7;
  1154. };
  1155. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1156. {
  1157. dst.create(1, 2, CV_64FC4);
  1158. cv::meanStdDev(src[0], dst.at<Scalar>(0,0), dst.at<Scalar>(0,1), mask);
  1159. }
  1160. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1161. {
  1162. Mat temp;
  1163. cvtest::convert(src[0], temp, CV_64F);
  1164. cvtest::multiply(temp, temp, temp);
  1165. Scalar mean = cvtest::mean(src[0], mask);
  1166. Scalar sqmean = cvtest::mean(temp, mask);
  1167. sqmeanRef = sqmean;
  1168. cn = temp.channels();
  1169. for( int c = 0; c < 4; c++ )
  1170. sqmean[c] = std::sqrt(std::max(sqmean[c] - mean[c]*mean[c], 0.));
  1171. dst.create(1, 2, CV_64FC4);
  1172. dst.at<Scalar>(0,0) = mean;
  1173. dst.at<Scalar>(0,1) = sqmean;
  1174. }
  1175. double getMaxErr(int)
  1176. {
  1177. CV_Assert(cn > 0);
  1178. double err = sqmeanRef[0];
  1179. for(int i = 1; i < cn; ++i)
  1180. err = std::max(err, sqmeanRef[i]);
  1181. return 3e-7 * err;
  1182. }
  1183. };
  1184. struct NormOp : public BaseElemWiseOp
  1185. {
  1186. NormOp() : BaseElemWiseOp(2, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
  1187. {
  1188. context = 1;
  1189. normType = 0;
  1190. };
  1191. int getRandomType(RNG& rng)
  1192. {
  1193. int type = cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 4);
  1194. for(;;)
  1195. {
  1196. normType = rng.uniform(1, 8);
  1197. if( normType == NORM_INF || normType == NORM_L1 ||
  1198. normType == NORM_L2 || normType == NORM_L2SQR ||
  1199. normType == NORM_HAMMING || normType == NORM_HAMMING2 )
  1200. break;
  1201. }
  1202. if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
  1203. {
  1204. type = CV_8U;
  1205. }
  1206. return type;
  1207. }
  1208. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1209. {
  1210. dst.create(1, 2, CV_64FC1);
  1211. dst.at<double>(0,0) = cv::norm(src[0], normType, mask);
  1212. dst.at<double>(0,1) = cv::norm(src[0], src[1], normType, mask);
  1213. }
  1214. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1215. {
  1216. dst.create(1, 2, CV_64FC1);
  1217. dst.at<double>(0,0) = cvtest::norm(src[0], normType, mask);
  1218. dst.at<double>(0,1) = cvtest::norm(src[0], src[1], normType, mask);
  1219. }
  1220. void generateScalars(int, RNG& /*rng*/)
  1221. {
  1222. }
  1223. double getMaxErr(int)
  1224. {
  1225. return 1e-6;
  1226. }
  1227. int normType;
  1228. };
  1229. struct MinMaxLocOp : public BaseElemWiseOp
  1230. {
  1231. MinMaxLocOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
  1232. {
  1233. context = ARITHM_MAX_NDIMS*2 + 2;
  1234. };
  1235. int getRandomType(RNG& rng)
  1236. {
  1237. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 1);
  1238. }
  1239. void saveOutput(const vector<int>& minidx, const vector<int>& maxidx,
  1240. double minval, double maxval, Mat& dst)
  1241. {
  1242. int i, ndims = (int)minidx.size();
  1243. dst.create(1, ndims*2 + 2, CV_64FC1);
  1244. for( i = 0; i < ndims; i++ )
  1245. {
  1246. dst.at<double>(0,i) = minidx[i];
  1247. dst.at<double>(0,i+ndims) = maxidx[i];
  1248. }
  1249. dst.at<double>(0,ndims*2) = minval;
  1250. dst.at<double>(0,ndims*2+1) = maxval;
  1251. }
  1252. void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1253. {
  1254. int ndims = src[0].dims;
  1255. vector<int> minidx(ndims), maxidx(ndims);
  1256. double minval=0, maxval=0;
  1257. cv::minMaxIdx(src[0], &minval, &maxval, &minidx[0], &maxidx[0], mask);
  1258. saveOutput(minidx, maxidx, minval, maxval, dst);
  1259. }
  1260. void refop(const vector<Mat>& src, Mat& dst, const Mat& mask)
  1261. {
  1262. int ndims=src[0].dims;
  1263. vector<int> minidx(ndims), maxidx(ndims);
  1264. double minval=0, maxval=0;
  1265. cvtest::minMaxLoc(src[0], &minval, &maxval, &minidx, &maxidx, mask);
  1266. saveOutput(minidx, maxidx, minval, maxval, dst);
  1267. }
  1268. double getMaxErr(int)
  1269. {
  1270. return 0;
  1271. }
  1272. };
  1273. struct reduceArgMinMaxOp : public BaseElemWiseOp
  1274. {
  1275. reduceArgMinMaxOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA, 1, 1, Scalar::all(0)),
  1276. isLast(false), isMax(false), axis(0)
  1277. {
  1278. context = ARITHM_MAX_NDIMS*2 + 2;
  1279. };
  1280. int getRandomType(RNG& rng) override
  1281. {
  1282. return cvtest::randomType(rng, _OutputArray::DEPTH_MASK_ALL_BUT_8S, 1, 1);
  1283. }
  1284. void getRandomSize(RNG& rng, vector<int>& size) override
  1285. {
  1286. cvtest::randomSize(rng, 2, ARITHM_MAX_NDIMS, 6, size);
  1287. }
  1288. void generateScalars(int depth, RNG& rng) override
  1289. {
  1290. BaseElemWiseOp::generateScalars(depth, rng);
  1291. isLast = (randInt(rng) % 2 == 0);
  1292. isMax = (randInt(rng) % 2 == 0);
  1293. axis = randInt(rng);
  1294. }
  1295. int getAxis(const Mat& src) const
  1296. {
  1297. int dims = src.dims;
  1298. return static_cast<int>(axis % (2 * dims)) - dims; // [-dims; dims - 1]
  1299. }
  1300. void op(const vector<Mat>& src, Mat& dst, const Mat&) override
  1301. {
  1302. const Mat& inp = src[0];
  1303. const int axis_ = getAxis(inp);
  1304. if (isMax)
  1305. {
  1306. cv::reduceArgMax(inp, dst, axis_, isLast);
  1307. }
  1308. else
  1309. {
  1310. cv::reduceArgMin(inp, dst, axis_, isLast);
  1311. }
  1312. }
  1313. void refop(const vector<Mat>& src, Mat& dst, const Mat&) override
  1314. {
  1315. const Mat& inp = src[0];
  1316. const int axis_ = getAxis(inp);
  1317. if (!isLast && !isMax)
  1318. {
  1319. cvtest::MinMaxReducer<std::less>::reduce(inp, dst, axis_);
  1320. }
  1321. else if (!isLast && isMax)
  1322. {
  1323. cvtest::MinMaxReducer<std::greater>::reduce(inp, dst, axis_);
  1324. }
  1325. else if (isLast && !isMax)
  1326. {
  1327. cvtest::MinMaxReducer<std::less_equal>::reduce(inp, dst, axis_);
  1328. }
  1329. else
  1330. {
  1331. cvtest::MinMaxReducer<std::greater_equal>::reduce(inp, dst, axis_);
  1332. }
  1333. }
  1334. bool isLast;
  1335. bool isMax;
  1336. uint32_t axis;
  1337. };
  1338. typedef Ptr<BaseElemWiseOp> ElemWiseOpPtr;
  1339. class ElemWiseTest : public ::testing::TestWithParam<ElemWiseOpPtr> {};
  1340. TEST_P(ElemWiseTest, accuracy)
  1341. {
  1342. ElemWiseOpPtr op = GetParam();
  1343. int testIdx = 0;
  1344. RNG rng((uint64)ARITHM_RNG_SEED);
  1345. for( testIdx = 0; testIdx < ARITHM_NTESTS; testIdx++ )
  1346. {
  1347. vector<int> size;
  1348. op->getRandomSize(rng, size);
  1349. int type = op->getRandomType(rng);
  1350. int depth = CV_MAT_DEPTH(type);
  1351. bool haveMask = ((op->flags & BaseElemWiseOp::SUPPORT_MASK) != 0
  1352. || (op->flags & BaseElemWiseOp::SUPPORT_MULTICHANNELMASK) != 0) && rng.uniform(0, 4) == 0;
  1353. double minval=0, maxval=0;
  1354. op->getValueRange(depth, minval, maxval);
  1355. int i, ninputs = op->ninputs;
  1356. vector<Mat> src(ninputs);
  1357. for( i = 0; i < ninputs; i++ )
  1358. src[i] = cvtest::randomMat(rng, size, type, minval, maxval, true);
  1359. Mat dst0, dst, mask;
  1360. if( haveMask ) {
  1361. bool multiChannelMask = (op->flags & BaseElemWiseOp::SUPPORT_MULTICHANNELMASK) != 0
  1362. && rng.uniform(0, 2) == 0;
  1363. int masktype = CV_8UC(multiChannelMask ? CV_MAT_CN(type) : 1);
  1364. mask = cvtest::randomMat(rng, size, masktype, 0, 2, true);
  1365. }
  1366. if( (haveMask || ninputs == 0) && !(op->flags & BaseElemWiseOp::SCALAR_OUTPUT))
  1367. {
  1368. dst0 = cvtest::randomMat(rng, size, type, minval, maxval, false);
  1369. dst = cvtest::randomMat(rng, size, type, minval, maxval, true);
  1370. cvtest::copy(dst, dst0);
  1371. }
  1372. op->generateScalars(depth, rng);
  1373. op->refop(src, dst0, mask);
  1374. op->op(src, dst, mask);
  1375. double maxErr = op->getMaxErr(depth);
  1376. ASSERT_PRED_FORMAT2(cvtest::MatComparator(maxErr, op->context), dst0, dst) << "\nsrc[0] ~ " <<
  1377. cvtest::MatInfo(!src.empty() ? src[0] : Mat()) << "\ntestCase #" << testIdx << "\n";
  1378. }
  1379. }

INSTANTIATE_TEST_CASE_P(Core_Copy, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new CopyOp)));
INSTANTIATE_TEST_CASE_P(Core_Set, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new SetOp)));
INSTANTIATE_TEST_CASE_P(Core_SetZero, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new SetZeroOp)));
INSTANTIATE_TEST_CASE_P(Core_ConvertScale, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new ConvertScaleOp)));
INSTANTIATE_TEST_CASE_P(Core_ConvertScaleFp16, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new ConvertScaleFp16Op)));
INSTANTIATE_TEST_CASE_P(Core_ConvertScaleAbs, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new ConvertScaleAbsOp)));
INSTANTIATE_TEST_CASE_P(Core_Add, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new AddOp)));
INSTANTIATE_TEST_CASE_P(Core_Sub, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new SubOp)));
INSTANTIATE_TEST_CASE_P(Core_AddS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new AddSOp)));
INSTANTIATE_TEST_CASE_P(Core_SubRS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new SubRSOp)));
INSTANTIATE_TEST_CASE_P(Core_ScaleAdd, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new ScaleAddOp)));
INSTANTIATE_TEST_CASE_P(Core_AddWeighted, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new AddWeightedOp)));
INSTANTIATE_TEST_CASE_P(Core_AbsDiff, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new AbsDiffOp)));
INSTANTIATE_TEST_CASE_P(Core_AbsDiffS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new AbsDiffSOp)));
INSTANTIATE_TEST_CASE_P(Core_And, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogicOp('&'))));
INSTANTIATE_TEST_CASE_P(Core_AndS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogicSOp('&'))));
INSTANTIATE_TEST_CASE_P(Core_Or, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogicOp('|'))));
INSTANTIATE_TEST_CASE_P(Core_OrS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogicSOp('|'))));
INSTANTIATE_TEST_CASE_P(Core_Xor, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogicOp('^'))));
INSTANTIATE_TEST_CASE_P(Core_XorS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogicSOp('^'))));
INSTANTIATE_TEST_CASE_P(Core_Not, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogicSOp('~'))));
INSTANTIATE_TEST_CASE_P(Core_Max, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MaxOp)));
INSTANTIATE_TEST_CASE_P(Core_MaxS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MaxSOp)));
INSTANTIATE_TEST_CASE_P(Core_Min, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MinOp)));
INSTANTIATE_TEST_CASE_P(Core_MinS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MinSOp)));
INSTANTIATE_TEST_CASE_P(Core_Mul, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MulOp)));
INSTANTIATE_TEST_CASE_P(Core_Div, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new DivOp)));
INSTANTIATE_TEST_CASE_P(Core_Recip, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new RecipOp)));
INSTANTIATE_TEST_CASE_P(Core_Cmp, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new CmpOp)));
INSTANTIATE_TEST_CASE_P(Core_CmpS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new CmpSOp)));
INSTANTIATE_TEST_CASE_P(Core_InRangeS, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new InRangeSOp)));
INSTANTIATE_TEST_CASE_P(Core_InRange, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new InRangeOp)));
INSTANTIATE_TEST_CASE_P(Core_Flip, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new FlipOp)));
INSTANTIATE_TEST_CASE_P(Core_Transpose, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new TransposeOp)));
INSTANTIATE_TEST_CASE_P(Core_SetIdentity, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new SetIdentityOp)));
INSTANTIATE_TEST_CASE_P(Core_Exp, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new ExpOp)));
INSTANTIATE_TEST_CASE_P(Core_Log, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new LogOp)));
INSTANTIATE_TEST_CASE_P(Core_CountNonZero, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new CountNonZeroOp)));
INSTANTIATE_TEST_CASE_P(Core_Mean, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MeanOp)));
INSTANTIATE_TEST_CASE_P(Core_MeanStdDev, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MeanStdDevOp)));
INSTANTIATE_TEST_CASE_P(Core_Sum, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new SumOp)));
INSTANTIATE_TEST_CASE_P(Core_Norm, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new NormOp)));
INSTANTIATE_TEST_CASE_P(Core_MinMaxLoc, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new MinMaxLocOp)));
INSTANTIATE_TEST_CASE_P(Core_reduceArgMinMax, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new reduceArgMinMaxOp)));
INSTANTIATE_TEST_CASE_P(Core_CartToPolarToCart, ElemWiseTest, ::testing::Values(ElemWiseOpPtr(new CartToPolarToCartOp)));
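
// Masked arithmetic on freshly created destination matrices of random size/type:
// the masked result is compared against an unmasked reference computed from copies
// of the inputs whose masked-out elements have been set to zero.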
TEST(Core_ArithmMask, uninitialized)
{
    RNG& rng = theRNG();
    const int MAX_DIM = 3;
    int sizes[MAX_DIM];
    for( int iter = 0; iter < 100; iter++ )
    {
        int dims = rng.uniform(1, MAX_DIM+1);
        int depth = rng.uniform(CV_8U, CV_64F+1);
        int cn = rng.uniform(1, 6);
        int type = CV_MAKETYPE(depth, cn);
        int op = rng.uniform(0, depth < CV_32F ? 5 : 2); // don't run bitwise operations on floating-point values
        int depth1 = op <= 1 ? CV_64F : depth;
        for (int k = 0; k < MAX_DIM; k++)
        {
            sizes[k] = k < dims ? rng.uniform(1, 30) : 0;
        }
        SCOPED_TRACE(cv::format("iter=%d dims=%d depth=%d cn=%d type=%d op=%d depth1=%d dims=[%d; %d; %d]",
                                iter, dims, depth, cn, type, op, depth1, sizes[0], sizes[1], sizes[2]));

        Mat a(dims, sizes, type), a1;
        Mat b(dims, sizes, type), b1;
        Mat mask(dims, sizes, CV_8U);
        Mat mask1;
        Mat c, d;

        rng.fill(a, RNG::UNIFORM, 0, 100);
        rng.fill(b, RNG::UNIFORM, 0, 100);

        // The [-2,2) range means that each generated random number
        // will be one of -2, -1, 0, 1. Saturated to [0,255], it becomes
        // 0, 0, 0, 1 => roughly 25% of the mask elements are non-zero.
        rng.fill(mask, RNG::UNIFORM, -2, 2);

        a.convertTo(a1, depth1);
        b.convertTo(b1, depth1);
        // invert the mask
        cv::compare(mask, 0, mask1, CMP_EQ);
        a1.setTo(0, mask1);
        b1.setTo(0, mask1);

        if( op == 0 )
        {
            cv::add(a, b, c, mask);
            cv::add(a1, b1, d);
        }
        else if( op == 1 )
        {
            cv::subtract(a, b, c, mask);
            cv::subtract(a1, b1, d);
        }
        else if( op == 2 )
        {
            cv::bitwise_and(a, b, c, mask);
            cv::bitwise_and(a1, b1, d);
        }
        else if( op == 3 )
        {
            cv::bitwise_or(a, b, c, mask);
            cv::bitwise_or(a1, b1, d);
        }
        else if( op == 4 )
        {
            cv::bitwise_xor(a, b, c, mask);
            cv::bitwise_xor(a1, b1, d);
        }
        Mat d1;
        d.convertTo(d1, depth);
        EXPECT_LE(cvtest::norm(c, d1, CV_C), DBL_EPSILON);
    }

    Mat_<uchar> tmpSrc(100,100);
    tmpSrc = 124;
    Mat_<uchar> tmpMask(100,100);
    tmpMask = 255;
    Mat_<uchar> tmpDst(100,100);
    tmpDst = 2;
    tmpSrc.copyTo(tmpDst, tmpMask);
}

TEST(Multiply, FloatingPointRounding)
{
    cv::Mat src(1, 1, CV_8UC1, cv::Scalar::all(110)), dst;
    cv::Scalar s(147.286359696927, 1, 1, 1);

    cv::multiply(src, s, dst, 1, CV_16U);
    // with CV_32F this produces the result 16202
    ASSERT_EQ(dst.at<ushort>(0,0), 16201);
}

TEST(Core_Add, AddToColumnWhen3Rows)
{
    cv::Mat m1 = (cv::Mat_<double>(3, 2) << 1, 2, 3, 4, 5, 6);
    m1.col(1) += 10;

    cv::Mat m2 = (cv::Mat_<double>(3, 2) << 1, 12, 3, 14, 5, 16);

    ASSERT_EQ(0, countNonZero(m1 - m2));
}

TEST(Core_Add, AddToColumnWhen4Rows)
{
    cv::Mat m1 = (cv::Mat_<double>(4, 2) << 1, 2, 3, 4, 5, 6, 7, 8);
    m1.col(1) += 10;

    cv::Mat m2 = (cv::Mat_<double>(4, 2) << 1, 12, 3, 14, 5, 16, 7, 18);

    ASSERT_EQ(0, countNonZero(m1 - m2));
}
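
// cvRound resolves half-way cases to the nearest even integer
// (2.5 -> 2, 3.5 -> 4, -2.5 -> -2, -3.5 -> -4), as asserted below.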
TEST(Core_round, CvRound)
{
    ASSERT_EQ(2, cvRound(2.0));
    ASSERT_EQ(2, cvRound(2.1));
    ASSERT_EQ(-2, cvRound(-2.1));
    ASSERT_EQ(3, cvRound(2.8));
    ASSERT_EQ(-3, cvRound(-2.8));
    ASSERT_EQ(2, cvRound(2.5));
    ASSERT_EQ(4, cvRound(3.5));
    ASSERT_EQ(-2, cvRound(-2.5));
    ASSERT_EQ(-4, cvRound(-3.5));
}

typedef testing::TestWithParam<Size> Mul1;

TEST_P(Mul1, One)
{
    Size size = GetParam();
    cv::Mat src(size, CV_32FC1, cv::Scalar::all(2)), dst,
        ref_dst(size, CV_32FC1, cv::Scalar::all(6));

    cv::multiply(3, src, dst);

    ASSERT_EQ(0, cvtest::norm(dst, ref_dst, cv::NORM_INF));
}

INSTANTIATE_TEST_CASE_P(Arithm, Mul1, testing::Values(Size(2, 2), Size(1, 1)));
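
// Parameters: output size, source type, requested destination depth, and whether
// the destination Mat is pre-allocated with a fixed type before calling cv::subtract.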
class SubtractOutputMatNotEmpty : public testing::TestWithParam< tuple<cv::Size, perf::MatType, perf::MatDepth, bool> >
{
public:
    cv::Size size;
    int src_type;
    int dst_depth;
    bool fixed;

    void SetUp()
    {
        size = get<0>(GetParam());
        src_type = get<1>(GetParam());
        dst_depth = get<2>(GetParam());
        fixed = get<3>(GetParam());
    }
};

TEST_P(SubtractOutputMatNotEmpty, Mat_Mat)
{
    cv::Mat src1(size, src_type, cv::Scalar::all(16));
    cv::Mat src2(size, src_type, cv::Scalar::all(16));
    cv::Mat dst;

    if (!fixed)
    {
        cv::subtract(src1, src2, dst, cv::noArray(), dst_depth);
    }
    else
    {
        const cv::Mat fixed_dst(size, CV_MAKE_TYPE((dst_depth > 0 ? dst_depth : CV_16S), src1.channels()));
        cv::subtract(src1, src2, fixed_dst, cv::noArray(), dst_depth);
        dst = fixed_dst;
        dst_depth = fixed_dst.depth();
    }

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src1.size(), dst.size());
    ASSERT_EQ(dst_depth > 0 ? dst_depth : src1.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

TEST_P(SubtractOutputMatNotEmpty, Mat_Mat_WithMask)
{
    cv::Mat src1(size, src_type, cv::Scalar::all(16));
    cv::Mat src2(size, src_type, cv::Scalar::all(16));
    cv::Mat mask(size, CV_8UC1, cv::Scalar::all(255));
    cv::Mat dst;

    if (!fixed)
    {
        cv::subtract(src1, src2, dst, mask, dst_depth);
    }
    else
    {
        const cv::Mat fixed_dst(size, CV_MAKE_TYPE((dst_depth > 0 ? dst_depth : CV_16S), src1.channels()));
        cv::subtract(src1, src2, fixed_dst, mask, dst_depth);
        dst = fixed_dst;
        dst_depth = fixed_dst.depth();
    }

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src1.size(), dst.size());
    ASSERT_EQ(dst_depth > 0 ? dst_depth : src1.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

TEST_P(SubtractOutputMatNotEmpty, Mat_Mat_Expr)
{
    cv::Mat src1(size, src_type, cv::Scalar::all(16));
    cv::Mat src2(size, src_type, cv::Scalar::all(16));

    cv::Mat dst = src1 - src2;

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src1.size(), dst.size());
    ASSERT_EQ(src1.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

TEST_P(SubtractOutputMatNotEmpty, Mat_Scalar)
{
    cv::Mat src(size, src_type, cv::Scalar::all(16));
    cv::Mat dst;

    if (!fixed)
    {
        cv::subtract(src, cv::Scalar::all(16), dst, cv::noArray(), dst_depth);
    }
    else
    {
        const cv::Mat fixed_dst(size, CV_MAKE_TYPE((dst_depth > 0 ? dst_depth : CV_16S), src.channels()));
        cv::subtract(src, cv::Scalar::all(16), fixed_dst, cv::noArray(), dst_depth);
        dst = fixed_dst;
        dst_depth = fixed_dst.depth();
    }

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src.size(), dst.size());
    ASSERT_EQ(dst_depth > 0 ? dst_depth : src.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

TEST_P(SubtractOutputMatNotEmpty, Mat_Scalar_WithMask)
{
    cv::Mat src(size, src_type, cv::Scalar::all(16));
    cv::Mat mask(size, CV_8UC1, cv::Scalar::all(255));
    cv::Mat dst;

    if (!fixed)
    {
        cv::subtract(src, cv::Scalar::all(16), dst, mask, dst_depth);
    }
    else
    {
        const cv::Mat fixed_dst(size, CV_MAKE_TYPE((dst_depth > 0 ? dst_depth : CV_16S), src.channels()));
        cv::subtract(src, cv::Scalar::all(16), fixed_dst, mask, dst_depth);
        dst = fixed_dst;
        dst_depth = fixed_dst.depth();
    }

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src.size(), dst.size());
    ASSERT_EQ(dst_depth > 0 ? dst_depth : src.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

TEST_P(SubtractOutputMatNotEmpty, Scalar_Mat)
{
    cv::Mat src(size, src_type, cv::Scalar::all(16));
    cv::Mat dst;

    if (!fixed)
    {
        cv::subtract(cv::Scalar::all(16), src, dst, cv::noArray(), dst_depth);
    }
    else
    {
        const cv::Mat fixed_dst(size, CV_MAKE_TYPE((dst_depth > 0 ? dst_depth : CV_16S), src.channels()));
        cv::subtract(cv::Scalar::all(16), src, fixed_dst, cv::noArray(), dst_depth);
        dst = fixed_dst;
        dst_depth = fixed_dst.depth();
    }

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src.size(), dst.size());
    ASSERT_EQ(dst_depth > 0 ? dst_depth : src.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

TEST_P(SubtractOutputMatNotEmpty, Scalar_Mat_WithMask)
{
    cv::Mat src(size, src_type, cv::Scalar::all(16));
    cv::Mat mask(size, CV_8UC1, cv::Scalar::all(255));
    cv::Mat dst;

    if (!fixed)
    {
        cv::subtract(cv::Scalar::all(16), src, dst, mask, dst_depth);
    }
    else
    {
        const cv::Mat fixed_dst(size, CV_MAKE_TYPE((dst_depth > 0 ? dst_depth : CV_16S), src.channels()));
        cv::subtract(cv::Scalar::all(16), src, fixed_dst, mask, dst_depth);
        dst = fixed_dst;
        dst_depth = fixed_dst.depth();
    }

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src.size(), dst.size());
    ASSERT_EQ(dst_depth > 0 ? dst_depth : src.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

TEST_P(SubtractOutputMatNotEmpty, Mat_Mat_3d)
{
    int dims[] = {5, size.height, size.width};

    cv::Mat src1(3, dims, src_type, cv::Scalar::all(16));
    cv::Mat src2(3, dims, src_type, cv::Scalar::all(16));
    cv::Mat dst;

    if (!fixed)
    {
        cv::subtract(src1, src2, dst, cv::noArray(), dst_depth);
    }
    else
    {
        const cv::Mat fixed_dst(3, dims, CV_MAKE_TYPE((dst_depth > 0 ? dst_depth : CV_16S), src1.channels()));
        cv::subtract(src1, src2, fixed_dst, cv::noArray(), dst_depth);
        dst = fixed_dst;
        dst_depth = fixed_dst.depth();
    }

    ASSERT_FALSE(dst.empty());
    ASSERT_EQ(src1.dims, dst.dims);
    ASSERT_EQ(src1.size, dst.size);
    ASSERT_EQ(dst_depth > 0 ? dst_depth : src1.depth(), dst.depth());
    ASSERT_EQ(0, cv::countNonZero(dst.reshape(1)));
}

INSTANTIATE_TEST_CASE_P(Arithm, SubtractOutputMatNotEmpty, testing::Combine(
    testing::Values(cv::Size(16, 16), cv::Size(13, 13), cv::Size(16, 13), cv::Size(13, 16)),
    testing::Values(perf::MatType(CV_8UC1), CV_8UC3, CV_8UC4, CV_16SC1, CV_16SC3),
    testing::Values(-1, CV_16S, CV_32S, CV_32F),
    testing::Bool()));

TEST(Core_FindNonZero, regression)
{
    Mat img(10, 10, CV_8U, Scalar::all(0));
    vector<Point> pts, pts2(5);
    findNonZero(img, pts);
    findNonZero(img, pts2);
    ASSERT_TRUE(pts.empty() && pts2.empty());

    RNG rng((uint64)-1);
    size_t nz = 0;
    for( int i = 0; i < 10; i++ )
    {
        int idx = rng.uniform(0, img.rows*img.cols);
        if( !img.data[idx] ) nz++;
        img.data[idx] = (uchar)rng.uniform(1, 256);
    }
    findNonZero(img, pts);
    ASSERT_TRUE(pts.size() == nz);

    img.convertTo( img, CV_8S );
    pts.clear();
    findNonZero(img, pts);
    ASSERT_TRUE(pts.size() == nz);

    img.convertTo( img, CV_16U );
    pts.resize(pts.size()*2);
    findNonZero(img, pts);
    ASSERT_TRUE(pts.size() == nz);

    img.convertTo( img, CV_16S );
    pts.resize(pts.size()*3);
    findNonZero(img, pts);
    ASSERT_TRUE(pts.size() == nz);

    img.convertTo( img, CV_32S );
    pts.resize(pts.size()*4);
    findNonZero(img, pts);
    ASSERT_TRUE(pts.size() == nz);

    img.convertTo( img, CV_32F );
    pts.resize(pts.size()*5);
    findNonZero(img, pts);
    ASSERT_TRUE(pts.size() == nz);

    img.convertTo( img, CV_64F );
    pts.clear();
    findNonZero(img, pts);
    ASSERT_TRUE(pts.size() == nz);
}

TEST(Core_BoolVector, support)
{
    std::vector<bool> test;
    int i, n = 205;
    int nz = 0;
    test.resize(n);
    for( i = 0; i < n; i++ )
    {
        test[i] = theRNG().uniform(0, 2) != 0;
        nz += (int)test[i];
    }
    ASSERT_EQ( nz, countNonZero(test) );
    ASSERT_FLOAT_EQ((float)nz/n, (float)(cv::mean(test)[0]));
}

TEST(MinMaxLoc, Mat_UcharMax_Without_Loc)
{
    Mat_<uchar> mat(50, 50);
    uchar iMaxVal = std::numeric_limits<uchar>::max();
    mat.setTo(iMaxVal);

    double min, max;
    Point minLoc, maxLoc;

    minMaxLoc(mat, &min, &max, &minLoc, &maxLoc, Mat());

    ASSERT_EQ(iMaxVal, min);
    ASSERT_EQ(iMaxVal, max);

    ASSERT_EQ(Point(0, 0), minLoc);
    ASSERT_EQ(Point(0, 0), maxLoc);
}

TEST(MinMaxLoc, Mat_IntMax_Without_Mask)
{
    Mat_<int> mat(50, 50);
    int iMaxVal = std::numeric_limits<int>::max();
    mat.setTo(iMaxVal);

    double min, max;
    Point minLoc, maxLoc;

    minMaxLoc(mat, &min, &max, &minLoc, &maxLoc, Mat());

    ASSERT_EQ(iMaxVal, min);
    ASSERT_EQ(iMaxVal, max);

    ASSERT_EQ(Point(0, 0), minLoc);
    ASSERT_EQ(Point(0, 0), maxLoc);
}

TEST(Normalize, regression_5876_inplace_change_type)
{
    double initial_values[] = {1, 2, 5, 4, 3};
    float result_values[] = {0, 0.25, 1, 0.75, 0.5};
    Mat m(Size(5, 1), CV_64FC1, initial_values);
    Mat result(Size(5, 1), CV_32FC1, result_values);

    normalize(m, m, 1, 0, NORM_MINMAX, CV_32F);
    EXPECT_EQ(0, cvtest::norm(m, result, NORM_INF));
}

TEST(Normalize, regression_6125)
{
    float initial_values[] = {
        1888, 1692, 369, 263, 199,
        280, 326, 129, 143, 126,
        233, 221, 130, 126, 150,
        249, 575, 574, 63, 12
    };

    Mat src(Size(20, 1), CV_32F, initial_values);
    float min = 0., max = 400.;
    normalize(src, src, 0, 400, NORM_MINMAX, CV_32F);
    for(int i = 0; i < 20; i++)
    {
        EXPECT_GE(src.at<float>(i), min) << "Value should be >= 0";
        EXPECT_LE(src.at<float>(i), max) << "Value should be <= 400";
    }
}

TEST(MinMaxLoc, regression_4955_nans)
{
    cv::Mat one_mat(2, 2, CV_32F, cv::Scalar(1));
    cv::minMaxLoc(one_mat, NULL, NULL, NULL, NULL);

    cv::Mat nan_mat(2, 2, CV_32F, cv::Scalar(std::numeric_limits<float>::quiet_NaN()));
    cv::minMaxLoc(nan_mat, NULL, NULL, NULL, NULL);
}

TEST(Subtract, scalarc1_matc3)
{
    int scalar = 255;
    cv::Mat srcImage(5, 5, CV_8UC3, cv::Scalar::all(5)), destImage;
    cv::subtract(scalar, srcImage, destImage);

    ASSERT_EQ(0, cv::norm(cv::Mat(5, 5, CV_8UC3, cv::Scalar::all(250)), destImage, cv::NORM_INF));
}

TEST(Subtract, scalarc4_matc4)
{
    cv::Scalar sc(255, 255, 255, 255);
    cv::Mat srcImage(5, 5, CV_8UC4, cv::Scalar::all(5)), destImage;
    cv::subtract(sc, srcImage, destImage);

    ASSERT_EQ(0, cv::norm(cv::Mat(5, 5, CV_8UC4, cv::Scalar::all(250)), destImage, cv::NORM_INF));
}

TEST(Compare, empty)
{
    cv::Mat temp, dst1, dst2;

    EXPECT_NO_THROW(cv::compare(temp, temp, dst1, cv::CMP_EQ));
    EXPECT_TRUE(dst1.empty());

    EXPECT_THROW(dst2 = temp > 5, cv::Exception);
}

TEST(Compare, regression_8999)
{
    Mat_<double> A(4,1); A << 1, 3, 2, 4;
    Mat_<double> B(1,1); B << 2;
    Mat C;
    EXPECT_THROW(cv::compare(A, B, C, CMP_LT), cv::Exception);
}

TEST(Compare, regression_16F_do_not_crash)
{
    cv::Mat mat1(2, 2, CV_16F, cv::Scalar(1));
    cv::Mat mat2(2, 2, CV_16F, cv::Scalar(2));
    cv::Mat dst;
    EXPECT_THROW(cv::compare(mat1, mat2, dst, cv::CMP_EQ), cv::Exception);
}

TEST(Core_minMaxIdx, regression_9207_1)
{
    const int rows = 4;
    const int cols = 3;
    uchar mask_[rows*cols] = {
        255, 255, 255,
        255,   0, 255,
          0, 255, 255,
          0,   0, 255
    };
    uchar src_[rows*cols] = {
        1, 1, 1,
        1, 1, 1,
        2, 1, 1,
        2, 2, 1
    };
    Mat mask(Size(cols, rows), CV_8UC1, mask_);
    Mat src(Size(cols, rows), CV_8UC1, src_);
    double minVal = -0.0, maxVal = -0.0;
    int minIdx[2] = { -2, -2 }, maxIdx[2] = { -2, -2 };
    cv::minMaxIdx(src, &minVal, &maxVal, minIdx, maxIdx, mask);
    EXPECT_EQ(0, minIdx[0]);
    EXPECT_EQ(0, minIdx[1]);
    EXPECT_EQ(0, maxIdx[0]);
    EXPECT_EQ(0, maxIdx[1]);
}

TEST(Core_minMaxIdx, regression_9207_2)
{
    const int rows = 13;
    const int cols = 15;
    uchar mask_[rows*cols] = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255,
        0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255,
        255, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 255,
        255, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 255, 255,
        255, 0, 0, 0, 0, 0, 0, 255, 255, 0, 0, 255, 255, 255, 0,
        255, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 255, 0,
        255, 0, 0, 0, 0, 0, 0, 255, 255, 0, 0, 0, 255, 255, 0,
        255, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 255, 0,
        255, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 255, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };
    uchar src_[rows*cols] = {
        5, 5, 5, 5, 5, 6, 5, 2, 0, 4, 6, 6, 4, 1, 0,
        6, 5, 4, 4, 5, 6, 6, 5, 2, 0, 4, 6, 5, 2, 0,
        3, 2, 1, 1, 2, 4, 6, 6, 4, 2, 3, 4, 4, 2, 0,
        1, 0, 0, 0, 0, 1, 4, 5, 4, 4, 4, 4, 3, 2, 0,
        0, 0, 0, 0, 0, 0, 2, 3, 4, 4, 4, 3, 2, 1, 0,
        0, 0, 0, 0, 0, 0, 0, 2, 3, 4, 3, 2, 1, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1,
        0, 0, 0, 0, 0, 0, 0, 1, 2, 4, 3, 3, 1, 0, 1,
        0, 0, 0, 0, 0, 0, 1, 4, 5, 6, 5, 4, 3, 2, 0,
        1, 0, 0, 0, 0, 0, 3, 5, 5, 4, 3, 4, 4, 3, 0,
        2, 0, 0, 0, 0, 2, 5, 6, 5, 2, 2, 5, 4, 3, 0
    };
    Mat mask(Size(cols, rows), CV_8UC1, mask_);
    Mat src(Size(cols, rows), CV_8UC1, src_);
    double minVal = -0.0, maxVal = -0.0;
    int minIdx[2] = { -2, -2 }, maxIdx[2] = { -2, -2 };
    cv::minMaxIdx(src, &minVal, &maxVal, minIdx, maxIdx, mask);
    EXPECT_EQ(0, minIdx[0]);
    EXPECT_EQ(14, minIdx[1]);
    EXPECT_EQ(0, maxIdx[0]);
    EXPECT_EQ(14, maxIdx[1]);
}
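
// setTo must store an infinite value into both CV_32F and CV_64F matrices,
// whether the infinity is given as float or double, and with or without a mask.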
TEST(Core_Set, regression_11044)
{
    Mat testFloat(Size(3, 3), CV_32FC1);
    Mat testDouble(Size(3, 3), CV_64FC1);

    testFloat.setTo(1);
    EXPECT_EQ(1, testFloat.at<float>(0,0));
    testFloat.setTo(std::numeric_limits<float>::infinity());
    EXPECT_EQ(std::numeric_limits<float>::infinity(), testFloat.at<float>(0, 0));
    testFloat.setTo(1);
    EXPECT_EQ(1, testFloat.at<float>(0, 0));
    testFloat.setTo(std::numeric_limits<double>::infinity());
    EXPECT_EQ(std::numeric_limits<float>::infinity(), testFloat.at<float>(0, 0));

    testDouble.setTo(1);
    EXPECT_EQ(1, testDouble.at<double>(0, 0));
    testDouble.setTo(std::numeric_limits<float>::infinity());
    EXPECT_EQ(std::numeric_limits<double>::infinity(), testDouble.at<double>(0, 0));
    testDouble.setTo(1);
    EXPECT_EQ(1, testDouble.at<double>(0, 0));
    testDouble.setTo(std::numeric_limits<double>::infinity());
    EXPECT_EQ(std::numeric_limits<double>::infinity(), testDouble.at<double>(0, 0));

    Mat testMask(Size(3, 3), CV_8UC1, Scalar(1));

    testFloat.setTo(1);
    EXPECT_EQ(1, testFloat.at<float>(0, 0));
    testFloat.setTo(std::numeric_limits<float>::infinity(), testMask);
    EXPECT_EQ(std::numeric_limits<float>::infinity(), testFloat.at<float>(0, 0));
    testFloat.setTo(1);
    EXPECT_EQ(1, testFloat.at<float>(0, 0));
    testFloat.setTo(std::numeric_limits<double>::infinity(), testMask);
    EXPECT_EQ(std::numeric_limits<float>::infinity(), testFloat.at<float>(0, 0));

    testDouble.setTo(1);
    EXPECT_EQ(1, testDouble.at<double>(0, 0));
    testDouble.setTo(std::numeric_limits<float>::infinity(), testMask);
    EXPECT_EQ(std::numeric_limits<double>::infinity(), testDouble.at<double>(0, 0));
    testDouble.setTo(1);
    EXPECT_EQ(1, testDouble.at<double>(0, 0));
    testDouble.setTo(std::numeric_limits<double>::infinity(), testMask);
    EXPECT_EQ(std::numeric_limits<double>::infinity(), testDouble.at<double>(0, 0));
}

TEST(Core_Norm, IPP_regression_NORM_L1_16UC3_small)
{
    int cn = 3;
    Size sz(9, 4); // width < 16

    Mat a(sz, CV_MAKE_TYPE(CV_16U, cn), Scalar::all(1));
    Mat b(sz, CV_MAKE_TYPE(CV_16U, cn), Scalar::all(2));
    uchar mask_[9*4] = {
        255, 255, 255,   0, 255, 255,   0, 255,   0,
          0, 255,   0,   0, 255, 255, 255, 255,   0,
          0,   0,   0, 255,   0, 255,   0, 255, 255,
          0,   0, 255,   0, 255, 255, 255,   0, 255
    };
    Mat mask(sz, CV_8UC1, mask_);

    EXPECT_EQ((double)9*4*cn, cv::norm(a, b, NORM_L1)); // without a mask, IPP works well
    EXPECT_EQ((double)20*cn, cv::norm(a, b, NORM_L1, mask));
}

TEST(Core_Norm, NORM_L2_8UC4)
{
    // Tests that there is no integer overflow in norm computation for multiple channels.
    const int kSide = 100;
    cv::Mat4b a(kSide, kSide, cv::Scalar(255, 255, 255, 255));
    cv::Mat4b b = cv::Mat4b::zeros(kSide, kSide);
    const double kNorm = 2.*kSide*255.;
    EXPECT_EQ(kNorm, cv::norm(a, b, NORM_L2));
}
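
// convertTo from CV_32S must saturate: values below the destination range clamp to 0,
// values above it clamp to the destination type's maximum (255 for CV_8U, 65535 for CV_16U).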
TEST(Core_ConvertTo, regression_12121)
{
    {
        Mat src(4, 64, CV_32SC1, Scalar(-1));
        Mat dst;
        src.convertTo(dst, CV_8U);
        EXPECT_EQ(0, dst.at<uchar>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(INT_MIN));
        Mat dst;
        src.convertTo(dst, CV_8U);
        EXPECT_EQ(0, dst.at<uchar>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(INT_MIN + 32767));
        Mat dst;
        src.convertTo(dst, CV_8U);
        EXPECT_EQ(0, dst.at<uchar>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(INT_MIN + 32768));
        Mat dst;
        src.convertTo(dst, CV_8U);
        EXPECT_EQ(0, dst.at<uchar>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(32768));
        Mat dst;
        src.convertTo(dst, CV_8U);
        EXPECT_EQ(255, dst.at<uchar>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(INT_MIN));
        Mat dst;
        src.convertTo(dst, CV_16U);
        EXPECT_EQ(0, dst.at<ushort>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(INT_MIN + 32767));
        Mat dst;
        src.convertTo(dst, CV_16U);
        EXPECT_EQ(0, dst.at<ushort>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(INT_MIN + 32768));
        Mat dst;
        src.convertTo(dst, CV_16U);
        EXPECT_EQ(0, dst.at<ushort>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
    {
        Mat src(4, 64, CV_32SC1, Scalar(65536));
        Mat dst;
        src.convertTo(dst, CV_16U);
        EXPECT_EQ(65535, dst.at<ushort>(0, 0)) << "src=" << src.at<int>(0, 0);
    }
}

TEST(Core_MeanStdDev, regression_multichannel)
{
    {
        uchar buf[] = { 1, 2, 3, 4, 5, 6, 7, 8,
                        3, 4, 5, 6, 7, 8, 9, 10 };
        double ref_buf[] = { 2., 3., 4., 5., 6., 7., 8., 9.,
                             1., 1., 1., 1., 1., 1., 1., 1. };
        Mat src(1, 2, CV_MAKETYPE(CV_8U, 8), buf);
        Mat ref_m(8, 1, CV_64FC1, ref_buf);
        Mat ref_sd(8, 1, CV_64FC1, ref_buf + 8);
        Mat dst_m, dst_sd;
        meanStdDev(src, dst_m, dst_sd);
        EXPECT_EQ(0, cv::norm(dst_m, ref_m, NORM_L1));
        EXPECT_EQ(0, cv::norm(dst_sd, ref_sd, NORM_L1));
    }
}
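
// The helpers below build small inputs with zero divisors and check cv::divide's
// conventions: integer division by zero yields 0, while floating-point division
// yields NaN for 0/0 and +/-Inf for a non-zero numerator divided by zero.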
template <typename T> static inline
void testDivideInitData(Mat& src1, Mat& src2)
{
    CV_StaticAssert(std::numeric_limits<T>::is_integer, "");
    const static T src1_[] = {
         0,  0,  0,  0,
         8,  8,  8,  8,
        -8, -8, -8, -8
    };
    Mat(3, 4, traits::Type<T>::value, (void*)src1_).copyTo(src1);
    const static T src2_[] = {
        1, 2, 0, std::numeric_limits<T>::max(),
        1, 2, 0, std::numeric_limits<T>::max(),
        1, 2, 0, std::numeric_limits<T>::max(),
    };
    Mat(3, 4, traits::Type<T>::value, (void*)src2_).copyTo(src2);
}

template <typename T> static inline
void testDivideInitDataFloat(Mat& src1, Mat& src2)
{
    CV_StaticAssert(!std::numeric_limits<T>::is_integer, "");
    const static T src1_[] = {
         0,  0,  0,  0,
         8,  8,  8,  8,
        -8, -8, -8, -8
    };
    Mat(3, 4, traits::Type<T>::value, (void*)src1_).copyTo(src1);
    const static T src2_[] = {
        1, 2, 0, std::numeric_limits<T>::infinity(),
        1, 2, 0, std::numeric_limits<T>::infinity(),
        1, 2, 0, std::numeric_limits<T>::infinity(),
    };
    Mat(3, 4, traits::Type<T>::value, (void*)src2_).copyTo(src2);
}

template <> inline void testDivideInitData<float>(Mat& src1, Mat& src2) { testDivideInitDataFloat<float>(src1, src2); }
template <> inline void testDivideInitData<double>(Mat& src1, Mat& src2) { testDivideInitDataFloat<double>(src1, src2); }

template <typename T> static inline
void testDivideChecks(const Mat& dst)
{
    ASSERT_FALSE(dst.empty());
    CV_StaticAssert(std::numeric_limits<T>::is_integer, "");
    for (int y = 0; y < dst.rows; y++)
    {
        for (int x = 0; x < dst.cols; x++)
        {
            if ((x % 4) == 2)
            {
                EXPECT_EQ(0, dst.at<T>(y, x)) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
            }
            else
            {
                EXPECT_TRUE(0 == cvIsNaN((double)dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
                EXPECT_TRUE(0 == cvIsInf((double)dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
            }
        }
    }
}

template <typename T> static inline
void testDivideChecksFP(const Mat& dst)
{
    ASSERT_FALSE(dst.empty());
    CV_StaticAssert(!std::numeric_limits<T>::is_integer, "");
    for (int y = 0; y < dst.rows; y++)
    {
        for (int x = 0; x < dst.cols; x++)
        {
            if ((y % 3) == 0 && (x % 4) == 2)
            {
                EXPECT_TRUE(cvIsNaN(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
            }
            else if ((x % 4) == 2)
            {
                EXPECT_TRUE(cvIsInf(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
            }
            else
            {
                EXPECT_FALSE(cvIsNaN(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
                EXPECT_FALSE(cvIsInf(dst.at<T>(y, x))) << "dst(" << y << ", " << x << ") = " << dst.at<T>(y, x);
            }
        }
    }
}

template <> inline void testDivideChecks<float>(const Mat& dst) { testDivideChecksFP<float>(dst); }
template <> inline void testDivideChecks<double>(const Mat& dst) { testDivideChecksFP<double>(dst); }

template <typename T> static inline
void testDivide(bool isUMat, double scale, bool largeSize, bool tailProcessing, bool roi)
{
    Mat src1, src2;
    testDivideInitData<T>(src1, src2);
    ASSERT_FALSE(src1.empty()); ASSERT_FALSE(src2.empty());

    if (largeSize)
    {
        repeat(src1.clone(), 1, 8, src1);
        repeat(src2.clone(), 1, 8, src2);
    }
    if (tailProcessing)
    {
        src1 = src1(Rect(0, 0, src1.cols - 1, src1.rows));
        src2 = src2(Rect(0, 0, src2.cols - 1, src2.rows));
    }
    if (!roi && tailProcessing)
    {
        src1 = src1.clone();
        src2 = src2.clone();
    }

    Mat dst;
    if (!isUMat)
    {
        cv::divide(src1, src2, dst, scale);
    }
    else
    {
        UMat usrc1, usrc2, udst;
        src1.copyTo(usrc1);
        src2.copyTo(usrc2);
        cv::divide(usrc1, usrc2, udst, scale);
        udst.copyTo(dst);
    }

    testDivideChecks<T>(dst);

    if (::testing::Test::HasFailure())
    {
        std::cout << "src1 = " << std::endl << src1 << std::endl;
        std::cout << "src2 = " << std::endl << src2 << std::endl;
        std::cout << "dst = " << std::endl << dst << std::endl;
    }
}

typedef tuple<bool, double, bool, bool, bool> DivideRulesParam;
typedef testing::TestWithParam<DivideRulesParam> Core_DivideRules;

TEST_P(Core_DivideRules, type_32s)
{
    DivideRulesParam param = GetParam();
    testDivide<int>(get<0>(param), get<1>(param), get<2>(param), get<3>(param), get<4>(param));
}

TEST_P(Core_DivideRules, type_16s)
{
    DivideRulesParam param = GetParam();
    testDivide<short>(get<0>(param), get<1>(param), get<2>(param), get<3>(param), get<4>(param));
}

TEST_P(Core_DivideRules, type_32f)
{
    DivideRulesParam param = GetParam();
    testDivide<float>(get<0>(param), get<1>(param), get<2>(param), get<3>(param), get<4>(param));
}

TEST_P(Core_DivideRules, type_64f)
{
    DivideRulesParam param = GetParam();
    testDivide<double>(get<0>(param), get<1>(param), get<2>(param), get<3>(param), get<4>(param));
}

INSTANTIATE_TEST_CASE_P(/* */, Core_DivideRules, testing::Combine(
/* isUMat */    testing::Values(false),
/* scale */     testing::Values(1.0, 5.0),
/* largeSize */ testing::Bool(),
/* tail */      testing::Bool(),
/* roi */       testing::Bool()
));

INSTANTIATE_TEST_CASE_P(UMat, Core_DivideRules, testing::Combine(
/* isUMat */    testing::Values(true),
/* scale */     testing::Values(1.0, 5.0),
/* largeSize */ testing::Bool(),
/* tail */      testing::Bool(),
/* roi */       testing::Bool()
));
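
// Regression check for minMaxIdx on a single-column matrix taller than 65536 rows:
// the IPP-accelerated path must agree with the plain implementation.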
TEST(Core_MinMaxIdx, rows_overflow)
{
    const int N = 65536 + 1;
    const int M = 1;
    {
        setRNGSeed(123);
        Mat m(N, M, CV_32FC1);
        randu(m, -100, 100);
        double minVal = 0, maxVal = 0;
        int minIdx[CV_MAX_DIM] = { 0 }, maxIdx[CV_MAX_DIM] = { 0 };
        cv::minMaxIdx(m, &minVal, &maxVal, minIdx, maxIdx);

        double minVal0 = 0, maxVal0 = 0;
        int minIdx0[CV_MAX_DIM] = { 0 }, maxIdx0[CV_MAX_DIM] = { 0 };
        cv::ipp::setUseIPP(false);
        cv::minMaxIdx(m, &minVal0, &maxVal0, minIdx0, maxIdx0);
        cv::ipp::setUseIPP(true);

        EXPECT_FALSE(fabs(minVal0 - minVal) > 1e-6 || fabs(maxVal0 - maxVal) > 1e-6) << "NxM=" << N << "x" << M <<
            " min=" << minVal0 << " vs " << minVal <<
            " max=" << maxVal0 << " vs " << maxVal;
    }
}

TEST(Core_Magnitude, regression_19506)
{
    for (int N = 1; N <= 64; ++N)
    {
        Mat a(1, N, CV_32FC1, Scalar::all(1e-20));
        Mat res;
        magnitude(a, a, res);
        EXPECT_LE(cvtest::norm(res, NORM_L1), 1e-15) << N;
    }
}

}} // namespace