exposure_compensate.cpp 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619
  1. /*M///////////////////////////////////////////////////////////////////////////////////////
  2. //
  3. // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
  4. //
  5. // By downloading, copying, installing or using the software you agree to this license.
  6. // If you do not agree to this license, do not download, install,
  7. // copy or use the software.
  8. //
  9. //
  10. // License Agreement
  11. // For Open Source Computer Vision Library
  12. //
  13. // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
  14. // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
  15. // Third party copyrights are property of their respective owners.
  16. //
  17. // Redistribution and use in source and binary forms, with or without modification,
  18. // are permitted provided that the following conditions are met:
  19. //
  20. // * Redistribution's of source code must retain the above copyright notice,
  21. // this list of conditions and the following disclaimer.
  22. //
  23. // * Redistribution's in binary form must reproduce the above copyright notice,
  24. // this list of conditions and the following disclaimer in the documentation
  25. // and/or other materials provided with the distribution.
  26. //
  27. // * The name of the copyright holders may not be used to endorse or promote products
  28. // derived from this software without specific prior written permission.
  29. //
  30. // This software is provided by the copyright holders and contributors "as is" and
  31. // any express or implied warranties, including, but not limited to, the implied
  32. // warranties of merchantability and fitness for a particular purpose are disclaimed.
  33. // In no event shall the Intel Corporation or contributors be liable for any direct,
  34. // indirect, incidental, special, exemplary, or consequential damages
  35. // (including, but not limited to, procurement of substitute goods or services;
  36. // loss of use, data, or profits; or business interruption) however caused
  37. // and on any theory of liability, whether in contract, strict liability,
  38. // or tort (including negligence or otherwise) arising in any way out of
  39. // the use of this software, even if advised of the possibility of such damage.
  40. //
  41. //M*/
  42. #include "precomp.hpp"
  43. #ifdef HAVE_EIGEN
  44. #include <Eigen/Core>
  45. #include <Eigen/Dense>
  46. #endif
  47. namespace cv {
  48. namespace detail {
  49. Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
  50. {
  51. Ptr<ExposureCompensator> e;
  52. if (type == NO)
  53. e = makePtr<NoExposureCompensator>();
  54. else if (type == GAIN)
  55. e = makePtr<GainCompensator>();
  56. else if (type == GAIN_BLOCKS)
  57. e = makePtr<BlocksGainCompensator>();
  58. else if (type == CHANNELS)
  59. e = makePtr<ChannelsCompensator>();
  60. else if (type == CHANNELS_BLOCKS)
  61. e = makePtr<BlocksChannelsCompensator>();
  62. if (e.get() != nullptr)
  63. return e;
  64. CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
  65. }
  66. void ExposureCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
  67. const std::vector<UMat> &masks)
  68. {
  69. std::vector<std::pair<UMat,uchar> > level_masks;
  70. for (size_t i = 0; i < masks.size(); ++i)
  71. level_masks.push_back(std::make_pair(masks[i], (uchar)255));
  72. feed(corners, images, level_masks);
  73. }
// Runs nr_feeds_ rounds of single-pass gain estimation. After the first round,
// the previously estimated gains are applied to the images so each subsequent
// round refines the residual error; the per-round gains are multiplied
// together into the final gains_.
void GainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
    const std::vector<std::pair<UMat,uchar> > &masks)
{
    LOGLN("Exposure compensation...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif
    const int num_images = static_cast<int>(images.size());
    Mat accumulated_gains;
    // Build (or reuse) the per-overlap similarity masks before estimation.
    prepareSimilarityMask(corners, images);
    for (int n = 0; n < nr_feeds_; ++n)
    {
        if (n > 0)
        {
            // Apply previous iteration gains
            for (int i = 0; i < num_images; ++i)
                apply(i, corners[i], images[i], masks[i].first);
        }
        singleFeed(corners, images, masks);
        if (n == 0)
            accumulated_gains = gains_.clone();
        else
            // Fold this refinement pass into the gains accumulated so far.
            multiply(accumulated_gains, gains_, accumulated_gains);
    }
    gains_ = accumulated_gains;
    LOGLN("Exposure compensation, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
}
// Estimates one scalar gain per image from pairwise overlap statistics, then
// solves a regularized least-squares system for the gains.
//   N(i,j)  — number of shared valid pixels between images i and j (kept >= 1
//             so later divisions are safe).
//   I(i,j)  — mean intensity of image i over its overlap with image j.
//   skip(i) — true while image i overlaps no *other* image; such images keep
//             gain 1 and are excluded from the linear system.
void GainCompensator::singleFeed(const std::vector<Point> &corners, const std::vector<UMat> &images,
    const std::vector<std::pair<UMat,uchar> > &masks)
{
    CV_Assert(corners.size() == images.size() && images.size() == masks.size());
    if (images.size() == 0)
        return;
    // All images must share the same channel count (grayscale or 3-channel).
    const int num_channels = images[0].channels();
    CV_Assert(std::all_of(images.begin(), images.end(),
        [num_channels](const UMat& image) { return image.channels() == num_channels; }));
    CV_Assert(num_channels == 1 || num_channels == 3);
    const int num_images = static_cast<int>(images.size());
    Mat_<int> N(num_images, num_images); N.setTo(0);
    Mat_<double> I(num_images, num_images); I.setTo(0);
    Mat_<bool> skip(num_images, 1); skip.setTo(true);
    Mat subimg1, subimg2;
    Mat_<uchar> submask1, submask2, intersect;
    // similarities_ (if prepared) holds one mask per overlapping (i,j) pair,
    // pushed in exactly the iteration order of the loops below.
    std::vector<UMat>::iterator similarity_it = similarities_.begin();
    for (int i = 0; i < num_images; ++i)
    {
        for (int j = i; j < num_images; ++j)
        {
            Rect roi;
            if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
            {
                // Crop both images and masks to the common overlap rectangle
                // (roi is in panorama coordinates; subtract each corner).
                subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
                subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
                submask1 = masks[i].first(Rect(roi.tl() - corners[i], roi.br() - corners[i])).getMat(ACCESS_READ);
                submask2 = masks[j].first(Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ);
                // A pixel is counted only where both masks carry their "valid" value.
                intersect = (submask1 == masks[i].second) & (submask2 == masks[j].second);
                if (!similarities_.empty())
                {
                    CV_Assert(similarity_it != similarities_.end());
                    UMat similarity = *similarity_it++;
                    // in-place operation has an issue. don't remove the swap
                    // detail https://github.com/opencv/opencv/issues/19184
                    Mat_<uchar> intersect_updated;
                    bitwise_and(intersect, similarity, intersect_updated);
                    std::swap(intersect, intersect_updated);
                }
                int intersect_count = countNonZero(intersect);
                N(i, j) = N(j, i) = std::max(1, intersect_count);
                // Don't compute Isums if subimages do not intersect anyway
                if (intersect_count == 0)
                    continue;
                // Don't skip images that intersect with at least one other image
                if (i != j)
                {
                    skip(i, 0) = false;
                    skip(j, 0) = false;
                }
                // Accumulate intensity sums over the valid overlap pixels.
                double Isum1 = 0, Isum2 = 0;
                for (int y = 0; y < roi.height; ++y)
                {
                    if (num_channels == 3)
                    {
                        const Vec<uchar, 3>* r1 = subimg1.ptr<Vec<uchar, 3> >(y);
                        const Vec<uchar, 3>* r2 = subimg2.ptr<Vec<uchar, 3> >(y);
                        for (int x = 0; x < roi.width; ++x)
                        {
                            if (intersect(y, x))
                            {
                                // Color pixels contribute the L2 norm of (B,G,R).
                                Isum1 += norm(r1[x]);
                                Isum2 += norm(r2[x]);
                            }
                        }
                    }
                    else // if (num_channels == 1)
                    {
                        const uchar* r1 = subimg1.ptr<uchar>(y);
                        const uchar* r2 = subimg2.ptr<uchar>(y);
                        for (int x = 0; x < roi.width; ++x)
                        {
                            if (intersect(y, x))
                            {
                                Isum1 += r1[x];
                                Isum2 += r2[x];
                            }
                        }
                    }
                }
                I(i, j) = Isum1 / N(i, j);
                I(j, i) = Isum2 / N(i, j);
            }
        }
    }
    // Recompute gains unless updates are disabled and a gains_ of the right
    // size already exists (lets callers keep previously-set gains).
    if (getUpdateGain() || gains_.rows != num_images)
    {
        // alpha weighs the intensity-matching error; beta weighs the prior
        // pulling every gain towards 1 (regularization).
        double alpha = 0.01;
        double beta = 100;
        int num_eq = num_images - countNonZero(skip);
        gains_.create(num_images, 1);
        gains_.setTo(1);
        // No image process, gains are all set to one, stop here
        if (num_eq == 0)
            return;
        // Assemble the normal equations A*g = b over non-skipped images only;
        // ki/kj are the compacted row/column indices.
        Mat_<double> A(num_eq, num_eq); A.setTo(0);
        Mat_<double> b(num_eq, 1); b.setTo(0);
        for (int i = 0, ki = 0; i < num_images; ++i)
        {
            if (skip(i, 0))
                continue;
            for (int j = 0, kj = 0; j < num_images; ++j)
            {
                if (skip(j, 0))
                    continue;
                b(ki, 0) += beta * N(i, j);
                A(ki, ki) += beta * N(i, j);
                if (j != i)
                {
                    A(ki, ki) += 2 * alpha * I(i, j) * I(i, j) * N(i, j);
                    A(ki, kj) -= 2 * alpha * I(i, j) * I(j, i) * N(i, j);
                }
                ++kj;
            }
            ++ki;
        }
        Mat_<double> l_gains;
#ifdef HAVE_EIGEN
        // Solve with Eigen's Cholesky (LLT) factorization in single precision,
        // then widen the result back to double.
        Eigen::MatrixXf eigen_A, eigen_b, eigen_x;
        cv2eigen(A, eigen_A);
        cv2eigen(b, eigen_b);
        Eigen::LLT<Eigen::MatrixXf> solver(eigen_A);
#if ENABLE_LOG
        if (solver.info() != Eigen::ComputationInfo::Success)
            LOGLN("Failed to solve exposure compensation system");
#endif
        eigen_x = solver.solve(eigen_b);
        Mat_<float> l_gains_float;
        eigen2cv(eigen_x, l_gains_float);
        l_gains_float.convertTo(l_gains, CV_64FC1);
#else
        solve(A, b, l_gains);
#endif
        CV_CheckTypeEQ(l_gains.type(), CV_64FC1, "");
        for (int i = 0, j = 0; i < num_images; ++i)
        {
            // Only assign non-skipped gains. Other gains are already set to 1
            if (!skip(i, 0))
                gains_.at<double>(i, 0) = l_gains(j++, 0);
        }
    }
}
// Scales the whole image in place by this image's scalar gain.
// Corner and mask are unused for single-gain compensation.
void GainCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
{
    CV_INSTRUMENT_REGION();
    multiply(image, gains_(index, 0), image);
}
  248. std::vector<double> GainCompensator::gains() const
  249. {
  250. std::vector<double> gains_vec(gains_.rows);
  251. for (int i = 0; i < gains_.rows; ++i)
  252. gains_vec[i] = gains_(i, 0);
  253. return gains_vec;
  254. }
  255. void GainCompensator::getMatGains(std::vector<Mat>& umv)
  256. {
  257. umv.clear();
  258. for (int i = 0; i < gains_.rows; ++i)
  259. umv.push_back(Mat(1,1,CV_64FC1,Scalar(gains_(i, 0))));
  260. }
  261. void GainCompensator::setMatGains(std::vector<Mat>& umv)
  262. {
  263. gains_=Mat_<double>(static_cast<int>(umv.size()),1);
  264. for (int i = 0; i < static_cast<int>(umv.size()); i++)
  265. {
  266. int type = umv[i].type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
  267. CV_CheckType(type, depth == CV_64F && cn == 1, "Only double images are supported for gain");
  268. CV_Assert(umv[i].rows == 1 && umv[i].cols == 1);
  269. gains_(i, 0) = umv[i].at<double>(0, 0);
  270. }
  271. }
// Precomputes one similarity mask per overlapping image pair (i <= j), pushed
// in the exact order singleFeed() walks the pairs so the two stay in sync.
// Skipped when the feature is disabled (threshold >= 1) or when masks were
// already computed (or injected) earlier.
void GainCompensator::prepareSimilarityMask(
    const std::vector<Point> &corners, const std::vector<UMat> &images)
{
    if (similarity_threshold_ >= 1)
    {
        LOGLN(" skipping similarity mask: disabled");
        return;
    }
    if (!similarities_.empty())
    {
        LOGLN(" skipping similarity mask: already set");
        return;
    }
    LOGLN(" calculating similarity mask");
    const int num_images = static_cast<int>(images.size());
    for (int i = 0; i < num_images; ++i)
    {
        for (int j = i; j < num_images; ++j)
        {
            Rect roi;
            if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi))
            {
                // Crop both images to the shared overlap and mask out pixels
                // that differ too much (likely moving objects / misalignment).
                UMat subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i]));
                UMat subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j]));
                UMat similarity = buildSimilarityMask(subimg1, subimg2);
                similarities_.push_back(similarity);
            }
        }
    }
}
// Builds a per-pixel similarity mask for two equally-sized 8-bit images:
// 255 where the normalized pixel difference is <= similarity_threshold_,
// 0 elsewhere. A morphological opening (3x3 erode then dilate) removes
// isolated speckle from the mask before it is returned.
UMat GainCompensator::buildSimilarityMask(InputArray src_array1, InputArray src_array2)
{
    CV_Assert(src_array1.rows() == src_array2.rows() && src_array1.cols() == src_array2.cols());
    CV_Assert(src_array1.type() == src_array2.type());
    CV_Assert(src_array1.type() == CV_8UC3 || src_array1.type() == CV_8UC1);
    Mat src1 = src_array1.getMat();
    Mat src2 = src_array2.getMat();
    UMat umat_similarity(src1.rows, src1.cols, CV_8UC1);
    Mat similarity = umat_similarity.getMat(ACCESS_WRITE);
    if (src1.channels() == 3)
    {
        for (int y = 0; y < similarity.rows; ++y)
        {
            for (int x = 0; x < similarity.cols; ++x)
            {
                // L2 norm of the per-channel difference after scaling each
                // channel to [0, 1].
                Vec<float, 3> vec_diff =
                    Vec<float, 3>(*src1.ptr<Vec<uchar, 3>>(y, x))
                    - Vec<float, 3>(*src2.ptr<Vec<uchar, 3>>(y, x));
                double diff = norm(vec_diff * (1.f / 255.f));
                *similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
            }
        }
    }
    else // if (src1.channels() == 1)
    {
        for (int y = 0; y < similarity.rows; ++y)
        {
            for (int x = 0; x < similarity.cols; ++x)
            {
                // Absolute grayscale difference normalized to [0, 1].
                float diff = std::abs(static_cast<int>(*src1.ptr<uchar>(y, x))
                    - static_cast<int>(*src2.ptr<uchar>(y, x))) / 255.f;
                *similarity.ptr<uchar>(y, x) = diff <= similarity_threshold_ ? 255 : 0;
            }
        }
    }
    // Drop the CPU view before running UMat morphology on the same buffer.
    similarity.release();
    Mat kernel = getStructuringElement(MORPH_RECT, Size(3,3));
    UMat umat_erode;
    erode(umat_similarity, umat_erode, kernel);
    dilate(umat_erode, umat_similarity, kernel);
    return umat_similarity;
}
// Per-channel exposure compensation: splits every image into its three planes
// and runs one shared GainCompensator over each plane; the three resulting
// gain sets are stored as one Scalar (3 gains) per image.
void ChannelsCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
    const std::vector<std::pair<UMat,uchar> > &masks)
{
    std::array<std::vector<UMat>, 3> images_channels;
    // Split channels of each input image
    for (const UMat& image: images)
    {
        std::vector<UMat> image_channels;
        image_channels.resize(3);
        split(image, image_channels);
        for (int i = 0; i < int(images_channels.size()); ++i)
            images_channels[i].emplace_back(std::move(image_channels[i]));
    }
    // For each channel, feed the channel of each image in a GainCompensator
    gains_.clear();
    gains_.resize(images.size());
    GainCompensator compensator(getNrFeeds());
    compensator.setSimilarityThreshold(getSimilarityThreshold());
    // Build the similarity masks once from the full-color images so all three
    // channel feeds reuse them (feed() skips recomputation when already set).
    compensator.prepareSimilarityMask(corners, images);
    for (int c = 0; c < 3; ++c)
    {
        const std::vector<UMat>& channels = images_channels[c];
        compensator.feed(corners, channels, masks);
        std::vector<double> gains = compensator.gains();
        for (int i = 0; i < int(gains.size()); ++i)
            gains_.at(i)[c] = gains[i];
    }
}
// Multiplies the image in place by this image's per-channel gain Scalar.
// Corner and mask are unused for whole-image channel compensation.
void ChannelsCompensator::apply(int index, Point /*corner*/, InputOutputArray image, InputArray /*mask*/)
{
    CV_INSTRUMENT_REGION();
    multiply(image, gains_.at(index), image);
}
  377. void ChannelsCompensator::getMatGains(std::vector<Mat>& umv)
  378. {
  379. umv.clear();
  380. for (int i = 0; i < static_cast<int>(gains_.size()); ++i)
  381. {
  382. Mat m;
  383. Mat(gains_[i]).copyTo(m);
  384. umv.push_back(m);
  385. }
  386. }
  387. void ChannelsCompensator::setMatGains(std::vector<Mat>& umv)
  388. {
  389. for (int i = 0; i < static_cast<int>(umv.size()); i++)
  390. {
  391. Scalar s;
  392. umv[i].copyTo(s);
  393. gains_.push_back(s);
  394. }
  395. }
// Block-wise exposure compensation: tiles every image into a grid of blocks of
// roughly bl_width_ x bl_height_ pixels, feeds all blocks (with their panorama
// corners and masks) into a per-block Compensator (GainCompensator or
// ChannelsCompensator), then assembles one smoothed gain map per image.
template<class Compensator>
void BlocksCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
    const std::vector<std::pair<UMat,uchar> > &masks)
{
    CV_Assert(corners.size() == images.size() && images.size() == masks.size());
    const int num_images = static_cast<int>(images.size());
    std::vector<Size> bl_per_imgs(num_images);
    std::vector<Point> block_corners;
    std::vector<UMat> block_images;
    std::vector<std::pair<UMat,uchar> > block_masks;
    // Construct blocks for gain compensator
    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        // Block count per axis (ceiling division), then the effective block
        // size so the grid exactly covers the whole image.
        Size bl_per_img((images[img_idx].cols + bl_width_ - 1) / bl_width_,
                        (images[img_idx].rows + bl_height_ - 1) / bl_height_);
        int bl_width = (images[img_idx].cols + bl_per_img.width - 1) / bl_per_img.width;
        int bl_height = (images[img_idx].rows + bl_per_img.height - 1) / bl_per_img.height;
        bl_per_imgs[img_idx] = bl_per_img;
        for (int by = 0; by < bl_per_img.height; ++by)
        {
            for (int bx = 0; bx < bl_per_img.width; ++bx)
            {
                // Clamp the bottom-right corner so edge blocks stay in bounds.
                Point bl_tl(bx * bl_width, by * bl_height);
                Point bl_br(std::min(bl_tl.x + bl_width, images[img_idx].cols),
                            std::min(bl_tl.y + bl_height, images[img_idx].rows));
                block_corners.push_back(corners[img_idx] + bl_tl);
                block_images.push_back(images[img_idx](Rect(bl_tl, bl_br)));
                block_masks.push_back(std::make_pair(masks[img_idx].first(Rect(bl_tl, bl_br)),
                                                     masks[img_idx].second));
            }
        }
    }
    // Recompute maps unless updates are disabled and maps of the right count
    // already exist (lets callers keep previously-set gain maps).
    if (getUpdateGain() || int(gain_maps_.size()) != num_images)
    {
        Compensator compensator;
        compensator.setNrFeeds(getNrFeeds());
        compensator.setSimilarityThreshold(getSimilarityThreshold());
        compensator.feed(block_corners, block_images, block_masks);
        gain_maps_.clear();
        gain_maps_.resize(num_images);
        // Separable 3-tap binomial kernel used to smooth each gain map.
        Mat_<float> ker(1, 3);
        ker(0, 0) = 0.25; ker(0, 1) = 0.5; ker(0, 2) = 0.25;
        // bl_idx walks the flat per-block gain list across all images.
        int bl_idx = 0;
        for (int img_idx = 0; img_idx < num_images; ++img_idx)
        {
            Size bl_per_img = bl_per_imgs[img_idx];
            UMat gain_map = getGainMap(compensator, bl_idx, bl_per_img);
            bl_idx += bl_per_img.width*bl_per_img.height;
            for (int i=0; i<nr_gain_filtering_iterations_; ++i)
            {
                UMat tmp;
                sepFilter2D(gain_map, tmp, CV_32F, ker, ker);
                swap(gain_map, tmp);
            }
            gain_maps_[img_idx] = gain_map;
        }
    }
}
  454. UMat BlocksCompensator::getGainMap(const GainCompensator& compensator, int bl_idx, Size bl_per_img)
  455. {
  456. std::vector<double> gains = compensator.gains();
  457. UMat u_gain_map(bl_per_img, CV_32F);
  458. Mat_<float> gain_map = u_gain_map.getMat(ACCESS_WRITE);
  459. for (int by = 0; by < bl_per_img.height; ++by)
  460. for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
  461. gain_map(by, bx) = static_cast<float>(gains[bl_idx]);
  462. return u_gain_map;
  463. }
  464. UMat BlocksCompensator::getGainMap(const ChannelsCompensator& compensator, int bl_idx, Size bl_per_img)
  465. {
  466. std::vector<Scalar> gains = compensator.gains();
  467. UMat u_gain_map(bl_per_img, CV_32FC3);
  468. Mat_<Vec3f> gain_map = u_gain_map.getMat(ACCESS_WRITE);
  469. for (int by = 0; by < bl_per_img.height; ++by)
  470. for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)
  471. for (int c = 0; c < 3; ++c)
  472. gain_map(by, bx)[c] = static_cast<float>(gains[bl_idx][c]);
  473. return u_gain_map;
  474. }
// Applies this image's gain map in place: the map is resized to the image
// size when needed, a single-channel map (from GainCompensator blocks) is
// replicated to 3 channels, then the image is multiplied by the map.
// Only CV_8UC3 images are supported.
void BlocksCompensator::apply(int index, Point /*corner*/, InputOutputArray _image, InputArray /*mask*/)
{
    CV_INSTRUMENT_REGION();
    CV_Assert(_image.type() == CV_8UC3);
    UMat u_gain_map;
    if (gain_maps_.at(index).size() == _image.size())
        u_gain_map = gain_maps_.at(index);
    else
        resize(gain_maps_.at(index), u_gain_map, _image.size(), 0, 0, INTER_LINEAR);
    if (u_gain_map.channels() != 3)
    {
        // Replicate the single-channel map so it matches the 3-channel image.
        std::vector<UMat> gains_channels;
        gains_channels.push_back(u_gain_map);
        gains_channels.push_back(u_gain_map);
        gains_channels.push_back(u_gain_map);
        merge(gains_channels, u_gain_map);
    }
    multiply(_image, u_gain_map, _image, 1, _image.type());
}
  494. void BlocksCompensator::getMatGains(std::vector<Mat>& umv)
  495. {
  496. umv.clear();
  497. for (int i = 0; i < static_cast<int>(gain_maps_.size()); ++i)
  498. {
  499. Mat m;
  500. gain_maps_[i].copyTo(m);
  501. umv.push_back(m);
  502. }
  503. }
  504. void BlocksCompensator::setMatGains(std::vector<Mat>& umv)
  505. {
  506. for (int i = 0; i < static_cast<int>(umv.size()); i++)
  507. {
  508. UMat m;
  509. umv[i].copyTo(m);
  510. gain_maps_.push_back(m);
  511. }
  512. }
// Block-wise compensation using one scalar gain per block.
void BlocksGainCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
    const std::vector<std::pair<UMat,uchar> > &masks)
{
    BlocksCompensator::feed<GainCompensator>(corners, images, masks);
}
// Block-wise compensation using one gain per channel per block.
void BlocksChannelsCompensator::feed(const std::vector<Point> &corners, const std::vector<UMat> &images,
    const std::vector<std::pair<UMat,uchar> > &masks)
{
    BlocksCompensator::feed<ChannelsCompensator>(corners, images, masks);
}
  523. } // namespace detail
  524. } // namespace cv