// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

/*
* The MIT License(MIT)
*
* Copyright(c) 2013 Vladislav Vinogradov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files(the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include "test_precomp.hpp"
#include <opencv2/core/private.hpp>

namespace opencv_test { namespace {

using namespace cv::ximgproc;
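
// Scratch buffers for all intermediate images of the reference implementation.
// They are kept in one struct and reused across calls and recursion levels
// (resized on demand via ensureSizeIsEnough) instead of being reallocated.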
struct Buf
{
    Mat_<Point3f> eta_1;
    Mat_<uchar> cluster_1;

    Mat_<Point3f> tilde_dst;
    Mat_<float> alpha;
    Mat_<Point3f> diff;
    Mat_<Point3f> dst;

    Mat_<float> V;
    Mat_<Point3f> dIcdx;
    Mat_<Point3f> dIcdy;
    Mat_<float> dIdx;
    Mat_<float> dIdy;
    Mat_<float> dHdx;
    Mat_<float> dVdy;

    Mat_<float> t;
    Mat_<float> theta_masked;
    Mat_<Point3f> mul;
    Mat_<Point3f> numerator;
    Mat_<float> denominator;
    Mat_<Point3f> numerator_filtered;
    Mat_<float> denominator_filtered;

    Mat_<Point3f> X;
    Mat_<Point3f> eta_k_small;
    Mat_<Point3f> eta_k_big;
    Mat_<Point3f> X_squared;
    Mat_<float> pixel_dist_to_manifold_squared;
    Mat_<float> gaussian_distance_weights;
    Mat_<Point3f> Psi_splat;
    Mat_<Vec4f> Psi_splat_joined;
    Mat_<Vec4f> Psi_splat_joined_resized;
    Mat_<Vec4f> blurred_projected_values;
    Mat_<Point3f> w_ki_Psi_blur;
    Mat_<float> w_ki_Psi_blur_0;
    Mat_<Point3f> w_ki_Psi_blur_resized;
    Mat_<float> w_ki_Psi_blur_0_resized;
    Mat_<float> rand_vec;
    Mat_<float> v1;
    Mat_<float> Nx_v1_mult;
    Mat_<float> theta;

    std::vector<Mat_<Point3f> > eta_minus;
    std::vector<Mat_<uchar> > cluster_minus;
    std::vector<Mat_<Point3f> > eta_plus;
    std::vector<Mat_<uchar> > cluster_plus;

    void release();
};

void Buf::release()
{
    eta_1.release();
    cluster_1.release();

    tilde_dst.release();
    alpha.release();
    diff.release();
    dst.release();

    V.release();
    dIcdx.release();
    dIcdy.release();
    dIdx.release();
    dIdy.release();
    dHdx.release();
    dVdy.release();

    t.release();
    theta_masked.release();
    mul.release();
    numerator.release();
    denominator.release();
    numerator_filtered.release();
    denominator_filtered.release();

    X.release();
    eta_k_small.release();
    eta_k_big.release();
    X_squared.release();
    pixel_dist_to_manifold_squared.release();
    gaussian_distance_weights.release();
    Psi_splat.release();
    Psi_splat_joined.release();
    Psi_splat_joined_resized.release();
    blurred_projected_values.release();
    w_ki_Psi_blur.release();
    w_ki_Psi_blur_0.release();
    w_ki_Psi_blur_resized.release();
    w_ki_Psi_blur_0_resized.release();
    rand_vec.release();
    v1.release();
    Nx_v1_mult.release();
    theta.release();

    eta_minus.clear();
    cluster_minus.clear();
    eta_plus.clear();
    cluster_plus.clear();
}

class AdaptiveManifoldFilterRefImpl : public AdaptiveManifoldFilter
{
public:
    AdaptiveManifoldFilterRefImpl();
    void filter(InputArray src, OutputArray dst, InputArray joint) CV_OVERRIDE;
    void collectGarbage() CV_OVERRIDE;
    inline double getSigmaS() const CV_OVERRIDE { return sigma_s_; }
    inline void setSigmaS(double val) CV_OVERRIDE { sigma_s_ = val; }
    inline double getSigmaR() const CV_OVERRIDE { return sigma_r_; }
    inline void setSigmaR(double val) CV_OVERRIDE { sigma_r_ = val; }
    inline int getTreeHeight() const CV_OVERRIDE { return tree_height_; }
    inline void setTreeHeight(int val) CV_OVERRIDE { tree_height_ = val; }
    inline int getPCAIterations() const CV_OVERRIDE { return num_pca_iterations_; }
    inline void setPCAIterations(int val) CV_OVERRIDE { num_pca_iterations_ = val; }
    inline bool getAdjustOutliers() const CV_OVERRIDE { return adjust_outliers_; }
    inline void setAdjustOutliers(bool val) CV_OVERRIDE { adjust_outliers_ = val; }
    inline bool getUseRNG() const CV_OVERRIDE { return useRNG; }
    inline void setUseRNG(bool val) CV_OVERRIDE { useRNG = val; }

protected:
    bool adjust_outliers_;
    double sigma_s_;
    double sigma_r_;
    int tree_height_;
    int num_pca_iterations_;
    bool useRNG;

private:
    void buildManifoldsAndPerformFiltering(const Mat_<Point3f>& eta_k, const Mat_<uchar>& cluster_k, int current_tree_level);

    Buf buf_;

    Mat_<Point3f> src_f_;
    Mat_<Point3f> src_joint_f_;

    Mat_<Point3f> sum_w_ki_Psi_blur_;
    Mat_<float> sum_w_ki_Psi_blur_0_;

    Mat_<float> min_pixel_dist_to_manifold_squared_;

    RNG rng_;

    int cur_tree_height_;
    float sigma_r_over_sqrt_2_;
};

AdaptiveManifoldFilterRefImpl::AdaptiveManifoldFilterRefImpl()
{
    sigma_s_ = 16.0;
    sigma_r_ = 0.2;
    tree_height_ = -1;
    num_pca_iterations_ = 1;
    adjust_outliers_ = false;
    useRNG = true;
}

void AdaptiveManifoldFilterRefImpl::collectGarbage()
{
    buf_.release();

    src_f_.release();
    src_joint_f_.release();

    sum_w_ki_Psi_blur_.release();
    sum_w_ki_Psi_blur_0_.release();

    min_pixel_dist_to_manifold_squared_.release();
}

inline double Log2(double n)
{
    return log(n) / log(2.0);
}
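
// Default height of the manifold tree when the user does not set one:
// H = max(2, ceil((floor(log2(sigma_s)) - 1) * (1 - sigma_r))),
// as referenced by the "Eq. (10)" comment in filter() below.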
inline int computeManifoldTreeHeight(double sigma_s, double sigma_r)
{
    const double Hs = floor(Log2(sigma_s)) - 1.0;
    const double Lr = 1.0 - sigma_r;
    return max(2, static_cast<int>(ceil(Hs * Lr)));
}

/*
void ensureSizeIsEnough(int rows, int cols, int type, Mat& m)
{
    if (m.empty() || m.type() != type || m.data != m.datastart)
        m.create(rows, cols, type);
    else
    {
        const size_t esz = m.elemSize();
        const ptrdiff_t delta2 = m.dataend - m.datastart;

        const size_t minstep = m.cols * esz;

        Size wholeSize;
        wholeSize.height = std::max(static_cast<int>((delta2 - minstep) / m.step + 1), m.rows);
        wholeSize.width = std::max(static_cast<int>((delta2 - m.step * (wholeSize.height - 1)) / esz), m.cols);

        if (wholeSize.height < rows || wholeSize.width < cols)
            m.create(rows, cols, type);
        else
        {
            m.cols = cols;
            m.rows = rows;
        }
    }
}

inline void ensureSizeIsEnough(Size size, int type, Mat& m)
{
    ensureSizeIsEnough(size.height, size.width, type, m);
}
*/

template <typename T>
inline void ensureSizeIsEnough(int rows, int cols, Mat_<T>& m)
{
    if (m.empty() || m.data != m.datastart)
        m.create(rows, cols);
    else
    {
        const size_t esz = m.elemSize();
        const ptrdiff_t delta2 = m.dataend - m.datastart;

        const size_t minstep = m.cols * esz;

        Size wholeSize;
        wholeSize.height = std::max(static_cast<int>((delta2 - minstep) / m.step + 1), m.rows);
        wholeSize.width = std::max(static_cast<int>((delta2 - m.step * (wholeSize.height - 1)) / esz), m.cols);

        if (wholeSize.height < rows || wholeSize.width < cols)
            m.create(rows, cols);
        else
        {
            m.cols = cols;
            m.rows = rows;
        }
    }
}

template <typename T>
inline void ensureSizeIsEnough(Size size, Mat_<T>& m)
{
    ensureSizeIsEnough(size.height, size.width, m);
}
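
// Separable recursive low-pass filter used to build the manifolds.
// Each 1-D pass applies the first-order feedback y[n] = x[n] + a * (y[n - 1] - x[n])
// with a = exp(-sqrt(2) / sigma); a forward and a backward pass are run along the rows,
// then along the columns.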
template <typename T>
void h_filter(const Mat_<T>& src, Mat_<T>& dst, float sigma)
{
    CV_DbgAssert( src.depth() == CV_32F );

    const float a = exp(-sqrt(2.0f) / sigma);

    ensureSizeIsEnough(src.size(), dst);

    for (int y = 0; y < src.rows; ++y)
    {
        const T* src_row = src[y];
        T* dst_row = dst[y];

        dst_row[0] = src_row[0];
        for (int x = 1; x < src.cols; ++x)
        {
            //dst_row[x] = src_row[x] + a * (src_row[x - 1] - src_row[x]);
            dst_row[x] = src_row[x] + a * (dst_row[x - 1] - src_row[x]); //!!!
        }
        for (int x = src.cols - 2; x >= 0; --x)
        {
            dst_row[x] = dst_row[x] + a * (dst_row[x + 1] - dst_row[x]);
        }
    }

    for (int y = 1; y < src.rows; ++y)
    {
        T* dst_cur_row = dst[y];
        T* dst_prev_row = dst[y - 1];

        for (int x = 0; x < src.cols; ++x)
        {
            dst_cur_row[x] = dst_cur_row[x] + a * (dst_prev_row[x] - dst_cur_row[x]);
        }
    }
    for (int y = src.rows - 2; y >= 0; --y)
    {
        T* dst_cur_row = dst[y];
        T* dst_prev_row = dst[y + 1];

        for (int x = 0; x < src.cols; ++x)
        {
            dst_cur_row[x] = dst_cur_row[x] + a * (dst_prev_row[x] - dst_cur_row[x]);
        }
    }
}

template <typename T>
void rdivide(const Mat_<T>& a, const Mat_<float>& b, Mat_<T>& dst)
{
    CV_DbgAssert( a.depth() == CV_32F );
    CV_DbgAssert( a.size() == b.size() );

    ensureSizeIsEnough(a.size(), dst);
    dst.setTo(0);

    for (int y = 0; y < a.rows; ++y)
    {
        const T* a_row = a[y];
        const float* b_row = b[y];
        T* dst_row = dst[y];

        for (int x = 0; x < a.cols; ++x)
        {
            //if (b_row[x] > numeric_limits<float>::epsilon())
            dst_row[x] = a_row[x] * (1.0f / b_row[x]);
        }
    }
}

template <typename T>
void times(const Mat_<T>& a, const Mat_<float>& b, Mat_<T>& dst)
{
    CV_DbgAssert( a.depth() == CV_32F );
    CV_DbgAssert( a.size() == b.size() );

    ensureSizeIsEnough(a.size(), dst);

    for (int y = 0; y < a.rows; ++y)
    {
        const T* a_row = a[y];
        const float* b_row = b[y];
        T* dst_row = dst[y];

        for (int x = 0; x < a.cols; ++x)
        {
            dst_row[x] = a_row[x] * b_row[x];
        }
    }
}
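
// Reference pipeline: normalize the 8-bit input to [0, 1], seed the RNG from the center
// pixel of the joint image, build the first manifold by low-pass filtering, recursively
// splat / blur / slice over the manifold tree, and normalize the accumulated response
// (Eq. (4)), optionally adjusting outlier pixels.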
void AdaptiveManifoldFilterRefImpl::filter(InputArray _src, OutputArray _dst, InputArray _joint)
{
    const Mat src = _src.getMat();
    const Mat src_joint = _joint.getMat();

    const Size srcSize = src.size();

    CV_Assert( src.type() == CV_8UC3 );
    CV_Assert( src_joint.empty() || (src_joint.type() == src.type() && src_joint.size() == srcSize) );

    ensureSizeIsEnough(srcSize, src_f_);
    src.convertTo(src_f_, src_f_.type(), 1.0 / 255.0);

    ensureSizeIsEnough(srcSize, sum_w_ki_Psi_blur_);
    sum_w_ki_Psi_blur_.setTo(Scalar::all(0));

    ensureSizeIsEnough(srcSize, sum_w_ki_Psi_blur_0_);
    sum_w_ki_Psi_blur_0_.setTo(Scalar::all(0));

    ensureSizeIsEnough(srcSize, min_pixel_dist_to_manifold_squared_);
    min_pixel_dist_to_manifold_squared_.setTo(Scalar::all(numeric_limits<float>::max()));

    // If the tree_height was not specified, compute it using Eq. (10) of our paper.
    cur_tree_height_ = tree_height_ > 0 ? tree_height_ : computeManifoldTreeHeight(sigma_s_, sigma_r_);

    // If no joint signal was specified, use the original signal
    ensureSizeIsEnough(srcSize, src_joint_f_);
    if (src_joint.empty())
        src_f_.copyTo(src_joint_f_);
    else
        src_joint.convertTo(src_joint_f_, src_joint_f_.type(), 1.0 / 255.0);

    // Use the center pixel as seed to random number generation.
    const double seedCoef = src_joint_f_(src_joint_f_.rows / 2, src_joint_f_.cols / 2).x;
    const uint64 baseCoef = numeric_limits<uint64>::max() / 0xFFFF;
    rng_.state = static_cast<uint64>(baseCoef*seedCoef);

    // Dividing the covariance matrix by 2 is equivalent to dividing the standard deviations by sqrt(2).
    sigma_r_over_sqrt_2_ = static_cast<float>(sigma_r_ / sqrt(2.0));

    // Algorithm 1, Step 1: compute the first manifold by low-pass filtering.
    h_filter(src_joint_f_, buf_.eta_1, static_cast<float>(sigma_s_));

    ensureSizeIsEnough(srcSize, buf_.cluster_1);
    buf_.cluster_1.setTo(Scalar::all(1));

    buf_.eta_minus.resize(cur_tree_height_);
    buf_.cluster_minus.resize(cur_tree_height_);
    buf_.eta_plus.resize(cur_tree_height_);
    buf_.cluster_plus.resize(cur_tree_height_);

    buildManifoldsAndPerformFiltering(buf_.eta_1, buf_.cluster_1, 1);

    // Compute the filter response by normalized convolution -- Eq. (4)
    rdivide(sum_w_ki_Psi_blur_, sum_w_ki_Psi_blur_0_, buf_.tilde_dst);

    if (!adjust_outliers_)
    {
        buf_.tilde_dst.convertTo(_dst, CV_8U, 255.0);
    }
    else
    {
        // Adjust the filter response for outlier pixels -- Eq. (10)
        ensureSizeIsEnough(srcSize, buf_.alpha);
        exp(min_pixel_dist_to_manifold_squared_ * (-0.5 / sigma_r_ / sigma_r_), buf_.alpha);

        ensureSizeIsEnough(srcSize, buf_.diff);
        subtract(buf_.tilde_dst, src_f_, buf_.diff);
        times(buf_.diff, buf_.alpha, buf_.diff);

        ensureSizeIsEnough(srcSize, buf_.dst);
        cv::add(src_f_, buf_.diff, buf_.dst); // TODO cvtest
        buf_.dst.convertTo(_dst, CV_8U, 255.0);
    }
}

inline double floor_to_power_of_two(double r)
{
    return pow(2.0, floor(Log2(r)));
}

void channelsSum(const Mat_<Point3f>& src, Mat_<float>& dst)
{
    ensureSizeIsEnough(src.size(), dst);

    for (int y = 0; y < src.rows; ++y)
    {
        const Point3f* src_row = src[y];
        float* dst_row = dst[y];

        for (int x = 0; x < src.cols; ++x)
        {
            const Point3f src_val = src_row[x];
            dst_row[x] = src_val.x + src_val.y + src_val.z;
        }
    }
}
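
// Gaussian weight of a squared distance: phi(d^2) = exp(-d^2 / (2 * sigma^2)).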
void phi(const Mat_<float>& src, Mat_<float>& dst, float sigma)
{
    ensureSizeIsEnough(src.size(), dst);

    for (int y = 0; y < dst.rows; ++y)
    {
        const float* src_row = src[y];
        float* dst_row = dst[y];

        for (int x = 0; x < dst.cols; ++x)
        {
            dst_row[x] = exp(-0.5f * src_row[x] / sigma / sigma);
        }
    }
}

void catCn(const Mat_<Point3f>& a, const Mat_<float>& b, Mat_<Vec4f>& dst)
{
    ensureSizeIsEnough(a.size(), dst);

    for (int y = 0; y < a.rows; ++y)
    {
        const Point3f* a_row = a[y];
        const float* b_row = b[y];
        Vec4f* dst_row = dst[y];

        for (int x = 0; x < a.cols; ++x)
        {
            const Point3f a_val = a_row[x];
            const float b_val = b_row[x];

            dst_row[x] = Vec4f(a_val.x, a_val.y, a_val.z, b_val);
        }
    }
}

void diffY(const Mat_<Point3f>& src, Mat_<Point3f>& dst)
{
    ensureSizeIsEnough(src.rows - 1, src.cols, dst);

    for (int y = 0; y < src.rows - 1; ++y)
    {
        const Point3f* src_cur_row = src[y];
        const Point3f* src_next_row = src[y + 1];
        Point3f* dst_row = dst[y];

        for (int x = 0; x < src.cols; ++x)
        {
            dst_row[x] = src_next_row[x] - src_cur_row[x];
        }
    }
}

void diffX(const Mat_<Point3f>& src, Mat_<Point3f>& dst)
{
    ensureSizeIsEnough(src.rows, src.cols - 1, dst);

    for (int y = 0; y < src.rows; ++y)
    {
        const Point3f* src_row = src[y];
        Point3f* dst_row = dst[y];

        for (int x = 0; x < src.cols - 1; ++x)
        {
            dst_row[x] = src_row[x + 1] - src_row[x];
        }
    }
}
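
// Recursive filter in the transformed domain: DH / DV hold the horizontal / vertical
// domain-transform distances, the per-pixel feedback coefficient is a^d, and the filter
// runs forward and backward along the rows and then along the columns of the joined image I.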
void TransformedDomainRecursiveFilter(const Mat_<Vec4f>& I, const Mat_<float>& DH, const Mat_<float>& DV, Mat_<Vec4f>& dst, float sigma, Buf& buf)
{
    CV_DbgAssert( I.size() == DH.size() );

    const float a = exp(-sqrt(2.0f) / sigma);

    ensureSizeIsEnough(I.size(), dst);
    I.copyTo(dst);

    ensureSizeIsEnough(DH.size(), buf.V);
    for (int y = 0; y < DH.rows; ++y)
    {
        const float* D_row = DH[y];
        float* V_row = buf.V[y];

        for (int x = 0; x < DH.cols; ++x)
        {
            V_row[x] = pow(a, D_row[x]);
        }
    }

    for (int y = 0; y < I.rows; ++y)
    {
        const float* V_row = buf.V[y];
        Vec4f* dst_row = dst[y];

        for (int x = 1; x < I.cols; ++x)
        {
            Vec4f dst_cur_val = dst_row[x];
            const Vec4f dst_prev_val = dst_row[x - 1];

            const float V_val = V_row[x];

            dst_cur_val[0] += V_val * (dst_prev_val[0] - dst_cur_val[0]);
            dst_cur_val[1] += V_val * (dst_prev_val[1] - dst_cur_val[1]);
            dst_cur_val[2] += V_val * (dst_prev_val[2] - dst_cur_val[2]);
            dst_cur_val[3] += V_val * (dst_prev_val[3] - dst_cur_val[3]);

            dst_row[x] = dst_cur_val;
        }
        for (int x = I.cols - 2; x >= 0; --x)
        {
            Vec4f dst_cur_val = dst_row[x];
            const Vec4f dst_prev_val = dst_row[x + 1];

            //const float V_val = V_row[x];
            const float V_val = V_row[x + 1];

            dst_cur_val[0] += V_val * (dst_prev_val[0] - dst_cur_val[0]);
            dst_cur_val[1] += V_val * (dst_prev_val[1] - dst_cur_val[1]);
            dst_cur_val[2] += V_val * (dst_prev_val[2] - dst_cur_val[2]);
            dst_cur_val[3] += V_val * (dst_prev_val[3] - dst_cur_val[3]);

            dst_row[x] = dst_cur_val;
        }
    }

    for (int y = 0; y < DV.rows; ++y)
    {
        const float* D_row = DV[y];
        float* V_row = buf.V[y];

        for (int x = 0; x < DV.cols; ++x)
        {
            V_row[x] = pow(a, D_row[x]);
        }
    }

    for (int y = 1; y < I.rows; ++y)
    {
        const float* V_row = buf.V[y];
        Vec4f* dst_cur_row = dst[y];
        Vec4f* dst_prev_row = dst[y - 1];

        for (int x = 0; x < I.cols; ++x)
        {
            Vec4f dst_cur_val = dst_cur_row[x];
            const Vec4f dst_prev_val = dst_prev_row[x];

            const float V_val = V_row[x];

            dst_cur_val[0] += V_val * (dst_prev_val[0] - dst_cur_val[0]);
            dst_cur_val[1] += V_val * (dst_prev_val[1] - dst_cur_val[1]);
            dst_cur_val[2] += V_val * (dst_prev_val[2] - dst_cur_val[2]);
            dst_cur_val[3] += V_val * (dst_prev_val[3] - dst_cur_val[3]);

            dst_cur_row[x] = dst_cur_val;
        }
    }
    for (int y = I.rows - 2; y >= 0; --y)
    {
        //const float* V_row = buf.V[y];
        const float* V_row = buf.V[y + 1];
        Vec4f* dst_cur_row = dst[y];
        Vec4f* dst_prev_row = dst[y + 1];

        for (int x = 0; x < I.cols; ++x)
        {
            Vec4f dst_cur_val = dst_cur_row[x];
            const Vec4f dst_prev_val = dst_prev_row[x];

            const float V_val = V_row[x];

            dst_cur_val[0] += V_val * (dst_prev_val[0] - dst_cur_val[0]);
            dst_cur_val[1] += V_val * (dst_prev_val[1] - dst_cur_val[1]);
            dst_cur_val[2] += V_val * (dst_prev_val[2] - dst_cur_val[2]);
            dst_cur_val[3] += V_val * (dst_prev_val[3] - dst_cur_val[3]);

            dst_cur_row[x] = dst_cur_val;
        }
    }
}
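
// Edge-aware blurring over a manifold: squared finite differences of the joint signal,
// scaled by (sigma_s / sigma_r)^2, define the domain-transform distances dHdx / dVdy,
// which are then passed to the recursive filter above.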
void RF_filter(const Mat_<Vec4f>& src, const Mat_<Point3f>& src_joint, Mat_<Vec4f>& dst, float sigma_s, float sigma_r, Buf& buf)
{
    CV_DbgAssert( src_joint.size() == src.size() );

    diffX(src_joint, buf.dIcdx);
    diffY(src_joint, buf.dIcdy);

    ensureSizeIsEnough(src.size(), buf.dIdx);
    buf.dIdx.setTo(Scalar::all(0));
    for (int y = 0; y < src.rows; ++y)
    {
        const Point3f* dIcdx_row = buf.dIcdx[y];
        float* dIdx_row = buf.dIdx[y];

        for (int x = 1; x < src.cols; ++x)
        {
            const Point3f val = dIcdx_row[x - 1];
            dIdx_row[x] = val.dot(val);
        }
    }

    ensureSizeIsEnough(src.size(), buf.dIdy);
    buf.dIdy.setTo(Scalar::all(0));
    for (int y = 1; y < src.rows; ++y)
    {
        const Point3f* dIcdy_row = buf.dIcdy[y - 1];
        float* dIdy_row = buf.dIdy[y];

        for (int x = 0; x < src.cols; ++x)
        {
            const Point3f val = dIcdy_row[x];
            dIdy_row[x] = val.dot(val);
        }
    }

    ensureSizeIsEnough(buf.dIdx.size(), buf.dHdx);
    buf.dIdx.convertTo(buf.dHdx, buf.dHdx.type(), (sigma_s / sigma_r) * (sigma_s / sigma_r), (sigma_s / sigma_s) * (sigma_s / sigma_s));
    sqrt(buf.dHdx, buf.dHdx);

    ensureSizeIsEnough(buf.dIdy.size(), buf.dVdy);
    buf.dIdy.convertTo(buf.dVdy, buf.dVdy.type(), (sigma_s / sigma_r) * (sigma_s / sigma_r), (sigma_s / sigma_s) * (sigma_s / sigma_s));
    sqrt(buf.dVdy, buf.dVdy);

    ensureSizeIsEnough(src.size(), dst);
    src.copyTo(dst);
    TransformedDomainRecursiveFilter(src, buf.dHdx, buf.dVdy, dst, sigma_s, buf);
}

void split_3_1(const Mat_<Vec4f>& src, Mat_<Point3f>& dst1, Mat_<float>& dst2)
{
    ensureSizeIsEnough(src.size(), dst1);
    ensureSizeIsEnough(src.size(), dst2);

    for (int y = 0; y < src.rows; ++y)
    {
        const Vec4f* src_row = src[y];
        Point3f* dst1_row = dst1[y];
        float* dst2_row = dst2[y];

        for (int x = 0; x < src.cols; ++x)
        {
            Vec4f val = src_row[x];
            dst1_row[x] = Point3f(val[0], val[1], val[2]);
            dst2_row[x] = val[3];
        }
    }
}
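
// Power iteration for the dominant eigenvector of X^T X restricted to the rows selected
// by mask (one row of X per pixel): num_pca_iterations updates of v <- sum_i (x_i . v) x_i
// starting from rand_vec, followed by a single normalization to unit length.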
void computeEigenVector(const Mat_<float>& X, const Mat_<uchar>& mask, Mat_<float>& dst, int num_pca_iterations, const Mat_<float>& rand_vec, Buf& buf)
{
    CV_DbgAssert( X.cols == rand_vec.cols );
    CV_DbgAssert( X.rows == mask.size().area() );
    CV_DbgAssert( rand_vec.rows == 1 );

    ensureSizeIsEnough(rand_vec.size(), dst);
    rand_vec.copyTo(dst);

    ensureSizeIsEnough(X.size(), buf.t);

    float* dst_row = dst[0];

    for (int i = 0; i < num_pca_iterations; ++i)
    {
        buf.t.setTo(Scalar::all(0));

        for (int y = 0, ind = 0; y < mask.rows; ++y)
        {
            const uchar* mask_row = mask[y];

            for (int x = 0; x < mask.cols; ++x, ++ind)
            {
                if (mask_row[x])
                {
                    const float* X_row = X[ind];
                    float* t_row = buf.t[ind];

                    float dots = 0.0;
                    for (int c = 0; c < X.cols; ++c)
                        dots += dst_row[c] * X_row[c];

                    for (int c = 0; c < X.cols; ++c)
                        t_row[c] = dots * X_row[c];
                }
            }
        }

        dst.setTo(0.0);
        for (int k = 0; k < X.rows; ++k)
        {
            const float* t_row = buf.t[k];

            for (int c = 0; c < X.cols; ++c)
            {
                dst_row[c] += t_row[c];
            }
        }
    }

    double n = cvtest::norm(dst, NORM_L2);
    cv::divide(dst, n, dst); // TODO cvtest
}
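
// New manifold by weighted low-pass filtering (Eq. (7)-(8)): the joint signal weighted by
// theta on the given cluster is downsampled by df, filtered with h_filter, and normalized
// by the equally filtered weights.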
void calcEta(const Mat_<Point3f>& src_joint_f, const Mat_<float>& theta, const Mat_<uchar>& cluster, Mat_<Point3f>& dst, float sigma_s, float df, Buf& buf)
{
    ensureSizeIsEnough(theta.size(), buf.theta_masked);
    buf.theta_masked.setTo(Scalar::all(0));
    theta.copyTo(buf.theta_masked, cluster);

    times(src_joint_f, buf.theta_masked, buf.mul);

    const Size nsz = Size(saturate_cast<int>(buf.mul.cols * (1.0 / df)), saturate_cast<int>(buf.mul.rows * (1.0 / df)));

    ensureSizeIsEnough(nsz, buf.numerator);
    resize(buf.mul, buf.numerator, Size(), 1.0 / df, 1.0 / df);

    ensureSizeIsEnough(nsz, buf.denominator);
    resize(buf.theta_masked, buf.denominator, Size(), 1.0 / df, 1.0 / df);

    h_filter(buf.numerator, buf.numerator_filtered, sigma_s / df);
    h_filter(buf.denominator, buf.denominator_filtered, sigma_s / df);

    rdivide(buf.numerator_filtered, buf.denominator_filtered, dst);
}
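
// One node of the manifold tree: splat the pixel values onto eta_k, blur them over the
// manifold with RF_filter, slice the blurred values back and accumulate them, then, while
// below the tree height, split the cluster along the dominant PCA direction and recurse
// on the eta_minus / eta_plus children.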
void AdaptiveManifoldFilterRefImpl::buildManifoldsAndPerformFiltering(const Mat_<Point3f>& eta_k, const Mat_<uchar>& cluster_k, int current_tree_level)
{
    // Compute downsampling factor
    double df = min(sigma_s_ / 4.0, 256.0 * sigma_r_);
    df = floor_to_power_of_two(df);
    df = max(1.0, df);

    // Splatting: project the pixel values onto the current manifold eta_k
    if (eta_k.rows == src_joint_f_.rows)
    {
        ensureSizeIsEnough(src_joint_f_.size(), buf_.X);
        subtract(src_joint_f_, eta_k, buf_.X);

        const Size nsz = Size(saturate_cast<int>(eta_k.cols * (1.0 / df)), saturate_cast<int>(eta_k.rows * (1.0 / df)));
        ensureSizeIsEnough(nsz, buf_.eta_k_small);
        resize(eta_k, buf_.eta_k_small, Size(), 1.0 / df, 1.0 / df);
    }
    else
    {
        ensureSizeIsEnough(eta_k.size(), buf_.eta_k_small);
        eta_k.copyTo(buf_.eta_k_small);

        ensureSizeIsEnough(src_joint_f_.size(), buf_.eta_k_big);
        resize(eta_k, buf_.eta_k_big, src_joint_f_.size());

        ensureSizeIsEnough(src_joint_f_.size(), buf_.X);
        subtract(src_joint_f_, buf_.eta_k_big, buf_.X);
    }

    // Project pixel colors onto the manifold -- Eq. (3), Eq. (5)
    ensureSizeIsEnough(buf_.X.size(), buf_.X_squared);
    cv::multiply(buf_.X, buf_.X, buf_.X_squared); // TODO cvtest

    channelsSum(buf_.X_squared, buf_.pixel_dist_to_manifold_squared);

    phi(buf_.pixel_dist_to_manifold_squared, buf_.gaussian_distance_weights, sigma_r_over_sqrt_2_);

    times(src_f_, buf_.gaussian_distance_weights, buf_.Psi_splat);

    const Mat_<float>& Psi_splat_0 = buf_.gaussian_distance_weights;

    // Save min distance to later perform adjustment of outliers -- Eq. (10)
    if (adjust_outliers_)
    {
        cv::min(_InputArray(min_pixel_dist_to_manifold_squared_), _InputArray(buf_.pixel_dist_to_manifold_squared), _OutputArray(min_pixel_dist_to_manifold_squared_));
    }

    // Blurring: perform filtering over the current manifold eta_k
    catCn(buf_.Psi_splat, Psi_splat_0, buf_.Psi_splat_joined);

    ensureSizeIsEnough(buf_.eta_k_small.size(), buf_.Psi_splat_joined_resized);
    resize(buf_.Psi_splat_joined, buf_.Psi_splat_joined_resized, buf_.eta_k_small.size());

    RF_filter(buf_.Psi_splat_joined_resized, buf_.eta_k_small, buf_.blurred_projected_values, static_cast<float>(sigma_s_ / df), sigma_r_over_sqrt_2_, buf_);

    split_3_1(buf_.blurred_projected_values, buf_.w_ki_Psi_blur, buf_.w_ki_Psi_blur_0);

    // Slicing: gather blurred values from the manifold
    // Since we perform splatting and slicing at the same points over the manifolds,
    // the interpolation weights are equal to the gaussian weights used for splatting.
    const Mat_<float>& w_ki = buf_.gaussian_distance_weights;

    ensureSizeIsEnough(src_f_.size(), buf_.w_ki_Psi_blur_resized);
    resize(buf_.w_ki_Psi_blur, buf_.w_ki_Psi_blur_resized, src_f_.size());
    times(buf_.w_ki_Psi_blur_resized, w_ki, buf_.w_ki_Psi_blur_resized);
    cv::add(sum_w_ki_Psi_blur_, buf_.w_ki_Psi_blur_resized, sum_w_ki_Psi_blur_); // TODO cvtest

    ensureSizeIsEnough(src_f_.size(), buf_.w_ki_Psi_blur_0_resized);
    resize(buf_.w_ki_Psi_blur_0, buf_.w_ki_Psi_blur_0_resized, src_f_.size());
    times(buf_.w_ki_Psi_blur_0_resized, w_ki, buf_.w_ki_Psi_blur_0_resized);
    cv::add(sum_w_ki_Psi_blur_0_, buf_.w_ki_Psi_blur_0_resized, sum_w_ki_Psi_blur_0_); // TODO cvtest

    // Compute two new manifolds eta_minus and eta_plus
    if (current_tree_level < cur_tree_height_)
    {
        // Algorithm 1, Step 2: compute the eigenvector v1
        const Mat_<float> nX(src_joint_f_.size().area(), 3, (float*) buf_.X.data);

        ensureSizeIsEnough(1, nX.cols, buf_.rand_vec);
        if (useRNG)
        {
            rng_.fill(buf_.rand_vec, RNG::UNIFORM, -0.5, 0.5);
        }
        else
        {
            for (int i = 0; i < (int)buf_.rand_vec.total(); i++)
                buf_.rand_vec(0, i) = (i % 2 == 0) ? 0.5f : -0.5f;
        }

        computeEigenVector(nX, cluster_k, buf_.v1, num_pca_iterations_, buf_.rand_vec, buf_);

        // Algorithm 1, Step 3: Segment pixels into two clusters -- Eq. (6)
        ensureSizeIsEnough(nX.rows, buf_.v1.rows, buf_.Nx_v1_mult);
        gemm(nX, buf_.v1, 1.0, noArray(), 0.0, buf_.Nx_v1_mult, GEMM_2_T);

        const Mat_<float> dot(src_joint_f_.rows, src_joint_f_.cols, (float*) buf_.Nx_v1_mult.data);

        Mat_<uchar>& cluster_minus = buf_.cluster_minus[current_tree_level];
        ensureSizeIsEnough(dot.size(), cluster_minus);
        cvtest::compare(dot, 0, cluster_minus, CMP_LT);
        bitwise_and(cluster_minus, cluster_k, cluster_minus);

        Mat_<uchar>& cluster_plus = buf_.cluster_plus[current_tree_level];
        ensureSizeIsEnough(dot.size(), cluster_plus);
        //compare(dot, 0, cluster_plus, CMP_GT);
        cvtest::compare(dot, 0, cluster_plus, CMP_GE);
        bitwise_and(cluster_plus, cluster_k, cluster_plus);

        // Algorithm 1, Step 4: Compute new manifolds by weighted low-pass filtering -- Eq. (7-8)
        ensureSizeIsEnough(w_ki.size(), buf_.theta);
        buf_.theta.setTo(Scalar::all(1.0));
        subtract(buf_.theta, w_ki, buf_.theta);

        Mat_<Point3f>& eta_minus = buf_.eta_minus[current_tree_level];
        calcEta(src_joint_f_, buf_.theta, cluster_minus, eta_minus, (float)sigma_s_, (float)df, buf_);

        Mat_<Point3f>& eta_plus = buf_.eta_plus[current_tree_level];
        calcEta(src_joint_f_, buf_.theta, cluster_plus, eta_plus, (float)sigma_s_, (float)df, buf_);

        // Algorithm 1, Step 5: recursively build more manifolds.
        buildManifoldsAndPerformFiltering(eta_minus, cluster_minus, current_tree_level + 1);
        buildManifoldsAndPerformFiltering(eta_plus, cluster_plus, current_tree_level + 1);
    }
}

} // namespace
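
// Factory kept outside the anonymous namespace so other tests can construct the
// reference filter. A minimal usage sketch (hypothetical test snippet; "input.png"
// is a placeholder path):
//
//   Mat src = imread("input.png", IMREAD_COLOR);   // CV_8UC3, as filter() asserts
//   Mat dst;
//   Ptr<AdaptiveManifoldFilter> amf = createAMFilterRefImpl(16.0, 0.2, false);
//   amf->filter(src, dst, noArray());              // empty joint -> src is used as the joint signal
//   amf->collectGarbage();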
Ptr<AdaptiveManifoldFilter> createAMFilterRefImpl(double sigma_s, double sigma_r, bool adjust_outliers)
{
    Ptr<AdaptiveManifoldFilter> amf(new AdaptiveManifoldFilterRefImpl());

    amf->setSigmaS(sigma_s);
    amf->setSigmaR(sigma_r);
    amf->setAdjustOutliers(adjust_outliers);

    return amf;
}

} // namespace