linemod.cpp

#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc/imgproc_c.h> // cvFindContours
#include <opencv2/imgproc.hpp>
#include <opencv2/rgbd.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>

#include <iterator>
#include <set>
#include <cstdio>
#include <iostream>

// Function prototypes
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);

std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst);

void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst);

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T);

cv::Mat displayQuantized(const cv::Mat& quantized);

// Copy of cv_mouse from cv_utilities
class Mouse
{
public:
  static void start(const std::string& a_img_name)
  {
    cv::setMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
  }
  static int event(void)
  {
    int l_event = m_event;
    m_event = -1;
    return l_event;
  }
  static int x(void)
  {
    return m_x;
  }
  static int y(void)
  {
    return m_y;
  }

private:
  static void cv_on_mouse(int a_event, int a_x, int a_y, int, void *)
  {
    m_event = a_event;
    m_x = a_x;
    m_y = a_y;
  }

  static int m_event;
  static int m_x;
  static int m_y;
};
int Mouse::m_event;
int Mouse::m_x;
int Mouse::m_y;

static void help()
{
  printf("Usage: example_rgbd_linemod [templates.yml]\n\n"
         "Place your object on a planar, featureless surface. With the mouse,\n"
         "frame it in the 'color' window and right click to learn a first template.\n"
         "Then press 'l' to enter online learning mode, and move the camera around.\n"
         "When the match score falls between 90-95%% the demo will add a new template.\n\n"
         "Keys:\n"
         "\t h -- This help page\n"
         "\t l -- Toggle online learning\n"
         "\t m -- Toggle printing match result\n"
         "\t t -- Toggle printing timings\n"
         "\t w -- Write learned templates to disk\n"
         "\t [ ] -- Adjust matching threshold: '[' down, ']' up\n"
         "\t q -- Quit\n\n");
}

// Adapted from cv_timer in cv_utilities
class Timer
{
public:
  Timer() : start_(0), time_(0) {}

  void start()
  {
    start_ = cv::getTickCount();
  }

  void stop()
  {
    CV_Assert(start_ != 0);
    int64 end = cv::getTickCount();
    time_ += end - start_;
    start_ = 0;
  }

  double time()
  {
    double ret = time_ / cv::getTickFrequency();
    time_ = 0;
    return ret;
  }

private:
  int64 start_, time_;
};

// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
  cv::Ptr<cv::linemod::Detector> detector = cv::makePtr<cv::linemod::Detector>();
  cv::FileStorage fs(filename, cv::FileStorage::READ);
  detector->read(fs.root());

  cv::FileNode fn = fs["classes"];
  for (cv::FileNodeIterator i = fn.begin(), iend = fn.end(); i != iend; ++i)
    detector->readClass(*i);

  return detector;
}

static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
{
  cv::FileStorage fs(filename, cv::FileStorage::WRITE);
  detector->write(fs);

  std::vector<cv::String> ids = detector->classIds();
  fs << "classes" << "[";
  for (int i = 0; i < (int)ids.size(); ++i)
  {
    fs << "{";
    detector->writeClass(ids[i], fs);
    fs << "}"; // current class
  }
  fs << "]"; // classes
}

int main(int argc, char * argv[])
{
  // Various settings and flags
  bool show_match_result = true;
  bool show_timings = false;
  bool learn_online = false;
  int num_classes = 0;
  int matching_threshold = 80;
  /// @todo Keys for changing these?
  cv::Size roi_size(200, 200);
  int learning_lower_bound = 90;
  int learning_upper_bound = 95;

  // Timers
  Timer extract_timer;
  Timer match_timer;

  // Initialize HighGUI
  help();
  cv::namedWindow("color");
  cv::namedWindow("normals");
  Mouse::start("color");

  // Initialize LINEMOD data structures
  cv::Ptr<cv::linemod::Detector> detector;
  std::string filename;
  if (argc == 1)
  {
    filename = "linemod_templates.yml";
    detector = cv::linemod::getDefaultLINEMOD();
  }
  else
  {
    detector = readLinemod(argv[1]);

    std::vector<cv::String> ids = detector->classIds();
    num_classes = detector->numClasses();
    printf("Loaded %s with %d classes and %d templates\n",
           argv[1], num_classes, detector->numTemplates());
    if (!ids.empty())
    {
      printf("Class ids:\n");
      std::copy(ids.begin(), ids.end(), std::ostream_iterator<std::string>(std::cout, "\n"));
    }
  }
  int num_modalities = (int)detector->getModalities().size();

  // Open Kinect sensor
  cv::VideoCapture capture( cv::CAP_OPENNI );
  if (!capture.isOpened())
  {
    printf("Could not open OpenNI-capable sensor\n");
    return -1;
  }
  capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
  double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
  //printf("Focal length = %f\n", focal_length);

  // Main loop
  cv::Mat color, depth;
  for(;;)
  {
    // Capture next color/depth pair
    capture.grab();
    capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
    capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);

    std::vector<cv::Mat> sources;
    sources.push_back(color);
    sources.push_back(depth);
    cv::Mat display = color.clone();

    if (!learn_online)
    {
      cv::Point mouse(Mouse::x(), Mouse::y());
      int event = Mouse::event();

      // Compute ROI centered on current mouse location
      cv::Point roi_offset(roi_size.width / 2, roi_size.height / 2);
      cv::Point pt1 = mouse - roi_offset; // top left
      cv::Point pt2 = mouse + roi_offset; // bottom right

      if (event == cv::EVENT_RBUTTONDOWN)
      {
        // Compute object mask by subtracting the plane within the ROI
        std::vector<CvPoint> chain(4);
        chain[0] = cvPoint(pt1);
        chain[1] = cvPoint(pt2.x, pt1.y);
        chain[2] = cvPoint(pt2);
        chain[3] = cvPoint(pt1.x, pt2.y);
        cv::Mat mask;
        subtractPlane(depth, mask, chain, focal_length);

        cv::imshow("mask", mask);

        // Extract template
        std::string class_id = cv::format("class%d", num_classes);
        cv::Rect bb;
        extract_timer.start();
        int template_id = detector->addTemplate(sources, class_id, mask, &bb);
        extract_timer.stop();
        if (template_id != -1)
        {
          printf("*** Added template (id %d) for new object class %d***\n",
                 template_id, num_classes);
          //printf("Extracted at (%d, %d) size %dx%d\n", bb.x, bb.y, bb.width, bb.height);
        }

        ++num_classes;
      }

      // Draw ROI for display
      cv::rectangle(display, pt1, pt2, CV_RGB(0,0,0), 3);
      cv::rectangle(display, pt1, pt2, CV_RGB(255,255,0), 1);
    }

    // Perform matching
    std::vector<cv::linemod::Match> matches;
    std::vector<cv::String> class_ids;
    std::vector<cv::Mat> quantized_images;
    match_timer.start();
    detector->match(sources, (float)matching_threshold, matches, class_ids, quantized_images);
    match_timer.stop();

    int classes_visited = 0;
    std::set<std::string> visited;

    for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
    {
      cv::linemod::Match m = matches[i];

      if (visited.insert(m.class_id).second)
      {
        ++classes_visited;

        if (show_match_result)
        {
          printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
                 m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
        }

        // Draw matching template
        const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
        drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));

        if (learn_online == true)
        {
          /// @todo Online learning possibly broken by new gradient feature extraction,
          /// which assumes an accurate object outline.

          // Compute masks based on convex hull of matched template
          cv::Mat color_mask, depth_mask;
          std::vector<CvPoint> chain = maskFromTemplate(templates, num_modalities,
                                                        cv::Point(m.x, m.y), color.size(),
                                                        color_mask, display);
          subtractPlane(depth, depth_mask, chain, focal_length);

          cv::imshow("mask", depth_mask);

          // If pretty sure (but not TOO sure), add new template
          if (learning_lower_bound < m.similarity && m.similarity < learning_upper_bound)
          {
            extract_timer.start();
            int template_id = detector->addTemplate(sources, m.class_id, depth_mask);
            extract_timer.stop();
            if (template_id != -1)
            {
              printf("*** Added template (id %d) for existing object class %s***\n",
                     template_id, m.class_id.c_str());
            }
          }
        }
      }
    }

    if (show_match_result && matches.empty())
      printf("No matches found...\n");
    if (show_timings)
    {
      printf("Training: %.2fs\n", extract_timer.time());
      printf("Matching: %.2fs\n", match_timer.time());
    }
    if (show_match_result || show_timings)
      printf("------------------------------------------------------------\n");

    cv::imshow("color", display);
    cv::imshow("normals", quantized_images[1]);

    cv::FileStorage fs;
    char key = (char)cv::waitKey(10);
    if( key == 'q' )
      break;

    switch (key)
    {
      case 'h':
        help();
        break;
      case 'm':
        // toggle printing match result
        show_match_result = !show_match_result;
        printf("Show match result %s\n", show_match_result ? "ON" : "OFF");
        break;
      case 't':
        // toggle printing timings
        show_timings = !show_timings;
        printf("Show timings %s\n", show_timings ? "ON" : "OFF");
        break;
      case 'l':
        // toggle online learning
        learn_online = !learn_online;
        printf("Online learning %s\n", learn_online ? "ON" : "OFF");
        break;
      case '[':
        // decrement threshold
        matching_threshold = std::max(matching_threshold - 1, -100);
        printf("New threshold: %d\n", matching_threshold);
        break;
      case ']':
        // increment threshold
        matching_threshold = std::min(matching_threshold + 1, +100);
        printf("New threshold: %d\n", matching_threshold);
        break;
      case 'w':
        // write model to disk
        writeLinemod(detector, filename);
        printf("Wrote detector and templates to %s\n", filename.c_str());
        break;
      default:
        ;
    }
  }
  return 0;
}

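// Back-projects pixel coordinates plus depth into 3D camera coordinates using a
// simple pinhole model with focal length f. Note that the principal point is
// hard-coded to (320, 240), i.e. a 640x480 depth image is assumed.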
static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
{
  real.resize(proj.size());
  double f_inv = 1.0 / f;

  for (int i = 0; i < (int)proj.size(); ++i)
  {
    double Z = proj[i].z;
    real[i].x = (proj[i].x - 320.) * (f_inv * Z);
    real[i].y = (proj[i].y - 240.) * (f_inv * Z);
    real[i].z = Z;
  }
}

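// Fits a plane (via SVD on homogeneous 3D points) to depth samples taken along the
// boundary chain, then labels every pixel inside the chain: points close to the
// plane are set to 0 (background) in each output mask, points far from it to 255
// (object). Mask index l_p is written at 1/(l_p + 1) of the original resolution.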
static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
{
  const int l_num_cost_pts = 200;

  float l_thres = 4;

  IplImage * lp_mask = cvCreateImage(cvGetSize(ap_depth), IPL_DEPTH_8U, 1);
  cvSet(lp_mask, cvRealScalar(0));

  std::vector<CvPoint> l_chain_vector;

  float l_chain_length = 0;
  float * lp_seg_length = new float[a_chain.size()];

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    float x_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x);
    float y_diff = (float)(a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y);
    lp_seg_length[l_i] = sqrt(x_diff*x_diff + y_diff*y_diff);
    l_chain_length += lp_seg_length[l_i];
  }
  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    if (lp_seg_length[l_i] > 0)
    {
      int l_cur_num = cvRound(l_num_cost_pts * lp_seg_length[l_i] / l_chain_length);
      float l_cur_len = lp_seg_length[l_i] / l_cur_num;

      for (int l_j = 0; l_j < l_cur_num; ++l_j)
      {
        float l_ratio = (l_cur_len * l_j / lp_seg_length[l_i]);

        CvPoint l_pts;
        l_pts.x = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].x - a_chain[l_i].x) + a_chain[l_i].x);
        l_pts.y = cvRound(l_ratio * (a_chain[(l_i + 1) % a_chain.size()].y - a_chain[l_i].y) + a_chain[l_i].y);

        l_chain_vector.push_back(l_pts);
      }
    }
  }
  std::vector<cv::Point3d> lp_src_3Dpts(l_chain_vector.size());

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    lp_src_3Dpts[l_i].x = l_chain_vector[l_i].x;
    lp_src_3Dpts[l_i].y = l_chain_vector[l_i].y;
    lp_src_3Dpts[l_i].z = CV_IMAGE_ELEM(ap_depth, unsigned short, cvRound(lp_src_3Dpts[l_i].y), cvRound(lp_src_3Dpts[l_i].x));
    //CV_IMAGE_ELEM(lp_mask,unsigned char,(int)lp_src_3Dpts[l_i].Y,(int)lp_src_3Dpts[l_i].X)=255;
  }
  //cv_show_image(lp_mask,"hallo2");

  reprojectPoints(lp_src_3Dpts, lp_src_3Dpts, f);

  CvMat * lp_pts = cvCreateMat((int)l_chain_vector.size(), 4, CV_32F);
  CvMat * lp_v = cvCreateMat(4, 4, CV_32F);
  CvMat * lp_w = cvCreateMat(4, 1, CV_32F);

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    CV_MAT_ELEM(*lp_pts, float, l_i, 0) = (float)lp_src_3Dpts[l_i].x;
    CV_MAT_ELEM(*lp_pts, float, l_i, 1) = (float)lp_src_3Dpts[l_i].y;
    CV_MAT_ELEM(*lp_pts, float, l_i, 2) = (float)lp_src_3Dpts[l_i].z;
    CV_MAT_ELEM(*lp_pts, float, l_i, 3) = 1.0f;
  }
  cvSVD(lp_pts, lp_w, 0, lp_v);

  float l_n[4] = {CV_MAT_ELEM(*lp_v, float, 0, 3),
                  CV_MAT_ELEM(*lp_v, float, 1, 3),
                  CV_MAT_ELEM(*lp_v, float, 2, 3),
                  CV_MAT_ELEM(*lp_v, float, 3, 3)};

  float l_norm = sqrt(l_n[0] * l_n[0] + l_n[1] * l_n[1] + l_n[2] * l_n[2]);

  l_n[0] /= l_norm;
  l_n[1] /= l_norm;
  l_n[2] /= l_norm;
  l_n[3] /= l_norm;

  float l_max_dist = 0;

  for (int l_i = 0; l_i < (int)l_chain_vector.size(); ++l_i)
  {
    float l_dist = l_n[0] * CV_MAT_ELEM(*lp_pts, float, l_i, 0) +
                   l_n[1] * CV_MAT_ELEM(*lp_pts, float, l_i, 1) +
                   l_n[2] * CV_MAT_ELEM(*lp_pts, float, l_i, 2) +
                   l_n[3] * CV_MAT_ELEM(*lp_pts, float, l_i, 3);

    if (fabs(l_dist) > l_max_dist)
      l_max_dist = l_dist;
  }
  //std::cerr << "plane: " << l_n[0] << ";" << l_n[1] << ";" << l_n[2] << ";" << l_n[3] << " maxdist: " << l_max_dist << " end" << std::endl;
  int l_minx = ap_depth->width;
  int l_miny = ap_depth->height;
  int l_maxx = 0;
  int l_maxy = 0;

  for (int l_i = 0; l_i < (int)a_chain.size(); ++l_i)
  {
    l_minx = std::min(l_minx, a_chain[l_i].x);
    l_miny = std::min(l_miny, a_chain[l_i].y);
    l_maxx = std::max(l_maxx, a_chain[l_i].x);
    l_maxy = std::max(l_maxy, a_chain[l_i].y);
  }
  int l_w = l_maxx - l_minx + 1;
  int l_h = l_maxy - l_miny + 1;
  int l_nn = (int)a_chain.size();

  CvPoint * lp_chain = new CvPoint[l_nn];

  for (int l_i = 0; l_i < l_nn; ++l_i)
    lp_chain[l_i] = a_chain[l_i];

  cvFillPoly(lp_mask, &lp_chain, &l_nn, 1, cvScalar(255, 255, 255));

  delete[] lp_chain;

  //cv_show_image(lp_mask,"hallo1");

  std::vector<cv::Point3d> lp_dst_3Dpts(l_h * l_w);

  int l_ind = 0;

  for (int l_r = 0; l_r < l_h; ++l_r)
  {
    for (int l_c = 0; l_c < l_w; ++l_c)
    {
      lp_dst_3Dpts[l_ind].x = l_c + l_minx;
      lp_dst_3Dpts[l_ind].y = l_r + l_miny;
      lp_dst_3Dpts[l_ind].z = CV_IMAGE_ELEM(ap_depth, unsigned short, l_r + l_miny, l_c + l_minx);
      ++l_ind;
    }
  }
  reprojectPoints(lp_dst_3Dpts, lp_dst_3Dpts, f);

  l_ind = 0;

  for (int l_r = 0; l_r < l_h; ++l_r)
  {
    for (int l_c = 0; l_c < l_w; ++l_c)
    {
      float l_dist = (float)(l_n[0] * lp_dst_3Dpts[l_ind].x + l_n[1] * lp_dst_3Dpts[l_ind].y + lp_dst_3Dpts[l_ind].z * l_n[2] + l_n[3]);

      ++l_ind;

      if (CV_IMAGE_ELEM(lp_mask, unsigned char, l_r + l_miny, l_c + l_minx) != 0)
      {
        if (fabs(l_dist) < std::max(l_thres, (l_max_dist * 2.0f)))
        {
          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
          {
            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 0;
          }
        }
        else
        {
          for (int l_p = 0; l_p < (int)a_masks.size(); ++l_p)
          {
            int l_col = cvRound((l_c + l_minx) / (l_p + 1.0));
            int l_row = cvRound((l_r + l_miny) / (l_p + 1.0));

            CV_IMAGE_ELEM(a_masks[l_p], unsigned char, l_row, l_col) = 255;
          }
        }
      }
    }
  }
  // free the per-segment length buffer
  delete[] lp_seg_length;

  cvReleaseImage(&lp_mask);
  cvReleaseMat(&lp_pts);
  cvReleaseMat(&lp_w);
  cvReleaseMat(&lp_v);
}

void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f)
{
  mask = cv::Mat::zeros(depth.size(), CV_8U);
  std::vector<IplImage*> tmp;
  IplImage mask_ipl = cvIplImage(mask);
  tmp.push_back(&mask_ipl);
  IplImage depth_ipl = cvIplImage(depth);
  filterPlane(&depth_ipl, tmp, chain, f);
}

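// Builds an object mask from the convex hull of the matched template features,
// dilates it generously, and returns the contour points of the dilated mask
// (also drawn into dst) for use as the boundary chain passed to subtractPlane().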
std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                      int num_modalities, cv::Point offset, cv::Size size,
                                      cv::Mat& mask, cv::Mat& dst)
{
  templateConvexHull(templates, num_modalities, offset, size, mask);

  const int OFFSET = 30;
  cv::dilate(mask, mask, cv::Mat(), cv::Point(-1,-1), OFFSET);

  CvMemStorage * lp_storage = cvCreateMemStorage(0);
  CvTreeNodeIterator l_iterator;
  CvSeqReader l_reader;
  CvSeq * lp_contour = 0;

  cv::Mat mask_copy = mask.clone();
  IplImage mask_copy_ipl = cvIplImage(mask_copy);
  cvFindContours(&mask_copy_ipl, lp_storage, &lp_contour, sizeof(CvContour),
                 CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

  std::vector<CvPoint> l_pts1; // to use as input to cv_primesensor::filter_plane

  cvInitTreeNodeIterator(&l_iterator, lp_contour, 1);
  while ((lp_contour = (CvSeq *)cvNextTreeNode(&l_iterator)) != 0)
  {
    CvPoint l_pt0;
    cvStartReadSeq(lp_contour, &l_reader, 0);
    CV_READ_SEQ_ELEM(l_pt0, l_reader);
    l_pts1.push_back(l_pt0);

    for (int i = 0; i < lp_contour->total; ++i)
    {
      CvPoint l_pt1;
      CV_READ_SEQ_ELEM(l_pt1, l_reader);
      /// @todo Really need dst at all? Can just as well do this outside
      cv::line(dst, l_pt0, l_pt1, CV_RGB(0, 255, 0), 2);

      l_pt0 = l_pt1;
      l_pts1.push_back(l_pt0);
    }
  }
  cvReleaseMemStorage(&lp_storage);

  return l_pts1;
}

// Adapted from cv_show_angles
cv::Mat displayQuantized(const cv::Mat& quantized)
{
  cv::Mat color(quantized.size(), CV_8UC3);
  for (int r = 0; r < quantized.rows; ++r)
  {
    const uchar* quant_r = quantized.ptr(r);
    cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);

    for (int c = 0; c < quantized.cols; ++c)
    {
      cv::Vec3b& bgr = color_r[c];
      switch (quant_r[c])
      {
        case 0:   bgr[0]=  0; bgr[1]=  0; bgr[2]=  0; break;
        case 1:   bgr[0]= 55; bgr[1]= 55; bgr[2]= 55; break;
        case 2:   bgr[0]= 80; bgr[1]= 80; bgr[2]= 80; break;
        case 4:   bgr[0]=105; bgr[1]=105; bgr[2]=105; break;
        case 8:   bgr[0]=130; bgr[1]=130; bgr[2]=130; break;
        case 16:  bgr[0]=155; bgr[1]=155; bgr[2]=155; break;
        case 32:  bgr[0]=180; bgr[1]=180; bgr[2]=180; break;
        case 64:  bgr[0]=205; bgr[1]=205; bgr[2]=205; break;
        case 128: bgr[0]=230; bgr[1]=230; bgr[2]=230; break;
        case 255: bgr[0]=  0; bgr[1]=  0; bgr[2]=255; break;
        default:  bgr[0]=  0; bgr[1]=255; bgr[2]=  0; break;
      }
    }
  }

  return color;
}

// Adapted from cv_line_template::convex_hull
void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                        int num_modalities, cv::Point offset, cv::Size size,
                        cv::Mat& dst)
{
  std::vector<cv::Point> points;
  for (int m = 0; m < num_modalities; ++m)
  {
    for (int i = 0; i < (int)templates[m].features.size(); ++i)
    {
      cv::linemod::Feature f = templates[m].features[i];
      points.push_back(cv::Point(f.x, f.y) + offset);
    }
  }

  std::vector<cv::Point> hull;
  cv::convexHull(points, hull);

  dst = cv::Mat::zeros(size, CV_8U);
  const int hull_count = (int)hull.size();
  const cv::Point* hull_pts = &hull[0];
  cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
}

void drawResponse(const std::vector<cv::linemod::Template>& templates,
                  int num_modalities, cv::Mat& dst, cv::Point offset, int T)
{
  static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
                                        CV_RGB(0, 255, 0),
                                        CV_RGB(255, 255, 0),
                                        CV_RGB(255, 140, 0),
                                        CV_RGB(255, 0, 0) };

  for (int m = 0; m < num_modalities; ++m)
  {
    // NOTE: Original demo recalculated max response for each feature in the TxT
    // box around it and chose the display color based on that response. Here
    // the display color just depends on the modality.
    cv::Scalar color = COLORS[m];

    for (int i = 0; i < (int)templates[m].features.size(); ++i)
    {
      cv::linemod::Feature f = templates[m].features[i];
      cv::Point pt(f.x + offset.x, f.y + offset.y);
      cv::circle(dst, pt, T / 2, color);
    }
  }
}