Visual Servoing Platform version 3.7.0
tutorial-mb-generic-tracker-rgbd-realsense.cpp
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_PUGIXML) && defined(VISP_HAVE_OPENCV) && \
  (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
   ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES)))

#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/vision/vpKeyPoint.h>

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

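// Depth feature modes: the depth stream can be left unused, used as a dense
// point-cloud feature, or used through surface normals by the tracker.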
#ifndef DOXYGEN_SHOULD_SKIP_THIS
typedef enum DepthType
{
  DEPTH_UNUSED = 0,
  DEPTH_DENSE = 1,
  DEPTH_NORMAL = 2,
  DEPTH_COUNT = 3
} DepthType;

std::string depthTypeToString(const DepthType &type)
{
  std::string name;
  switch (type) {
  case DEPTH_UNUSED:
    name = "unused";
    break;
  case DEPTH_DENSE:
    name = "dense";
    break;
  case DEPTH_NORMAL:
    name = "normals";
    break;
  case DEPTH_COUNT:
  default:
    name = "unknown";
    break;
  }
  return name;
}

DepthType depthTypeFromString(const std::string &name)
{
  DepthType type(DEPTH_COUNT);
  unsigned int i = 0;
  bool notFound = true;
  while ((i < static_cast<unsigned int>(DEPTH_COUNT)) && notFound) {
    DepthType candidate = static_cast<DepthType>(i);
    if (vpIoTools::toLowerCase(name) == depthTypeToString(candidate)) {
      notFound = false;
      type = candidate;
    }
    ++i;
  }
  return type;
}

std::string getDepthTypeList(const std::string &prefix = "<", const std::string &sep = " , ",
                             const std::string &suffix = ">")
{
  std::string list(prefix);
  unsigned int i = 0;
  while (i < static_cast<unsigned int>(DEPTH_COUNT - 1)) {
    DepthType type = static_cast<DepthType>(i);
    std::string name = depthTypeToString(type);
    list += name + sep;
    ++i;
  }
  DepthType type = static_cast<DepthType>(DEPTH_COUNT - 1);
  std::string name = depthTypeToString(type);
  list += name + suffix;
  return list;
}
#endif

int main(int argc, char *argv[])
{
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  DepthType use_depth = DepthType::DEPTH_UNUSED;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

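  // Parse command-line options; unrecognized arguments are silently ignored.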
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    }
    else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    }
    else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = depthTypeFromString(std::string(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--learn") {
      learn = true;
    }
    else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    }
    else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    }
    else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
        << argv[0]
        << " [--model_color <object.cao>] [--model_depth <object.cao>]"
           " [--config_color <object.xml>] [--config_depth <object.xml>]"
           " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
           " [--proj_error_threshold <threshold between 0 and 90> (default: "
        << proj_error_threshold
        << ")]"
           " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth "
        << getDepthTypeList() << " (default: " << depthTypeToString(use_depth) << ")]"
           " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
           " [--display_proj_error]"
        << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth "
        << depthTypeToString(DEPTH_DENSE) << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth "
        << depthTypeToString(DEPTH_DENSE) << " --learn" << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth "
        << depthTypeToString(DEPTH_DENSE) << " --auto_init" << std::endl;

      return EXIT_SUCCESS;
    }
  }

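  // When only the color model is given, derive the depth model and the
  // default xml/init file names from the model path.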
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << depthTypeToString(use_depth) << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: \"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: \"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : \"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : \"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : \"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn       : " << learn << std::endl;
  std::cout << "  Auto init   : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if ((!use_edges) && (!use_klt) && (use_depth == DEPTH_UNUSED)) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "One of the required file names is empty (config_color, config_depth, model_color, "
                 "model_depth or init_file)"
              << std::endl;
    return EXIT_FAILURE;
  }

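  // Configure the RealSense grabber: 640x480 color (RGBA) and depth (Z16)
  // streams at 30 Hz.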
  vpRealSense2 realsense;
  int width = 640, height = 480;
  int fps = 30;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);

  try {
    realsense.open(config);
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
    std::cout << "Check if the RealSense camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

  vpCameraParameters cam_color =
    realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth =
    realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  vpImage<vpRGBa> I_color(height, width);
  vpImage<unsigned char> I_gray(height, width);
  vpImage<unsigned char> I_depth(height, width);
  vpImage<uint16_t> I_depth_raw(height, width);

  unsigned int _posx = 100, _posy = 50;

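  // Create the two display windows: smart pointers with C++11 and above,
  // raw pointers (released at the end of main()) otherwise.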
#ifdef VISP_HAVE_DISPLAY
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
  std::shared_ptr<vpDisplay> display1 = vpDisplayFactory::createDisplay();
  std::shared_ptr<vpDisplay> display2 = vpDisplayFactory::createDisplay();
#else
  vpDisplay *display1 = vpDisplayFactory::allocateDisplay();
  vpDisplay *display2 = vpDisplayFactory::allocateDisplay();
#endif
  if (use_edges || use_klt)
    display1->init(I_gray, _posx, _posy, "Color stream");
  if (use_depth != DEPTH_UNUSED)
    display2->init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");
#endif

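  // Warm-up loop: stream images until the user clicks in one of the views to
  // confirm that the object is visible and tracking can start.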
  while (true) {
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth != DEPTH_UNUSED) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }

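  // Select the tracker types from the enabled features: edge and KLT trackers
  // can be combined on the color camera, depth adds a second camera.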
  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth == DEPTH_NORMAL) {
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_NORMAL_TRACKER);
  }
  else if (use_depth == DEPTH_DENSE) {
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
  }

  vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  vpMbGenericTracker tracker(trackerTypes);

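  // Load the configuration and CAD model for each enabled camera. When both a
  // color and a depth feature are used, "Camera1" is the color camera and
  // "Camera2" the depth camera, linked by the depth_M_color transformation.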
  if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth != DEPTH_UNUSED) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);

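  // Keypoint detector/extractor/matcher used for learning and relocalization:
  // SIFT when the corresponding OpenCV modules are available, FAST + ORB with
  // Hamming matching otherwise.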
#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_XFEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#elif ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif
  }

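  // With --auto_init the pose is retrieved by keypoint matching against the
  // learning database; otherwise the user initializes the tracker by clicking
  // on the points listed in the .init file.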
  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  }
  else {
    if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    }
    else if (use_edges || use_klt) {
      tracker.initClick(I_gray, init_file, true);
    }
    else if (use_depth != DEPTH_UNUSED) {
      tracker.initClick(I_depth, init_file, true);
    }

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;

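  // Main loop: acquire color + depth + point cloud, (re)localize if requested,
  // track, validate with the projection error and display the results.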
  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr,
                        nullptr);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth != DEPTH_UNUSED) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      }
      else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      }
      else if (use_depth != DEPTH_UNUSED) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          }
          else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          }
          else if (use_depth != DEPTH_UNUSED) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        }
        else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth != DEPTH_UNUSED) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn off display features just after auto init, so that wrong
          // moving edges are not displayed if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
        else if (use_edges || use_klt) {
          tracker.track(I_gray);
        }
        else if (use_depth != DEPTH_UNUSED) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      }
      catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      // Get the object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        proj_error = tracker.getProjectionError();
      }
      else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

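      // If the projection error exceeds the threshold, consider tracking lost
      // and trigger a new auto initialization on the next iteration.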
      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }

      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        }
        else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        }
        else if (use_depth != DEPTH_UNUSED) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }

        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          }
          else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          }
          else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth != DEPTH_UNUSED) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, static_cast<int>(it->pt.y), static_cast<int>(it->pt.x), 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
  }

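  // Pre-C++11 builds allocated the displays with allocateDisplay(); release
  // them explicitly here.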
#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) && defined(VISP_HAVE_DISPLAY)
  if (display1 != nullptr) {
    delete display1;
  }
  if (display2 != nullptr) {
    delete display2;
  }
#endif

  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_REALSENSE2)
int main()
{
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Install librealsense2 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#endif