4#include <visp3/core/vpConfig.h>
16#if (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
17 defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2) || defined(VISP_HAVE_OPENCV) && \
18 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
19 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))) && \
20 defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_OPENCV) && \
21 (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
22 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES)))
24#ifdef VISP_HAVE_MODULE_SENSOR
25#include <visp3/sensor/vp1394CMUGrabber.h>
26#include <visp3/sensor/vp1394TwoGrabber.h>
27#include <visp3/sensor/vpFlyCaptureGrabber.h>
28#include <visp3/sensor/vpRealSense2.h>
29#include <visp3/sensor/vpV4l2Grabber.h>
31#include <visp3/core/vpIoTools.h>
32#include <visp3/core/vpXmlParserCamera.h>
33#include <visp3/gui/vpDisplayFactory.h>
34#include <visp3/io/vpImageIo.h>
35#include <visp3/vision/vpKeyPoint.h>
37#include <visp3/mbt/vpMbGenericTracker.h>
40#if (VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)
41#include <opencv2/highgui/highgui.hpp>
42#elif (VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)
43#include <opencv2/videoio/videoio.hpp>
46int main(
int argc,
char **argv)
48#ifdef ENABLE_VISP_NAMESPACE
52#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
53 std::shared_ptr<vpDisplay> display;
59 std::string opt_modelname =
"model/teabox/teabox.cao";
62 double opt_proj_error_threshold = 30.;
63 bool opt_use_ogre =
false;
64 bool opt_use_scanline =
false;
65 bool opt_display_projection_error =
false;
66 bool opt_learn =
false;
67 bool opt_auto_init =
false;
68 std::string opt_learning_data =
"learning/data-learned.bin";
69 std::string opt_intrinsic_file =
"";
70 std::string opt_camera_name =
"";
72 for (
int i = 1;
i < argc;
i++) {
73 if (std::string(argv[i]) ==
"--model" && i + 1 < argc) {
74 opt_modelname = std::string(argv[++i]);
76 else if (std::string(argv[i]) ==
"--tracker" && i + 1 < argc) {
77 opt_tracker = atoi(argv[++i]);
79 else if (std::string(argv[i]) ==
"--camera-device" && i + 1 < argc) {
80 opt_device = atoi(argv[++i]);
82 else if (std::string(argv[i]) ==
"--max_proj_error" && i + 1 < argc) {
83 opt_proj_error_threshold = atof(argv[++i]);
85 else if (std::string(argv[i]) ==
"--use_ogre") {
88 else if (std::string(argv[i]) ==
"--use_scanline") {
89 opt_use_scanline =
true;
91 else if (std::string(argv[i]) ==
"--learn") {
94 else if (std::string(argv[i]) ==
"--learning_data" && i + 1 < argc) {
95 opt_learning_data = argv[++
i];
97 else if (std::string(argv[i]) ==
"--auto_init") {
100 else if (std::string(argv[i]) ==
"--display_proj_error") {
101 opt_display_projection_error =
true;
103 else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
104 opt_intrinsic_file = std::string(argv[++i]);
106 else if (std::string(argv[i]) ==
"--camera-name" && i + 1 < argc) {
107 opt_camera_name = std::string(argv[++i]);
109 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
111 <<
"\nUsage: " << argv[0] <<
" [--camera-device <camera device> (default: 0)]"
112 <<
" [--intrinsic <intrinsic file> (default: empty)]"
113 <<
" [--camera-name <camera name> (default: empty)]"
114 <<
" [--model <model name> (default: teabox)]"
115 <<
" [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
116 <<
" [--use_ogre] [--use_scanline]"
117 <<
" [--max_proj_error <allowed projection error> (default: 30)]"
120 <<
" [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
121 <<
" [--display_proj_error]"
122 <<
" [--help] [-h]\n"
130 if (!parentname.empty())
131 objectname = parentname +
"/" + objectname;
133 std::cout <<
"Tracker requested config files: " << objectname <<
".[init, cao]" << std::endl;
134 std::cout <<
"Tracker optional config files: " << objectname <<
".[ppm]" << std::endl;
136 std::cout <<
"Tracked features: " << std::endl;
137 std::cout <<
" Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
138 std::cout <<
" Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
139 std::cout <<
"Tracker options: " << std::endl;
140 std::cout <<
" Use ogre : " << opt_use_ogre << std::endl;
141 std::cout <<
" Use scanline: " << opt_use_scanline << std::endl;
142 std::cout <<
" Proj. error : " << opt_proj_error_threshold << std::endl;
143 std::cout <<
" Display proj. error: " << opt_display_projection_error << std::endl;
144 std::cout <<
"Config files: " << std::endl;
145 std::cout <<
" Config file : "
146 <<
"\"" << objectname +
".xml"
147 <<
"\"" << std::endl;
148 std::cout <<
" Model file : "
149 <<
"\"" << objectname +
".cao"
150 <<
"\"" << std::endl;
151 std::cout <<
" Init file : "
152 <<
"\"" << objectname +
".init"
153 <<
"\"" << std::endl;
154 std::cout <<
"Learning options : " << std::endl;
155 std::cout <<
" Learn : " << opt_learn << std::endl;
156 std::cout <<
" Auto init : " << opt_auto_init << std::endl;
157 std::cout <<
" Learning data: " << opt_learning_data << std::endl;
160#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
169 cam.initPersProjWithoutDistortion(839, 839, 325, 243);
172#if defined(VISP_HAVE_PUGIXML)
174 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
184#if defined(VISP_HAVE_V4L2)
186 std::ostringstream device;
187 device <<
"/dev/video" << opt_device;
188 std::cout <<
"Use Video 4 Linux grabber on device " << device.str() << std::endl;
192#elif defined(VISP_HAVE_DC1394)
194 std::cout <<
"Use DC1394 grabber" << std::endl;
197#elif defined(VISP_HAVE_CMU1394)
199 std::cout <<
"Use CMU1394 grabber" << std::endl;
202#elif defined(VISP_HAVE_FLYCAPTURE)
204 std::cout <<
"Use FlyCapture grabber" << std::endl;
207#elif defined(VISP_HAVE_REALSENSE2)
209 std::cout <<
"Use Realsense 2 grabber" << std::endl;
212 config.disable_stream(RS2_STREAM_DEPTH);
213 config.disable_stream(RS2_STREAM_INFRARED);
214 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
218 std::cout <<
"Read camera parameters from Realsense device" << std::endl;
220#elif defined(VISP_HAVE_OPENCV) && \
221 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
222 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))
223 std::cout <<
"Use OpenCV grabber on device " << opt_device << std::endl;
224 cv::VideoCapture g(opt_device);
226 std::cout <<
"Failed to open the camera" << std::endl;
235#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
240 display->init(I, 100, 100,
"Model-based tracker");
243#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
245#elif defined(VISP_HAVE_OPENCV) && \
246 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
247 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))
263 if (opt_tracker == 0)
265#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
266 else if (opt_tracker == 1)
272#if !defined(VISP_HAVE_MODULE_KLT)
273 std::cout <<
"klt and hybrid model-based tracker are not available since visp_klt module is not available. "
274 "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
277 std::cout <<
"Hybrid tracking is impossible since OpenCV is not enabled. "
278 <<
"Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
287#if defined(VISP_HAVE_PUGIXML)
289 tracker.loadConfigFile(objectname +
".xml");
297 if (opt_tracker == 0 || opt_tracker == 2) {
312#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
313 if (opt_tracker == 1 || opt_tracker == 2) {
323 tracker.setKltOpencv(klt_settings);
330 tracker.setCameraParameters(cam);
334 tracker.loadModel(objectname +
".cao");
337 tracker.setDisplayFeatures(
true);
340 tracker.setOgreVisibilityTest(opt_use_ogre);
341 tracker.setScanLineVisibilityTest(opt_use_scanline);
344 tracker.setProjectionErrorComputation(
true);
345 tracker.setProjectionErrorDisplay(opt_display_projection_error);
348#if defined(VISP_HAVE_OPENCV) && \
349 (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_XFEATURES2D)) || \
350 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES)))
351 std::string detectorName =
"SIFT";
352 std::string extractorName =
"SIFT";
353 std::string matcherName =
"BruteForce";
354#elif defined(VISP_HAVE_OPENCV) && \
355 (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || \
356 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES)))
357 std::string detectorName =
"FAST";
358 std::string extractorName =
"ORB";
359 std::string matcherName =
"BruteForce-Hamming";
362 if (opt_learn || opt_auto_init) {
366#if defined(VISP_HAVE_OPENCV) && \
367 (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || \
368 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES)))
369#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
370 keypoint.setDetectorParameter(
"ORB",
"nLevels", 1);
372 cv::Ptr<cv::ORB> orb_detector = keypoint.
getDetector(
"ORB").dynamicCast<cv::ORB>();
374 orb_detector->setNLevels(1);
382 std::cout <<
"Cannot enable auto detection. Learning file \"" << opt_learning_data <<
"\" doesn't exist"
389 tracker.initClick(I, objectname +
".init",
true);
392 bool learn_position =
false;
393 bool run_auto_init =
false;
395 run_auto_init =
true;
400 unsigned int learn_cpt = 0;
402 bool tracking_failed =
false;
406#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
408#elif defined(VISP_HAVE_OPENCV) && \
409 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
410 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))
418 tracking_failed =
false;
420 std::cout <<
"Auto init succeed" << std::endl;
428 else if (tracking_failed) {
430 tracking_failed =
false;
431 tracker.initClick(I, objectname +
".init",
true);
438 tracker.setDisplayFeatures(
false);
440 run_auto_init =
false;
445 std::cout <<
"Tracker exception: " <<
e.getStringMessage() << std::endl;
446 tracking_failed =
true;
448 std::cout <<
"Tracker needs to restart (tracking exception)" << std::endl;
449 run_auto_init =
true;
453 if (!tracking_failed) {
454 double proj_error = 0;
457 proj_error =
tracker.getProjectionError();
461 tracker.getCameraParameters(cam);
462 proj_error =
tracker.computeCurrentProjectionError(I, cMo, cam);
464 if (proj_error > opt_proj_error_threshold) {
465 std::cout <<
"Tracker needs to restart (projection error detected: " << proj_error <<
")" << std::endl;
467 run_auto_init =
true;
469 tracking_failed =
true;
473 if (!tracking_failed) {
474 tracker.setDisplayFeatures(
true);
479 tracker.getCameraParameters(cam);
486 std::stringstream ss;
487 ss <<
"Translation: " << std::setprecision(5) << pose[0] <<
" " << pose[1] <<
" " << pose[2] <<
" [m]";
490 ss <<
"Rotation tu: " << std::setprecision(4) <<
vpMath::deg(pose[3]) <<
" " <<
vpMath::deg(pose[4]) <<
" "
495 std::stringstream ss;
496 ss <<
"Features: edges " <<
tracker.getNbFeaturesEdge() <<
", klt " <<
tracker.getNbFeaturesKlt();
501 if (learn_position) {
504 std::vector<cv::KeyPoint> trainKeyPoints;
505 keypoint.
detect(I, trainKeyPoints);
508 std::vector<vpPolygon> polygons;
509 std::vector<std::vector<vpPoint> > roisPt;
510 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair =
tracker.getPolygonFaces();
511 polygons = pair.first;
512 roisPt = pair.second;
515 std::vector<cv::Point3f> points3f;
519 keypoint.
buildReference(I, trainKeyPoints, points3f,
true, learn_id++);
522 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
525 learn_position =
false;
526 std::cout <<
"Data learned" << std::endl;
529 std::stringstream ss;
534 else if (opt_auto_init)
545 learn_position =
true;
548 run_auto_init =
true;
554 if (opt_learn && learn_cpt) {
555 std::cout <<
"Save learning from " << learn_cpt <<
" images in file: " << opt_learning_data << std::endl;
560 std::cout <<
"Catch a ViSP exception: " <<
e << std::endl;
564#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
565 if (display !=
nullptr) {
576#if defined(VISP_HAVE_OPENCV)
577 std::cout <<
"Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
578 <<
"Realsense2), configure and build ViSP again to use this tutorial."
581 std::cout <<
"Install OpenCV 3rd party, configure and build ViSP again to use this tutorial." << std::endl;
Firewire cameras video capture based on CMU 1394 Digital Camera SDK.
void open(vpImage< unsigned char > &I)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
void open(vpImage< unsigned char > &I)
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
static const vpColor yellow
static const vpColor green
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
void open(vpImage< unsigned char > &I)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
Class that allows keypoints 2D features detection (and descriptors extraction) and matching thanks to the OpenCV library.
unsigned int buildReference(const vpImage< unsigned char > &I) VP_OVERRIDE
void setExtractor(const vpFeatureDescriptorType &extractorType)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void setMatcher(const std::string &matcherName)
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
unsigned int matchPoint(const vpImage< unsigned char > &I) VP_OVERRIDE
void setDetector(const vpFeatureDetectorType &detectorType)
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this class, OpenCV should be installed.
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double deg(double rad)
Real-time 6D object pose tracking using its CAD model.
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
Implementation of a pose vector and operations on poses.
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void open(vpImage< unsigned char > &I)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setDevice(const std::string &devname)
XML parser to load and save intrinsic camera parameters.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()