#include <iostream>
#include <visp3/core/vpConfig.h>
#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_MODULE_MBT)
#include <fstream>
#include <ios>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#ifdef ENABLE_VISP_NAMESPACE
#endif
// States of the demo's finite-state machine: detect the AprilTag to
// (re-)initialize the pose, track the cube, or quit on user request.
typedef enum { state_detection, state_tracking, state_quit } state_t;
/*!
 * Generate the model file `cube.cao` describing a cube in ViSP .cao (V1)
 * format. Face 0 (the one carrying the AprilTag) lies in the Z=0 plane,
 * centered on the origin; the cube extends toward negative Z.
 *
 * \param cubeEdgeSize : Cube edge length, in [m].
 */
void createCaoFile(double cubeEdgeSize)
{
  std::ofstream fileStream("cube.cao", std::ofstream::out | std::ofstream::trunc);
  if (!fileStream.is_open()) {
    // Without the model file the tracker cannot be initialized; warn early.
    std::cerr << "Cannot create cube.cao model file" << std::endl;
    return;
  }
  const double half = cubeEdgeSize / 2; // cube is centered on the tag face
  fileStream << "V1\n";
  fileStream << "# 3D Points\n";
  fileStream << "8 # Number of points\n";
  // Front face (Z = 0), points 0-3
  fileStream << half << " " << half << " " << 0 << " # Point 0: (X, Y, Z)\n";
  fileStream << half << " " << -half << " " << 0 << " # Point 1\n";
  fileStream << -half << " " << -half << " " << 0 << " # Point 2\n";
  fileStream << -half << " " << half << " " << 0 << " # Point 3\n";
  // Back face (Z = -cubeEdgeSize), points 4-7
  fileStream << -half << " " << half << " " << -cubeEdgeSize << " # Point 4\n";
  fileStream << -half << " " << -half << " " << -cubeEdgeSize << " # Point 5\n";
  fileStream << half << " " << -half << " " << -cubeEdgeSize << " # Point 6\n";
  fileStream << half << " " << half << " " << -cubeEdgeSize << " # Point 7\n";
  fileStream << "# 3D Lines\n";
  fileStream << "0 # Number of lines\n";
  fileStream << "# Faces from 3D lines\n";
  fileStream << "0 # Number of faces\n";
  fileStream << "# Faces from 3D points\n";
  fileStream << "6 # Number of faces\n";
  fileStream << "4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
  fileStream << "4 1 2 5 6\n";
  fileStream << "4 4 7 6 5\n";
  fileStream << "4 0 7 4 3\n";
  fileStream << "4 5 2 3 4\n";
  fileStream << "4 0 1 6 7 # Face 5\n";
  fileStream << "# 3D cylinders\n";
  fileStream << "0 # Number of cylinders\n";
  fileStream << "# 3D circles\n";
  fileStream << "0 # Number of circles\n";
  // File is flushed and closed by the ofstream destructor (RAII).
}
// NOTE(review): fragment — the enclosing function's signature (an AprilTag
// detection helper that returns a state_t) is not visible in this chunk, and
// several statements appear to have been elided by extraction. Code left
// byte-identical; comments only.
{
std::vector<vpHomogeneousMatrix> cMo_vec;
// Detect the tag(s) in image I and estimate one camera-from-tag pose per
// detection into cMo_vec. `ret` is not read in the visible code — the branch
// using it was presumably elided.
bool ret = detector.
detect(I, tagSize, cam, cMo_vec);
// Loop over detected tag poses; the body was elided in this chunk
// (presumably display/selection of the pose to keep).
for (
size_t i = 0;
i < cMo_vec.size();
i++) {
}
return state_tracking;
}
// Reached when no usable tag was detected: stay in the detection state.
return state_detection;
}
// NOTE(review): fragment of a track() helper (monocular variant) — its
// signature is not visible in this chunk, and the tracker.track(...) call
// inside the try block appears to have been elided. Code left byte-identical.
{
try {
}
catch (...) {
// Any tracking exception sends the state machine back to tag detection.
return state_detection;
}
// Reject poses whose reprojection error exceeds the user-supplied threshold
// (in degrees) and fall back to detection to re-initialize the tracker.
double projection_error =
tracker.computeCurrentProjectionError(I, cMo, cam);
if (projection_error > projection_error_threshold) {
return state_detection;
}
// Build a feature-count summary; the display call that consumed `ss` was
// presumably elided by extraction.
{
std::stringstream ss;
ss <<
"Features: edges " <<
tracker.getNbFeaturesEdge() <<
", klt " <<
tracker.getNbFeaturesKlt();
}
return state_tracking;
}
// NOTE(review): fragment of the depth-enabled track() overload — the return
// type, leading parameters and closing of the parameter list are not visible
// in this chunk. The point-cloud parameters differ by build: with PCL a
// pcl::PointCloud is passed directly; otherwise raw vpColVector points plus
// per-camera width/height maps are needed. Code left byte-identical.
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds,
#else
std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds,
std::map<std::string, unsigned int> mapOfWidths, std::map<std::string, unsigned int> mapOfHeights,
#endif
{
tracker.getCameraParameters(cam_color, cam_depth);
// Track using both the gray image and the depth point cloud; failure falls
// back to tag re-detection.
try {
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
tracker.track(mapOfImages, mapOfPointclouds);
#else
tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
#endif
}
catch (...) {
return state_detection;
}
// Reject poses whose reprojection error (computed on the color camera)
// exceeds the user threshold.
double projection_error =
tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
if (projection_error > projection_error_threshold) {
return state_detection;
}
// Draw the model in both views; the depth-camera pose is obtained by
// composing the depth-from-color extrinsics with cMo.
tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth,
vpColor::red, 3);
return state_tracking;
}
/*!
 * Print the command-line synopsis and option descriptions on std::cout.
 *
 * \param argv : Command-line arguments (argv[0] is used as the program name).
 * \param error : 0 to print plain help; otherwise the index in argv of the
 *                unsupported parameter, reported in an extra "Error" section.
 */
void usage(const char **argv, int error)
{
  std::cout << "Synopsis" << std::endl
    << "  " << argv[0]
    << " [--tag-size <size>]"
    << " [--tag-family <family>]"
    << " [--tag-decision-margin-threshold <threshold>]"
    << " [--tag-hamming-distance-threshold <threshold>]"
    << " [--tag-quad-decimate <factor>]"
    << " [--tag-n-threads <number>]"
#if defined(VISP_HAVE_DISPLAY)
    << " [--display-off]"
#endif
    // Fixed: synopsis previously read "<size]" (missing closing '>').
    << " [--cube-size <size>]"
    << " [--use-texture]"
    << " [--use-depth]"
    << " [--projection-error-threshold <threshold>]"
    << " [--help, -h]" << std::endl
    << std::endl;
  std::cout << "Description" << std::endl
    << "  Live execution on images acquired by a realsense camera of the generic model-based" << std::endl
    << "  tracker. The considered object is a cube to which an Apriltag is attached on one of its" << std::endl
    << "  faces. Once detected, the pose of the Apriltag is used to initialise the tracker." << std::endl
    << "  The Apriltag must be centred on a face of the cube. If the tracker fails, the " << std::endl
    << "  tag is used to reset the tracker." << std::endl
    << std::endl
    << "  --tag-size <size>" << std::endl
    << "    Apriltag size in [m]." << std::endl
    // Fixed: documented default now matches opt_tag_size initial value (0.08).
    << "    Default: 0.08" << std::endl
    << std::endl
    << "  --tag-family <family>" << std::endl
    << "    Apriltag family. Supported values are:" << std::endl
    << "       0: TAG_36h11" << std::endl
    << "       1: TAG_36h10 (DEPRECATED)" << std::endl
    << "       2: TAG_36ARTOOLKIT (DEPRECATED)" << std::endl
    << "       3: TAG_25h9" << std::endl
    << "       4: TAG_25h7 (DEPRECATED)" << std::endl
    << "       5: TAG_16h5" << std::endl
    << "       6: TAG_CIRCLE21h7" << std::endl
    << "       7: TAG_CIRCLE49h12" << std::endl
    << "       8: TAG_CUSTOM48h12" << std::endl
    << "       9: TAG_STANDARD41h12" << std::endl
    << "      10: TAG_STANDARD52h13" << std::endl
    << "      11: TAG_ARUCO_4x4_50" << std::endl
    << "      12: TAG_ARUCO_4x4_100" << std::endl
    << "      13: TAG_ARUCO_4x4_250" << std::endl
    << "      14: TAG_ARUCO_4x4_1000" << std::endl
    << "      15: TAG_ARUCO_5x5_50" << std::endl
    << "      16: TAG_ARUCO_5x5_100" << std::endl
    << "      17: TAG_ARUCO_5x5_250" << std::endl
    << "      18: TAG_ARUCO_5x5_1000" << std::endl
    << "      19: TAG_ARUCO_6x6_50" << std::endl
    << "      20: TAG_ARUCO_6x6_100" << std::endl
    << "      21: TAG_ARUCO_6x6_250" << std::endl
    << "      22: TAG_ARUCO_6x6_1000" << std::endl
    << "      23: TAG_ARUCO_7x7_50" << std::endl
    << "      24: TAG_ARUCO_7x7_100" << std::endl
    << "      25: TAG_ARUCO_7x7_250" << std::endl
    << "      26: TAG_ARUCO_7x7_1000" << std::endl
    << "      27: TAG_ARUCO_MIP_36h12" << std::endl
    << "    Default: 0 (36h11)" << std::endl
    << std::endl
    << "  --tag-decision-margin-threshold <threshold>" << std::endl
    << "    Threshold used to discard low-confident detections. A typical value is " << std::endl
    << "    around 100. The higher this value, the more false positives will be filtered" << std::endl
    << "    out. When this value is set to -1, false positives are not filtered out." << std::endl
    << "    Default: 50" << std::endl
    << std::endl
    << "  --tag-hamming-distance-threshold <threshold>" << std::endl
    << "    Threshold used to discard low-confident detections with corrected bits." << std::endl
    << "    A typical value is between 0 and 3. The lower this value, the more false" << std::endl
    << "    positives will be filtered out." << std::endl
    // Fixed: documented default now matches opt_tag_hamming_distance_threshold (2).
    << "    Default: 2" << std::endl
    << std::endl
    << "  --tag-quad-decimate <factor>" << std::endl
    << "    Decimation factor used to detect a tag. " << std::endl
    << "    Default: 1" << std::endl
    << std::endl
    << "  --tag-n-threads <number>" << std::endl
    << "    Number of threads used to detect a tag." << std::endl
    << "    Default: 1" << std::endl
    << std::endl
#if defined(VISP_HAVE_DISPLAY)
    << "  --display-off" << std::endl
    << "    Flag used to turn display off." << std::endl
    << "    Default: enabled" << std::endl
    << std::endl
#endif
    << "  --cube-size <size>" << std::endl
    << "    Cube size in meter." << std::endl
    << "    Default: 0.125" << std::endl
    << std::endl
#if defined(VISP_HAVE_OPENCV)
    << "  --use-texture" << std::endl
    << "    Flag to enable usage of keypoint features." << std::endl
    << "    Default: disabled" << std::endl
    << std::endl
#endif
    << "  --use-depth" << std::endl
    << "    Flag to enable usage of depth map as features." << std::endl
    << "    Default: disabled" << std::endl
    << std::endl
    << "  --projection-error-threshold <threshold>" << std::endl
    // Fixed: the tracker is re-initialized from the tag when the projection
    // error EXCEEDS this threshold (see the `projection_error >` tests).
    << "    Threshold in the range [0:90] deg used to restart the tracker when the projection"
    << " error is above this threshold." << std::endl
    << "    Default: 40" << std::endl
    << std::endl
    << "  --help, -h" << std::endl
    << "    Print this helper message." << std::endl
    << std::endl;
  if (error) {
    std::cout << "Error" << std::endl
      << "  "
      << "Unsupported parameter " << argv[error] << std::endl;
  }
}
// NOTE(review): this main() is garbled by documentation extraction — many
// declarations (config, realsense, detector, tracker, images, cam_color,
// cam_depth, opt_tag_family, the catch clause for `e`, display handling) are
// missing and the braces do not balance in the visible text. Code is left
// byte-identical; comments only.
int main(int argc, const char **argv)
{
// Default option values; each can be overridden on the command line
// (documented in usage()).
double opt_tag_size = 0.08;
float opt_tag_quad_decimate = 1.0;
float opt_tag_decision_margin_threshold = 50;
int opt_tag_hamming_distance_threshold = 2;
int opt_tag_nthreads = 1;
double opt_cube_size = 0.125;
#ifdef VISP_HAVE_OPENCV
bool opt_use_texture = false;
#endif
bool opt_use_depth = false;
double opt_projection_error_threshold = 40.;
#if !(defined(VISP_HAVE_DISPLAY))
bool opt_display_off = true;
#else
bool opt_display_off = false;
#endif
// Command-line parsing: value-taking options consume argv[i+1] via ++i.
for (
int i = 1;
i < argc;
i++) {
if (std::string(argv[i]) == "--tag-size" && i + 1 < argc) {
opt_tag_size = atof(argv[++i]);
}
// Branch body elided by extraction — presumably assigns opt_tag_family.
else if (std::string(argv[i]) == "--tag-family" && i + 1 < argc) {
}
else if (std::string(argv[i]) == "--tag-decision-margin-threshold" && i + 1 < argc) {
opt_tag_decision_margin_threshold = static_cast<float>(atof(argv[++i]));
}
else if (std::string(argv[i]) == "--tag-hamming-distance-threshold" && i + 1 < argc) {
opt_tag_hamming_distance_threshold = atoi(argv[++i]);
}
else if (std::string(argv[i]) == "--tag-quad-decimate" && i + 1 < argc) {
opt_tag_quad_decimate = static_cast<float>(atof(argv[++i]));
}
else if (std::string(argv[i]) == "--tag-n-threads" && i + 1 < argc) {
opt_tag_nthreads = atoi(argv[++i]);
}
#if defined(VISP_HAVE_DISPLAY)
else if (std::string(argv[i]) == "--display-off") {
opt_display_off = true;
}
#endif
else if (std::string(argv[i]) == "--cube-size" && i + 1 < argc) {
opt_cube_size = atof(argv[++i]);
}
#ifdef VISP_HAVE_OPENCV
else if (std::string(argv[i]) == "--use-texture") {
opt_use_texture = true;
}
#endif
else if (std::string(argv[i]) == "--use-depth") {
opt_use_depth = true;
}
else if (std::string(argv[i]) == "--projection-error-threshold" && i + 1 < argc) {
opt_projection_error_threshold = atof(argv[++i]);
}
else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
usage(argv, 0);
return EXIT_SUCCESS;
}
else {
// Unknown option: report it (argv index i) and abort.
usage(argv, i);
return EXIT_FAILURE;
}
}
// Generate the cube CAD model used by the model-based tracker.
createCaoFile(opt_cube_size);
// Display handles; the C++98 raw-pointer branch content was elided.
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
std::shared_ptr<vpDisplay> d_gray;
std::shared_ptr<vpDisplay> d_depth;
#else
#endif
try {
// Configure the RealSense color and depth streams.
// NOTE(review): `config`, `width`, `height`, `stream_fps` declarations are
// not visible in this chunk.
config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, stream_fps);
config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, stream_fps);
config.disable_stream(RS2_STREAM_INFRARED);
if (opt_use_depth) {
}
// Multi-camera containers keyed by camera name ("Camera1" = color,
// "Camera2" = depth).
std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
#else
std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
std::vector<vpColVector> pointcloud;
#endif
std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;
// Summarize the effective configuration on stdout.
std::cout << "Cube size                  : " << opt_cube_size << std::endl;
std::cout << "AprilTag size              : " << opt_tag_size << std::endl;
std::cout << "AprilTag family            : " << opt_tag_family << std::endl;
std::cout << "Camera parameters" << std::endl;
std::cout <<
"  Color:\n" <<
cam_color << std::endl;
if (opt_use_depth) {
std::cout <<
"  Depth:\n" <<
cam_depth << std::endl;
}
std::cout << "Detection " << std::endl;
std::cout << "  Quad decimate              : " << opt_tag_quad_decimate << std::endl;
std::cout << "  Threads number             : " << opt_tag_nthreads << std::endl;
std::cout << "  Decision margin            : " << opt_tag_decision_margin_threshold << " (applied to ArUco tags only)" << std::endl;
std::cout << "Tracker " << std::endl;
std::cout << "  Use edges                  : 1" << std::endl;
std::cout << "  Use texture                : "
#ifdef VISP_HAVE_OPENCV
<< opt_use_texture << std::endl;
#else
<< " na" << std::endl;
#endif
std::cout << "  Use depth                  : " << opt_use_depth << std::endl;
std::cout << "  Projection error           : " << opt_projection_error_threshold << std::endl;
// Display creation elided by extraction; only the condition skeleton remains.
if (!opt_display_off) {
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
if (opt_use_depth)
#else
if (opt_use_depth)
#endif
}
// Tracker-type selection and KLT settings; most statements elided.
std::vector<int> trackerTypes;
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
if (opt_use_texture)
else
#endif
if (opt_use_depth)
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
if (opt_use_texture) {
tracker.setKltOpencv(klt_settings);
}
#endif
if (opt_use_depth) {
// Two-camera setup: same cube model for both views, plus the color-to-depth
// extrinsic transformation.
tracker.setCameraParameters(cam_color, cam_depth);
tracker.loadModel(
"cube.cao",
"cube.cao");
tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
}
else {
tracker.setCameraParameters(cam_color);
}
// Main loop: alternate between tag detection (to initialize/reset the pose)
// and model-based tracking until the user quits.
state_t state = state_detection;
while (state != state_quit) {
if (opt_use_depth) {
// Grab color + depth (+ point cloud); the PCL and non-PCL acquire
// overloads differ in how the point cloud is passed.
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
realsense.
acquire((
unsigned char *)I_color.bitmap, (
unsigned char *)I_depth_raw.bitmap,
nullptr, pointcloud,
nullptr);
#else
realsense.
acquire((
unsigned char *)I_color.bitmap, (
unsigned char *)I_depth_raw.bitmap, &pointcloud,
nullptr,
nullptr);
#endif
mapOfImages["Camera1"] = &I_gray;
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
mapOfPointclouds["Camera2"] = pointcloud;
#else
mapOfPointclouds["Camera2"] = &pointcloud;
mapOfWidths[
"Camera2"] =
width;
mapOfHeights[
"Camera2"] =
height;
#endif
}
else {
}
if (state == state_detection) {
state = detectAprilTag(I_gray, detector, opt_tag_size, cam_color, cMo);
// A successful detection initializes the tracker from the tag pose.
if (state == state_tracking) {
if (opt_use_depth) {
mapOfCameraPoses[
"Camera1"] =
cMo;
tracker.initFromPose(mapOfImages, mapOfCameraPoses);
}
else {
}
}
}
if (state == state_tracking) {
if (opt_use_depth) {
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
state = track(mapOfImages, mapOfPointclouds, I_gray, I_depth, depth_M_color, tracker,
opt_projection_error_threshold, cMo);
#else
state = track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights, I_gray, I_depth, depth_M_color,
tracker, opt_projection_error_threshold, cMo);
#endif
}
else {
state = track(I_gray, tracker, opt_projection_error_threshold, cMo);
}
// Feature-count summary; the display call consuming `ss` was elided.
{
std::stringstream ss;
ss <<
"Features: edges " <<
tracker.getNbFeaturesEdge() <<
", klt " <<
tracker.getNbFeaturesKlt()
<<
", depth " <<
tracker.getNbFeaturesDepthDense();
}
}
state = state_quit;
}
if (opt_use_depth) {
state = state_quit;
}
}
}
}
// NOTE(review): the `catch (const vpException &e)` clause that binds `e`
// is missing from this chunk.
std::cerr <<
"Catch an exception: " <<
e.getMessage() << std::endl;
}
// Pre-C++11 builds used raw display pointers that must be freed manually.
#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
if (!opt_display_off) {
delete d_gray;
if (opt_use_depth)
delete d_depth;
}
#endif
return EXIT_SUCCESS;
}
#else
/*!
 * Fallback entry point compiled when the required third parties are missing.
 * The guard above needs VISP_HAVE_APRILTAG, VISP_HAVE_REALSENSE2 AND
 * VISP_HAVE_MODULE_MBT, so report each missing piece explicitly.
 */
int main()
{
#ifndef VISP_HAVE_APRILTAG
  std::cout << "ViSP is not built with Apriltag support" << std::endl;
#endif
#ifndef VISP_HAVE_REALSENSE2
  std::cout << "ViSP is not built with librealsense2 support" << std::endl;
#endif
#ifndef VISP_HAVE_MODULE_MBT
  // Previously unreported even though the guard also requires the mbt module.
  std::cout << "ViSP is not built with model-based tracker (mbt) module" << std::endl;
#endif
  std::cout << "Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
  return EXIT_SUCCESS;
}
#endif
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
void setAprilTagQuadDecimate(float quadDecimate)
void setAprilTagHammingDistanceThreshold(int hammingDistanceThreshold)
bool detect(const vpImage< unsigned char > &I) VP_OVERRIDE
@ TAG_36h11
AprilTag 36h11 pattern (recommended).
void setAprilTagNbThreads(int nThreads)
void setAprilTagDecisionMarginThreshold(float decisionMarginThreshold)
size_t getNbObjects() const
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
Real-time 6D object pose tracking using its CAD model.
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
vpHomogeneousMatrix getTransformation(const rs2_stream &from, const rs2_stream &to, int from_index=-1) const
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.