#include <iostream>
#include <sstream>

#include <visp3/core/vpConfig.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/io/vpImageIo.h>
10void usage(
const char **argv,
int error);
12void usage(
const char **argv,
int error)
14 std::cout <<
"Synopsis" << std::endl
16 <<
" [--input <filename>]"
17 <<
" [--tag-size <size>]"
18 <<
" [--tag-family <family>]"
19 <<
" [--tag-decision-margin-threshold <threshold>]"
20 <<
" [--tag-hamming-distance-threshold <threshold>]"
21 <<
" [--tag-quad-decimate <factor>]"
22 <<
" [--tag-n-threads <number>]"
23 <<
" [--tag-z-aligned]"
24 <<
" [--tag-pose-method <method>]"
25#if defined(VISP_HAVE_PUGIXML)
26 <<
" [--intrinsic <xmlfile>]"
27 <<
" [--camera <name>]"
29#if defined(VISP_HAVE_DISPLAY)
32 <<
" [--thickness <thickness>"
34 <<
" [--help, -h]" << std::endl
36 std::cout <<
"Description" << std::endl
37 <<
" Detect AprilTags in an image and compute their corresponding pose." << std::endl
39 <<
" --input <filename>" << std::endl
40 <<
" Image filename to process." << std::endl
41 <<
" Default: AprilTag.jpg" << std::endl
43 <<
" --tag-size <size>" << std::endl
44 <<
" Apriltag size in [m]." << std::endl
45 <<
" Default: 0.03" << std::endl
47 <<
" --tag-family <family>" << std::endl
48 <<
" Apriltag family. Supported values are:" << std::endl
49 <<
" 0: TAG_36h11" << std::endl
50 <<
" 1: TAG_36h10 (DEPRECATED)" << std::endl
51 <<
" 2: TAG_36ARTOOLKIT (DEPRECATED)" << std::endl
52 <<
" 3: TAG_25h9" << std::endl
53 <<
" 4: TAG_25h7 (DEPRECATED)" << std::endl
54 <<
" 5: TAG_16h5" << std::endl
55 <<
" 6: TAG_CIRCLE21h7" << std::endl
56 <<
" 7: TAG_CIRCLE49h12" << std::endl
57 <<
" 8: TAG_CUSTOM48h12" << std::endl
58 <<
" 9: TAG_STANDARD41h12" << std::endl
59 <<
" 10: TAG_STANDARD52h13" << std::endl
60 <<
" 11: TAG_ARUCO_4x4_50" << std::endl
61 <<
" 12: TAG_ARUCO_4x4_100" << std::endl
62 <<
" 13: TAG_ARUCO_4x4_250" << std::endl
63 <<
" 14: TAG_ARUCO_4x4_1000" << std::endl
64 <<
" 15: TAG_ARUCO_5x5_50" << std::endl
65 <<
" 16: TAG_ARUCO_5x5_100" << std::endl
66 <<
" 17: TAG_ARUCO_5x5_250" << std::endl
67 <<
" 18: TAG_ARUCO_5x5_1000" << std::endl
68 <<
" 19: TAG_ARUCO_6x6_50" << std::endl
69 <<
" 20: TAG_ARUCO_6x6_100" << std::endl
70 <<
" 21: TAG_ARUCO_6x6_250" << std::endl
71 <<
" 22: TAG_ARUCO_6x6_1000" << std::endl
72 <<
" 23: TAG_ARUCO_7x7_50" << std::endl
73 <<
" 24: TAG_ARUCO_7x7_100" << std::endl
74 <<
" 25: TAG_ARUCO_7x7_250" << std::endl
75 <<
" 26: TAG_ARUCO_7x7_1000" << std::endl
76 <<
" 27: TAG_ARUCO_MIP_36h12" << std::endl
77 <<
" Default: 0 (36h11)" << std::endl
79 <<
" --tag-decision-margin-threshold <threshold>" << std::endl
80 <<
" Threshold used to discard low-confident detections. A typical value is " << std::endl
81 <<
" around 100. The higher this value, the more false positives will be filtered" << std::endl
82 <<
" out. When this value is set to -1, false positives are not filtered out." << std::endl
83 <<
" Default: 50" << std::endl
85 <<
" --tag-hamming-distance-threshold <threshold>" << std::endl
86 <<
" Threshold used to discard low-confident detections with corrected bits." << std::endl
87 <<
" A typical value is between 0 and 3. The lower this value, the more false" << std::endl
88 <<
" positives will be filtered out." << std::endl
89 <<
" Default: 0" << std::endl
91 <<
" --tag-quad-decimate <factor>" << std::endl
92 <<
" Decimation factor used to detect a tag. " << std::endl
93 <<
" Default: 1" << std::endl
95 <<
" --tag-n-threads <number>" << std::endl
96 <<
" Number of threads used to detect a tag." << std::endl
97 <<
" Default: 1" << std::endl
99 <<
" --tag-z-aligned" << std::endl
100 <<
" When enabled, tag z-axis and camera z-axis are aligned." << std::endl
101 <<
" Default: false" << std::endl
103 <<
" --tag-pose-method <method>" << std::endl
104 <<
" Algorithm used to compute the tag pose from its 4 corners." << std::endl
105 <<
" Possible values are:" << std::endl
106 <<
" 0: HOMOGRAPHY" << std::endl
107 <<
" 1: HOMOGRAPHY_VIRTUAL_VS" << std::endl
108 <<
" 2: DEMENTHON_VIRTUAL_VS" << std::endl
109 <<
" 3: LAGRANGE_VIRTUAL_VS" << std::endl
110 <<
" 4: BEST_RESIDUAL_VIRTUAL_VS" << std::endl
111 <<
" 5: HOMOGRAPHY_ORTHOGONAL_ITERATION" << std::endl
112 <<
" Default: 1 (HOMOGRAPHY_VIRTUAL_VS)" << std::endl
114#if defined(VISP_HAVE_PUGIXML)
115 <<
" --intrinsic <xmlfile>" << std::endl
116 <<
" Camera intrinsic parameters file in xml format." << std::endl
117 <<
" Default: empty" << std::endl
119 <<
" --camera <name>" << std::endl
120 <<
" Camera name in the intrinsic parameters file in xml format." << std::endl
121 <<
" Default: empty" << std::endl
124#if defined(VISP_HAVE_DISPLAY)
125 <<
" --display-tag" << std::endl
126 <<
" Flag used to enable displaying the edges of a tag." << std::endl
127 <<
" Default: disabled" << std::endl
129 <<
" --color <id>" << std::endl
130 <<
" Color id used to display the frame over each tag." << std::endl
131 <<
" Possible values are:" << std::endl
132 <<
" -1: R-G-B colors for X, Y, Z axis respectively" << std::endl
133 <<
" 0: all axis in black" << std::endl
134 <<
" 1: all axis in white" << std::endl
135 <<
" ..." << std::endl
136 <<
" Default: -1" << std::endl
138 <<
" --thickness <thickness>" << std::endl
139 <<
" Thickness of the drawings in overlay." << std::endl
140 <<
" Default: 2" << std::endl
143 <<
" --help, -h" << std::endl
144 <<
" Print this helper message." << std::endl
148 std::cout <<
"Error" << std::endl
150 <<
"Unsupported parameter " << argv[
error] << std::endl;
154int main(
int argc,
const char **argv)
157#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_DISPLAY)
160#ifdef ENABLE_VISP_NAMESPACE
163 std::string opt_input_filename =
"AprilTag.jpg";
166 double opt_tag_size = 0.053;
167 float opt_tag_quad_decimate = 1.0;
168 float opt_tag_decision_margin_threshold = 50;
169 int opt_tag_hamming_distance_threshold = 2;
170 int opt_tag_nThreads = 1;
171 std::string opt_intrinsic_file =
"";
172 std::string opt_camera_name =
"";
173 bool opt_display_tag =
false;
174 int opt_color_id = -1;
175 unsigned int opt_thickness = 2;
176 bool opt_tag_z_align_frame =
false;
178 for (
int i = 1;
i < argc; ++
i) {
179 if (std::string(argv[i]) ==
"--input" && i + 1 < argc) {
180 opt_input_filename = std::string(argv[++i]);
182 else if (std::string(argv[i]) ==
"--tag-size" && i + 1 < argc) {
183 opt_tag_size = atof(argv[++i]);
185 else if (std::string(argv[i]) ==
"--tag-family" && i + 1 < argc) {
188 else if (std::string(argv[i]) ==
"--tag-quad-decimate" && i + 1 < argc) {
189 opt_tag_quad_decimate =
static_cast<float>(atof(argv[++i]));
191 else if (std::string(argv[i]) ==
"--tag-n-threads" && i + 1 < argc) {
192 opt_tag_nThreads = atoi(argv[++i]);
194 else if (std::string(argv[i]) ==
"--tag-z-aligned") {
195 opt_tag_z_align_frame =
true;
197 else if (std::string(argv[i]) ==
"--tag-pose-method" && i + 1 < argc) {
200 else if (std::string(argv[i]) ==
"--tag-decision-margin-threshold" && i + 1 < argc) {
201 opt_tag_decision_margin_threshold =
static_cast<float>(atof(argv[++i]));
203 else if (std::string(argv[i]) ==
"--tag-hamming-distance-threshold" && i + 1 < argc) {
204 opt_tag_hamming_distance_threshold = atoi(argv[++i]);
206#if defined(VISP_HAVE_PUGIXML)
207 else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
208 opt_intrinsic_file = std::string(argv[++i]);
210 else if (std::string(argv[i]) ==
"--camera-name" && i + 1 < argc) {
211 opt_camera_name = std::string(argv[++i]);
214#if defined(VISP_HAVE_DISPLAY)
215 else if (std::string(argv[i]) ==
"--display-tag") {
216 opt_display_tag =
true;
218 else if (std::string(argv[i]) ==
"--color" && i + 1 < argc) {
219 opt_color_id = atoi(argv[++i]);
221 else if (std::string(argv[i]) ==
"--thickness" && i + 1 < argc) {
222 opt_thickness =
static_cast<unsigned int>(atoi(argv[++i]));
225 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
235 std::cout <<
"Input data" << std::endl;
236 std::cout <<
" Image : " << opt_input_filename << std::endl;
239 cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779);
240#if defined(VISP_HAVE_PUGIXML)
242 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
243 std::cout <<
" Intrinsics : " << opt_intrinsic_file << std::endl << std::endl;
247 std::cout <<
" Intrinsics : default" << std::endl << std::endl;
250 std::cout <<
" Intrinsics : default" << std::endl << std::endl;
253 std::cout <<
cam << std::endl;
254 std::cout <<
"Tag detector settings" << std::endl;
255 std::cout <<
" Tag size [m] : " << opt_tag_size << std::endl;
256 std::cout <<
" Tag family : " << opt_tag_family << std::endl;
257 std::cout <<
" Quad decimate : " << opt_tag_quad_decimate << std::endl;
258 std::cout <<
" Decision margin threshold : " << opt_tag_decision_margin_threshold << std::endl;
259 std::cout <<
" Hamming distance threshold: " << opt_tag_hamming_distance_threshold << std::endl;
260 std::cout <<
" Num threads : " << opt_tag_nThreads << std::endl;
261 std::cout <<
" Z aligned : " << opt_tag_z_align_frame << std::endl;
262 std::cout <<
" Pose estimation : " << opt_tag_pose_estimation_method << std::endl;
264#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
265 std::shared_ptr<vpDisplay> display, display2;
276#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
287 detector.setAprilTagQuadDecimate(opt_tag_quad_decimate);
288 detector.setAprilTagPoseEstimationMethod(opt_tag_pose_estimation_method);
289 detector.setAprilTagNbThreads(opt_tag_nThreads);
291 detector.setZAlignedWithCameraAxis(opt_tag_z_align_frame);
292 detector.setAprilTagDecisionMarginThreshold(opt_tag_decision_margin_threshold);
293 detector.setAprilTagHammingDistanceThreshold(opt_tag_hamming_distance_threshold);
300 std::vector<vpHomogeneousMatrix> cMo_vec;
301 detector.detect(I, opt_tag_size, cam, cMo_vec);
305 std::stringstream ss;
306 ss <<
"Detection time: " <<
t <<
" ms for " << detector.getNbObjects() <<
" tags";
310 std::vector<int> tag_ids = detector.getTagsId();
314 std::vector<float> tag_decision_margins = detector.getTagsDecisionMargin();
318 std::vector<int> tag_hamming_distances = detector.getTagsHammingDistance();
322 std::cout <<
"\nDetected tags" << std::endl;
323 for (
size_t i = 0;
i < detector.getNbObjects();
i++) {
326 std::vector<vpImagePoint>
p = detector.getPolygon(i);
327 vpRect bbox = detector.getBBox(i);
331 std::string message = detector.getMessage(i);
334 ss << message <<
" with decision margin: " << tag_decision_margins[
i] <<
" and hamming distance: " << tag_hamming_distances[
i];
335 std::cout <<
" " << ss.str() << std::endl;
338 ss <<
"Tag id: " << tag_ids[
i] <<
" - " << tag_decision_margins[
i];
342 for (
size_t j = 0;
j <
p.size();
j++) {
344 std::ostringstream number;
358 for (
size_t i = 0;
i < cMo_vec.size();
i++) {
369#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
378 std::vector<std::vector<vpImagePoint> > tagsCorners = detector.getTagsCorners();
379 detector.displayTags(I_color, tagsCorners,
vpColor::none, 3);
380 detector.displayFrames(I_color, cMo_vec, cam, opt_tag_size / 2,
vpColor::none, 3);
387 std::cerr <<
"Catch an exception: " <<
e.getMessage() << std::endl;
390#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
391 if (display !=
nullptr) {
394 if (display2 !=
nullptr) {
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static vpColor getColor(const unsigned int &i)
static const vpColor none
static const vpColor blue
static const vpColor green
@ TAG_36h11
AprilTag 36h11 pattern (recommended).
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void close(vpImage< unsigned char > &I)
static void flush(const vpImage< unsigned char > &I)
static void displayRectangle(const vpImage< unsigned char > &I, const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only the 2D coordinates of the point.
Definition of the vpImage class member functions.
Defines a rectangle in the plane.
XML parser to load and save intrinsic camera parameters.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()