Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
vpMbDepthNormalTracker.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * Model-based tracker using depth normal features.
32 */
33
34#include <iostream>
35
36#include <visp3/core/vpConfig.h>
37
38#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
39#include <pcl/point_cloud.h>
40#endif
41
42#include <visp3/core/vpDisplay.h>
43#include <visp3/core/vpExponentialMap.h>
44#include <visp3/core/vpTrackingException.h>
45#include <visp3/mbt/vpMbDepthNormalTracker.h>
46#include <visp3/mbt/vpMbtXmlGenericParser.h>
47
48#if DEBUG_DISPLAY_DEPTH_NORMAL
49#include <visp3/gui/vpDisplayGDI.h>
50#include <visp3/gui/vpDisplayX.h>
51#endif
52
// NOTE(review): the constructor signature and the beginning of its member
// initializer list are missing from this extracted listing — confirm against
// the upstream source. Only the debug-display initializers are visible.
#if DEBUG_DISPLAY_DEPTH_NORMAL
  ,
  m_debugDisp_depthNormal(nullptr), m_debugImage_depthNormal()
#endif
{
#ifdef VISP_HAVE_OGRE
  // Name the Ogre window used for the Ogre-based visibility tests
  faces.getOgreContext()->setWindowName("MBT Depth");
#endif

  // Allocate the debug display according to the GUI backend available at build time
#if defined(VISP_HAVE_X11) && DEBUG_DISPLAY_DEPTH_NORMAL
  m_debugDisp_depthNormal = new vpDisplayX;
#elif defined(VISP_HAVE_GDI) && DEBUG_DISPLAY_DEPTH_NORMAL
  m_debugDisp_depthNormal = new vpDisplayGDI;
#endif
}
79
81{
82 for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
83 delete m_depthNormalFaces[i];
84 }
85}
86
87void vpMbDepthNormalTracker::addFace(vpMbtPolygon &polygon, bool alreadyClose)
88{
89 if (polygon.nbpt < 3) {
90 return;
91 }
92
93 // Copy hidden faces
95
97 normal_face->m_hiddenFace = &faces;
98 normal_face->m_polygon = &polygon;
99 normal_face->m_cam = m_cam;
100 normal_face->m_useScanLine = useScanLine;
101 normal_face->m_clippingFlag = clippingFlag;
102 normal_face->m_distNearClip = distNearClip;
103 normal_face->m_distFarClip = distFarClip;
108
109 // Add lines that compose the face
110 unsigned int nbpt = polygon.getNbPoint();
111 if (nbpt > 0) {
112 for (unsigned int i = 0; i < nbpt - 1; i++) {
113 normal_face->addLine(polygon.p[i], polygon.p[i + 1], &m_depthNormalHiddenFacesDisplay, m_rand, polygon.getIndex(),
114 polygon.getName());
115 }
116
117 if (!alreadyClose) {
118 // Add last line that closes the face
119 normal_face->addLine(polygon.p[nbpt - 1], polygon.p[0], &m_depthNormalHiddenFacesDisplay, m_rand,
120 polygon.getIndex(), polygon.getName());
121 }
122 }
123
124 // Construct a vpPlane in object frame
125 vpPoint pts[3];
126 pts[0] = polygon.p[0];
127 pts[1] = polygon.p[1];
128 pts[2] = polygon.p[2];
129 normal_face->m_planeObject = vpPlane(pts[0], pts[1], pts[2], vpPlane::object_frame);
130
131 m_depthNormalFaces.push_back(normal_face);
132}
133
134void vpMbDepthNormalTracker::computeVisibility(unsigned int width, unsigned int height)
135{
136 bool changed = false;
137 faces.setVisible(width, height, m_cam, m_cMo, angleAppears, angleDisappears, changed);
138
139 if (useScanLine) {
140 // if (clippingFlag <= 2) {
141 // cam.computeFov(width, height);
142 // }
143
144 faces.computeClippedPolygons(m_cMo, m_cam);
145 faces.computeScanLineRender(m_cam, width, height);
146 }
147
148 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
149 it != m_depthNormalFaces.end(); ++it) {
150 vpMbtFaceDepthNormal *face_normal = *it;
151 face_normal->computeVisibility();
152 }
153}
154
156{
157 double normRes = 0;
158 double normRes_1 = -1;
159 unsigned int iter = 0;
160
162 unsigned int nb_features = static_cast<unsigned int>(3 * m_depthNormalListOfDesiredFeatures.size());
163
164 vpColVector error_prev(nb_features);
165 vpMatrix LTL;
166 vpColVector LTR, v;
167
168 double mu = m_initialMu;
169 vpHomogeneousMatrix cMo_prev;
170
171 bool isoJoIdentity = m_isoJoIdentity; // Backup since it can be modified if L is not full rank
172 if (isoJoIdentity)
173 oJo.eye();
174
176 vpMatrix L_true, LVJ_true;
177
178 while (std::fabs(normRes_1 - normRes) > m_stopCriteriaEpsilon && (iter < m_maxIter)) {
180
181 bool reStartFromLastIncrement = false;
182 computeVVSCheckLevenbergMarquardt(iter, m_error_depthNormal, error_prev, cMo_prev, mu, reStartFromLastIncrement);
183
184 if (!reStartFromLastIncrement) {
187
188 if (computeCovariance) {
189 L_true = m_L_depthNormal;
190 if (!isoJoIdentity) {
191 cVo.buildFrom(m_cMo);
192 LVJ_true = (m_L_depthNormal * (cVo * oJo));
193 }
194 }
195
196 // Compute DoF only once
197 if (iter == 0) {
198 // If all the 6 dof should be estimated, we check if the interaction
199 // matrix is full rank. If not we remove automatically the dof that
200 // cannot be estimated. This is particularly useful when considering
201 // circles (rank 5) and cylinders (rank 4)
202 if (isoJoIdentity) {
203 cVo.buildFrom(m_cMo);
204
205 vpMatrix K; // kernel
206 unsigned int rank = (m_L_depthNormal * cVo).kernel(K);
207 if (rank == 0) {
208 throw vpException(vpException::fatalError, "Rank=0, cannot estimate the pose !");
209 }
210
211 if (rank != 6) {
212 vpMatrix I; // Identity
213 I.eye(6);
214 oJo = I - K.AtA();
215
216 isoJoIdentity = false;
217 }
218 }
219 }
220
221 double num = 0.0, den = 0.0;
222 for (unsigned int i = 0; i < m_L_depthNormal.getRows(); i++) {
223 // Compute weighted errors and stop criteria
226 den += m_w_depthNormal[i];
227
228 // weight interaction matrix
229 for (unsigned int j = 0; j < 6; j++) {
231 }
232 }
233
235 m_error_depthNormal, error_prev, LTR, mu, v);
236
237 cMo_prev = m_cMo;
239
240 normRes_1 = normRes;
241 normRes = sqrt(num / den);
242 }
243
244 iter++;
245 }
246
247 computeCovarianceMatrixVVS(isoJoIdentity, m_w_depthNormal, cMo_prev, L_true, LVJ_true, m_error_depthNormal);
248}
249
251{
252 unsigned int nb_features = static_cast<unsigned int>(3 * m_depthNormalListOfDesiredFeatures.size());
253
254 m_L_depthNormal.resize(nb_features, 6, false, false);
255 m_error_depthNormal.resize(nb_features, false);
256 m_weightedError_depthNormal.resize(nb_features, false);
257
258 m_w_depthNormal.resize(nb_features, false);
259 m_w_depthNormal = 1;
260
261 m_robust_depthNormal.setMinMedianAbsoluteDeviation(1e-3);
262}
263
265{
266 unsigned int cpt = 0;
267 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalListOfActiveFaces.begin();
268 it != m_depthNormalListOfActiveFaces.end(); ++it) {
269 vpMatrix L_face;
270 vpColVector features_face;
271
272 (*it)->computeInteractionMatrix(m_cMo, L_face, features_face);
273
274 vpColVector face_error = features_face - m_depthNormalListOfDesiredFeatures[static_cast<size_t>(cpt)];
275
276 if (!(*it)->planeIsInvalid(m_cMo, angleDisappears)) {
277 m_error_depthNormal.insert(cpt * 3, face_error);
278 m_L_depthNormal.insert(L_face, cpt * 3, 0);
279 }
280 else {
281 face_error = 0;
282 L_face = 0;
283 m_error_depthNormal.insert(cpt * 3, face_error);
284 m_L_depthNormal.insert(L_face, cpt * 3, 0);
285 }
286
287 cpt++;
288 }
289}
290
292 const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
293 bool displayFullModel)
294{
295 std::vector<std::vector<double> > models =
296 vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
297
298 for (size_t i = 0; i < models.size(); i++) {
299 if (vpMath::equal(models[i][0], 0)) {
300 vpImagePoint ip1(models[i][1], models[i][2]);
301 vpImagePoint ip2(models[i][3], models[i][4]);
302 vpDisplay::displayLine(I, ip1, ip2, col, thickness);
303 }
304 }
305
306 if (displayFeatures) {
307 std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
308 for (size_t i = 0; i < features.size(); i++) {
309 vpImagePoint im_centroid(features[i][1], features[i][2]);
310 vpImagePoint im_extremity(features[i][3], features[i][4]);
311 bool desired = vpMath::equal(features[i][0], 2);
312 vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
313 }
314 }
315}
316
318 const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
319 bool displayFullModel)
320{
321 std::vector<std::vector<double> > models =
322 vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
323
324 for (size_t i = 0; i < models.size(); i++) {
325 if (vpMath::equal(models[i][0], 0)) {
326 vpImagePoint ip1(models[i][1], models[i][2]);
327 vpImagePoint ip2(models[i][3], models[i][4]);
328 vpDisplay::displayLine(I, ip1, ip2, col, thickness);
329 }
330 }
331
332 if (displayFeatures) {
333 std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
334 for (size_t i = 0; i < features.size(); i++) {
335 vpImagePoint im_centroid(features[i][1], features[i][2]);
336 vpImagePoint im_extremity(features[i][3], features[i][4]);
337 bool desired = vpMath::equal(features[i][0], 2);
338 vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
339 }
340 }
341}
342
344{
345 std::vector<std::vector<double> > features;
346
347 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
348 it != m_depthNormalFaces.end(); ++it) {
349 vpMbtFaceDepthNormal *face_normal = *it;
350 std::vector<std::vector<double> > currentFeatures = face_normal->getFeaturesForDisplay(m_cMo, m_cam);
351 features.insert(features.end(), currentFeatures.begin(), currentFeatures.end());
352 }
353
354 return features;
355}
356
372std::vector<std::vector<double> > vpMbDepthNormalTracker::getModelForDisplay(unsigned int width, unsigned int height,
373 const vpHomogeneousMatrix &cMo,
374 const vpCameraParameters &cam,
375 bool displayFullModel)
376{
377 std::vector<std::vector<double> > models;
378
379 vpCameraParameters c = cam;
380
381 bool changed = false;
382 m_depthNormalHiddenFacesDisplay.setVisible(width, height, c, cMo, angleAppears, angleDisappears, changed);
383
384 if (useScanLine) {
385 c.computeFov(width, height);
386
387 m_depthNormalHiddenFacesDisplay.computeClippedPolygons(cMo, c);
388 m_depthNormalHiddenFacesDisplay.computeScanLineRender(c, width, height);
389 }
390
391 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
392 it != m_depthNormalFaces.end(); ++it) {
393 vpMbtFaceDepthNormal *face_normal = *it;
394 std::vector<std::vector<double> > modelLines =
395 face_normal->getModelForDisplay(width, height, cMo, cam, displayFullModel);
396 models.insert(models.end(), modelLines.begin(), modelLines.end());
397 }
398
399 return models;
400}
401
403{
404 if (!modelInitialised) {
405 throw vpException(vpException::fatalError, "model not initialized");
406 }
407
408 bool reInitialisation = false;
409 if (!useOgre) {
410 faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
411 }
412 else {
413#ifdef VISP_HAVE_OGRE
414 if (!faces.isOgreInitialised()) {
415 faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());
416 faces.setOgreShowConfigDialog(ogreShowConfigDialog);
417 faces.initOgre(m_cam);
418 // Turn off Ogre config dialog display for the next call to this
419 // function since settings are saved in the ogre.cfg file and used
420 // during the next call
421 ogreShowConfigDialog = false;
422 }
423
424 faces.setVisibleOgre(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
425#else
426 faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
427#endif
428 }
429
430 if (useScanLine || clippingFlag > 3)
431 m_cam.computeFov(I.getWidth(), I.getHeight());
432
433 computeVisibility(I.getWidth(), I.getHeight());
434}
435
/*!
  Load tracker settings (camera parameters, clipping, ...) from an XML
  configuration file parsed with pugixml.

  \param configFile : Path to the XML configuration file.
  \param verbose : If true, print parsing information.

  \exception vpException::ioError : If the file cannot be parsed, or if ViSP
  was built without pugixml.
*/
void vpMbDepthNormalTracker::loadConfigFile(const std::string &configFile, bool verbose)
{
#if defined(VISP_HAVE_PUGIXML)
  // NOTE(review): the declaration of the parser object `xmlp` and several
  // xmlp.set*() configuration calls appear to be missing from this extracted
  // listing — confirm against the upstream source.
  xmlp.setVerbose(verbose);

  try {
    if (verbose) {
      std::cout << " *********** Parsing XML for Mb Depth Tracker ************ " << std::endl;
    }
    xmlp.parse(configFile);
  }
  catch (const vpException &e) {
    // Wrap any parsing failure into an I/O error naming the file
    std::cerr << "Exception: " << e.what() << std::endl;
    throw vpException(vpException::ioError, "Cannot open XML file \"%s\"", configFile.c_str());
  }

  vpCameraParameters camera;
  xmlp.getCameraParameters(camera);
  setCameraParameters(camera);

  // NOTE(review): statements applying the parsed appearance/disappearance
  // angles appear to be missing here (extraction loss).

  if (xmlp.hasNearClippingDistance())
    // NOTE(review): the statement applying the near clipping distance is
    // missing from this listing.

  if (xmlp.hasFarClippingDistance())
    // NOTE(review): the statement applying the far clipping distance is
    // missing from this listing.

  if (xmlp.getFovClipping())
    // NOTE(review): the statement enabling FOV clipping is missing from this
    // listing, as are the trailing clipping-flag statements.

#else
  (void)configFile;
  (void)verbose;
  throw(vpException(vpException::ioError, "vpMbDepthDenseTracker::loadConfigFile() needs pugixml built-in 3rdparty"));
#endif
}
490
491void vpMbDepthNormalTracker::reInitModel(const vpImage<unsigned char> &I, const std::string &cad_name,
492 const vpHomogeneousMatrix &cMo, bool verbose)
493{
494 m_cMo.eye();
495
496 for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
497 delete m_depthNormalFaces[i];
498 m_depthNormalFaces[i] = nullptr;
499 }
500
501 m_depthNormalFaces.clear();
502
503 loadModel(cad_name, verbose);
504 initFromPose(I, cMo);
505}
506
507#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
508void vpMbDepthNormalTracker::reInitModel(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
509 const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose)
510{
511 vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
512 reInitModel(I_dummy, cad_name, cMo, verbose);
513}
514
515#endif
516
// NOTE(review): the resetTracker() signature line is missing from this
// extracted listing, as are several reset statements (visible as gaps below)
// — confirm against the upstream source.
{
  // Reset the pose to identity
  m_cMo.eye();

  // Release every depth-normal face
  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
       ++it) {
    vpMbtFaceDepthNormal *normal_face = *it;
    delete normal_face;
    normal_face = nullptr;
  }

  m_depthNormalFaces.clear();

  // NOTE(review): one or more reset statements appear to be missing here.
  computeCovariance = false;

  // NOTE(review): reset statements (e.g. appearance/disappearance angles,
  // clipping flag) appear to be missing here.

  m_lambda = 1.0;

  faces.reset();

  // NOTE(review): a reset statement (e.g. optimization method) appears to be
  // missing here.

  useScanLine = false;

#ifdef VISP_HAVE_OGRE
  useOgre = false;
#endif

  // NOTE(review): trailing reset statements (e.g. clearing the active-face
  // and desired-feature lists) appear to be missing here.
}
553
555{
557#ifdef VISP_HAVE_OGRE
558 faces.getOgreContext()->setWindowName("MBT Depth");
559#endif
560}
561
563{
564 m_cMo = cdMo;
565 init(I);
566}
567
569{
570 m_cMo = cdMo;
572 init(m_I);
573}
574
575#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
576void vpMbDepthNormalTracker::setPose(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
577 const vpHomogeneousMatrix &cdMo)
578{
579 vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
580 m_cMo = cdMo;
581 init(I_dummy);
582}
583#endif
584
586{
588
589 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
590 it != m_depthNormalFaces.end(); ++it) {
591 (*it)->setScanLineVisibilityTest(v);
592 }
593}
594
595void vpMbDepthNormalTracker::setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking)
596{
597 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
598 it != m_depthNormalFaces.end(); ++it) {
599 vpMbtFaceDepthNormal *face = *it;
600 if (face->m_polygon->getName() == name) {
601 face->setTracked(useDepthNormalTracking);
602 }
603 }
604}
605
607
608#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_SEGMENTATION) && defined(VISP_HAVE_PCL_FILTERS) && defined(VISP_HAVE_PCL_COMMON)
609void vpMbDepthNormalTracker::segmentPointCloud(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
610{
613
614#if DEBUG_DISPLAY_DEPTH_NORMAL
615 if (!m_debugDisp_depthNormal->isInitialised()) {
616 m_debugImage_depthNormal.resize(point_cloud->height, point_cloud->width);
617 m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
618 }
619
620 m_debugImage_depthNormal = 0;
621 std::vector<std::vector<vpImagePoint> > roiPts_vec;
622#endif
623
624 for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
625 ++it) {
626 vpMbtFaceDepthNormal *face = *it;
627
628 if (face->isVisible() && face->isTracked()) {
629 vpColVector desired_features;
630
631#if DEBUG_DISPLAY_DEPTH_NORMAL
632 std::vector<std::vector<vpImagePoint> > roiPts_vec_;
633#endif
634 if (face->computeDesiredFeatures(m_cMo, point_cloud->width, point_cloud->height, point_cloud, desired_features,
636#if DEBUG_DISPLAY_DEPTH_NORMAL
637 ,
638 m_debugImage_depthNormal, roiPts_vec_
639#endif
640 ,
641 m_mask)) {
642 m_depthNormalListOfDesiredFeatures.push_back(desired_features);
643 m_depthNormalListOfActiveFaces.push_back(face);
644
645#if DEBUG_DISPLAY_DEPTH_NORMAL
646 roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
647#endif
648 }
649 }
650 }
651
652#if DEBUG_DISPLAY_DEPTH_NORMAL
653 vpDisplay::display(m_debugImage_depthNormal);
654
655 for (size_t i = 0; i < roiPts_vec.size(); i++) {
656 if (roiPts_vec[i].empty())
657 continue;
658
659 for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
660 vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
661 }
662 vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
663 vpColor::red, 2);
664 }
665
666 vpDisplay::flush(m_debugImage_depthNormal);
667#endif
668}
669#endif
670
671void vpMbDepthNormalTracker::segmentPointCloud(const std::vector<vpColVector> &point_cloud, unsigned int width,
672 unsigned int height)
673{
676
677#if DEBUG_DISPLAY_DEPTH_NORMAL
678 if (!m_debugDisp_depthNormal->isInitialised()) {
679 m_debugImage_depthNormal.resize(height, width);
680 m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
681 }
682
683 m_debugImage_depthNormal = 0;
684 std::vector<std::vector<vpImagePoint> > roiPts_vec;
685#endif
686
687 for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
688 ++it) {
689 vpMbtFaceDepthNormal *face = *it;
690
691 if (face->isVisible() && face->isTracked()) {
692 vpColVector desired_features;
693
694#if DEBUG_DISPLAY_DEPTH_NORMAL
695 std::vector<std::vector<vpImagePoint> > roiPts_vec_;
696#endif
697
698 if (face->computeDesiredFeatures(m_cMo, width, height, point_cloud, desired_features, m_depthNormalSamplingStepX,
700#if DEBUG_DISPLAY_DEPTH_NORMAL
701 ,
702 m_debugImage_depthNormal, roiPts_vec_
703#endif
704 ,
705 m_mask)) {
706 m_depthNormalListOfDesiredFeatures.push_back(desired_features);
707 m_depthNormalListOfActiveFaces.push_back(face);
708
709#if DEBUG_DISPLAY_DEPTH_NORMAL
710 roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
711#endif
712 }
713 }
714 }
715
716#if DEBUG_DISPLAY_DEPTH_NORMAL
717 vpDisplay::display(m_debugImage_depthNormal);
718
719 for (size_t i = 0; i < roiPts_vec.size(); i++) {
720 if (roiPts_vec[i].empty())
721 continue;
722
723 for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
724 vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
725 }
726 vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
727 vpColor::red, 2);
728 }
729
730 vpDisplay::flush(m_debugImage_depthNormal);
731#endif
732}
733
734void vpMbDepthNormalTracker::segmentPointCloud(const vpMatrix &point_cloud, unsigned int width,
735 unsigned int height)
736{
739
740#if DEBUG_DISPLAY_DEPTH_NORMAL
741 if (!m_debugDisp_depthNormal->isInitialised()) {
742 m_debugImage_depthNormal.resize(height, width);
743 m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
744 }
745
746 m_debugImage_depthNormal = 0;
747 std::vector<std::vector<vpImagePoint> > roiPts_vec;
748#endif
749
750 for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
751 ++it) {
752 vpMbtFaceDepthNormal *face = *it;
753
754 if (face->isVisible() && face->isTracked()) {
755 vpColVector desired_features;
756
757#if DEBUG_DISPLAY_DEPTH_NORMAL
758 std::vector<std::vector<vpImagePoint> > roiPts_vec_;
759#endif
760
761 if (face->computeDesiredFeatures(m_cMo, width, height, point_cloud, desired_features, m_depthNormalSamplingStepX,
763#if DEBUG_DISPLAY_DEPTH_NORMAL
764 ,
765 m_debugImage_depthNormal, roiPts_vec_
766#endif
767 ,
768 m_mask)) {
769 m_depthNormalListOfDesiredFeatures.push_back(desired_features);
770 m_depthNormalListOfActiveFaces.push_back(face);
771
772#if DEBUG_DISPLAY_DEPTH_NORMAL
773 roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
774#endif
775 }
776 }
777 }
778
779#if DEBUG_DISPLAY_DEPTH_NORMAL
780 vpDisplay::display(m_debugImage_depthNormal);
781
782 for (size_t i = 0; i < roiPts_vec.size(); i++) {
783 if (roiPts_vec[i].empty())
784 continue;
785
786 for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
787 vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
788 }
789 vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
790 vpColor::red, 2);
791 }
792
793 vpDisplay::flush(m_debugImage_depthNormal);
794#endif
795}
796
798{
799 m_cam = cam;
800
801 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
802 it != m_depthNormalFaces.end(); ++it) {
803 (*it)->setCameraParameters(cam);
804 }
805}
806
808{
809 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
810 it != m_depthNormalFaces.end(); ++it) {
811 (*it)->setFaceCentroidMethod(method);
812 }
813}
814
817{
819
820 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
821 it != m_depthNormalFaces.end(); ++it) {
822 (*it)->setFeatureEstimationMethod(method);
823 }
824}
825
827{
829
830 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
831 it != m_depthNormalFaces.end(); ++it) {
832 (*it)->setPclPlaneEstimationMethod(method);
833 }
834}
835
837{
839
840 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
841 it != m_depthNormalFaces.end(); ++it) {
842 (*it)->setPclPlaneEstimationRansacMaxIter(maxIter);
843 }
844}
845
847{
849
850 for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
851 it != m_depthNormalFaces.end(); ++it) {
852 (*it)->setPclPlaneEstimationRansacThreshold(threshold);
853 }
854}
855
856void vpMbDepthNormalTracker::setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY)
857{
858 if (stepX == 0 || stepY == 0) {
859 std::cerr << "stepX and stepY must be greater than zero!" << std::endl;
860 return;
861 }
862
865}
866
867// void vpMbDepthNormalTracker::setDepthNormalUseRobust(bool use) {
868// m_depthNormalUseRobust = use;
869//}
870
872{
873 throw vpException(vpException::fatalError, "Cannot track with a grayscale image!");
874}
875
877{
878 throw vpException(vpException::fatalError, "Cannot track with a color image!");
879}
880
881#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_SEGMENTATION) && defined(VISP_HAVE_PCL_FILTERS) && defined(VISP_HAVE_PCL_COMMON)
882void vpMbDepthNormalTracker::track(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
883{
884 segmentPointCloud(point_cloud);
885
886 computeVVS();
887
888 computeVisibility(point_cloud->width, point_cloud->height);
889}
890#endif
891
892void vpMbDepthNormalTracker::track(const std::vector<vpColVector> &point_cloud, unsigned int width, unsigned int height)
893{
894 segmentPointCloud(point_cloud, width, height);
895
896 computeVVS();
897
898 computeVisibility(width, height);
899}
900
/*!
  Circle primitives are not supported by the depth-normal tracker.

  \exception vpException::fatalError : Always thrown.
*/
void vpMbDepthNormalTracker::initCircle(const vpPoint & /*p1*/, const vpPoint & /*p2*/, const vpPoint & /*p3*/,
                                        double /*radius*/, int /*idFace*/, const std::string & /*name*/)
{
  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCircle() should not be called!");
}
906
/*!
  Cylinder primitives are not supported by the depth-normal tracker.

  \exception vpException::fatalError : Always thrown.
*/
void vpMbDepthNormalTracker::initCylinder(const vpPoint & /*p1*/, const vpPoint & /*p2*/, double /*radius*/,
                                          int /*idFace*/, const std::string & /*name*/)
{
  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCylinder() should not be called!");
}
912
914
916END_VISP_NAMESPACE
Generic class defining intrinsic camera parameters.
void computeFov(const unsigned int &w, const unsigned int &h)
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
Definition vpColor.h:157
static const vpColor red
Definition vpColor.h:198
static const vpColor blue
Definition vpColor.h:204
Display for windows using GDI (available on any windows 32 platform).
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Definition vpDisplayX.h:135
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void flush(const vpImage< unsigned char > &I)
static void displayArrow(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color=vpColor::white, unsigned int w=4, unsigned int h=2, unsigned int thickness=1)
error that can be emitted by ViSP classes.
Definition vpException.h:60
@ ioError
I/O error.
Definition vpException.h:67
@ fatalError
Fatal error.
Definition vpException.h:72
static vpHomogeneousMatrix direct(const vpColVector &v)
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Definition vpImage.h:131
static double rad(double deg)
Definition vpMath.h:129
static double sqr(double x)
Definition vpMath.h:203
static bool equal(double x, double y, double threshold=0.001)
Definition vpMath.h:470
static double deg(double rad)
Definition vpMath.h:119
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
virtual void computeVVSInteractionMatrixAndResidu() VP_OVERRIDE
void segmentPointCloud(const pcl::PointCloud< pcl::PointXYZ >::ConstPtr &point_cloud)
virtual void initCylinder(const vpPoint &p1, const vpPoint &p2, double radius, int idFace=0, const std::string &name="") VP_OVERRIDE
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false) VP_OVERRIDE
virtual void initCircle(const vpPoint &p1, const vpPoint &p2, const vpPoint &p3, double radius, int idFace=0, const std::string &name="") VP_OVERRIDE
vpMatrix m_L_depthNormal
Interaction matrix.
virtual void setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY)
virtual std::vector< std::vector< double > > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, bool displayFullModel=false) VP_OVERRIDE
vpMbtFaceDepthNormal::vpFeatureEstimationType m_depthNormalFeatureEstimationMethod
Method to estimate the desired features.
std::vector< vpMbtFaceDepthNormal * > m_depthNormalFaces
List of faces.
int m_depthNormalPclPlaneEstimationRansacMaxIter
PCL RANSAC maximum number of iterations.
unsigned int m_depthNormalSamplingStepY
Sampling step in y-direction.
virtual void setScanLineVisibilityTest(const bool &v) VP_OVERRIDE
double m_depthNormalPclPlaneEstimationRansacThreshold
PCL RANSAC threshold.
virtual void setDepthNormalPclPlaneEstimationRansacThreshold(double thresold)
virtual void resetTracker() VP_OVERRIDE
void addFace(vpMbtPolygon &polygon, bool alreadyClose)
bool m_depthNormalUseRobust
If true, use Tukey robust M-Estimator.
virtual void track(const vpImage< unsigned char > &) VP_OVERRIDE
void reInitModel(const vpImage< unsigned char > &I, const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose=false)
virtual std::vector< std::vector< double > > getFeaturesForDisplayDepthNormal()
vpColVector m_w_depthNormal
Robust weights.
virtual void init(const vpImage< unsigned char > &I) VP_OVERRIDE
vpColVector m_error_depthNormal
(s - s*)
vpColVector m_weightedError_depthNormal
Weighted error.
virtual void setPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cdMo) VP_OVERRIDE
virtual void setOgreVisibilityTest(const bool &v) VP_OVERRIDE
std::vector< vpMbtFaceDepthNormal * > m_depthNormalListOfActiveFaces
List of current active (visible and with features extracted) faces.
virtual void loadConfigFile(const std::string &configFile, bool verbose=true) VP_OVERRIDE
virtual void initFaceFromCorners(vpMbtPolygon &polygon) VP_OVERRIDE
virtual void setCameraParameters(const vpCameraParameters &camera) VP_OVERRIDE
int m_depthNormalPclPlaneEstimationMethod
PCL plane estimation method.
virtual void setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType &method)
virtual void initFaceFromLines(vpMbtPolygon &polygon) VP_OVERRIDE
virtual void setDepthNormalPclPlaneEstimationMethod(int method)
std::vector< vpColVector > m_depthNormalListOfDesiredFeatures
List of desired features.
virtual void computeVVSInit() VP_OVERRIDE
virtual void testTracking() VP_OVERRIDE
void setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking)
void computeVisibility(unsigned int width, unsigned int height)
vpMbHiddenFaces< vpMbtPolygon > m_depthNormalHiddenFacesDisplay
Set of faces describing the object used only for display with scan line.
virtual void setDepthNormalFeatureEstimationMethod(const vpMbtFaceDepthNormal::vpFeatureEstimationType &method)
virtual void setDepthNormalPclPlaneEstimationRansacMaxIter(int maxIter)
virtual ~vpMbDepthNormalTracker() VP_OVERRIDE
std::vector< std::vector< double > > m_featuresToBeDisplayedDepthNormal
Display features.
unsigned int m_depthNormalSamplingStepX
Sampling step in x-direction.
double m_lambda
Gain of the virtual visual servoing stage.
bool modelInitialised
vpImage< unsigned char > m_I
Grayscale image buffer, used when passing color images.
bool m_computeInteraction
vpMatrix oJo
The Degrees of Freedom to estimate.
vpUniRand m_rand
Random number generator used in vpMbtDistanceLine::buildFrom().
double m_initialMu
Initial Mu for Levenberg Marquardt optimization loop.
vpHomogeneousMatrix m_cMo
The current pose.
virtual void computeVVSCheckLevenbergMarquardt(unsigned int iter, vpColVector &error, const vpColVector &m_error_prev, const vpHomogeneousMatrix &cMoPrev, double &mu, bool &reStartFromLastIncrement, vpColVector *const w=nullptr, const vpColVector *const m_w_prev=nullptr)
vpCameraParameters m_cam
The camera parameters.
double m_stopCriteriaEpsilon
Epsilon threshold to stop the VVS optimization loop.
bool useOgre
Use Ogre3d for global visibility tests.
virtual void computeVVSWeights(vpRobust &robust, const vpColVector &error, vpColVector &w)
vpMbHiddenFaces< vpMbtPolygon > faces
Set of faces describing the object.
virtual void setScanLineVisibilityTest(const bool &v)
virtual void setOgreVisibilityTest(const bool &v)
virtual void computeCovarianceMatrixVVS(const bool isoJoIdentity, const vpColVector &w_true, const vpHomogeneousMatrix &cMoPrev, const vpMatrix &L_true, const vpMatrix &LVJ_true, const vpColVector &error)
vpMbtOptimizationMethod m_optimizationMethod
Optimization method used.
virtual void computeVVSPoseEstimation(const bool isoJoIdentity, unsigned int iter, vpMatrix &L, vpMatrix &LTL, vpColVector &R, const vpColVector &error, vpColVector &error_prev, vpColVector &LTR, double &mu, vpColVector &v, const vpColVector *const w=nullptr, vpColVector *const m_w_prev=nullptr)
bool displayFeatures
If true, the features are displayed.
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &od_M_o=vpHomogeneousMatrix())
double angleDisappears
Angle used to detect a face disappearance.
virtual void setNearClippingDistance(const double &dist)
virtual void setFarClippingDistance(const double &dist)
double distFarClip
Distance for far clipping.
bool m_isoJoIdentity
Boolean to know if oJo is identity (for fast computation).
bool useScanLine
Use Scanline for global visibility tests.
virtual void setClipping(const unsigned int &flags)
double angleAppears
Angle used to detect a face appearance.
const vpImage< bool > * m_mask
Mask used to disable tracking on a part of image.
virtual void initFromPose(const vpImage< unsigned char > &I, const std::string &initFile)
bool computeCovariance
Flag used to specify if the covariance matrix has to be computed or not.
double distNearClip
Distance for near clipping.
unsigned int m_maxIter
Maximum number of iterations of the virtual visual servoing stage.
bool ogreShowConfigDialog
unsigned int clippingFlag
Flags specifying which clipping to used.
Manage depth normal features for a particular face.
double m_distNearClip
Distance for near clipping.
std::vector< std::vector< double > > getModelForDisplay(unsigned int width, unsigned int height, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, bool displayFullModel=false)
void addLine(vpPoint &p1, vpPoint &p2, vpMbHiddenFaces< vpMbtPolygon > *const faces, vpUniRand &rand_gen, int polygon=-1, std::string name="")
double m_distFarClip
Distance for far clipping.
vpMbHiddenFaces< vpMbtPolygon > * m_hiddenFace
Pointer to the list of faces.
std::vector< std::vector< double > > getFeaturesForDisplay(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double scale=0.05)
void setPclPlaneEstimationMethod(int method)
void setPclPlaneEstimationRansacThreshold(double threshold)
vpMbtPolygon * m_polygon
Polygon defining the face.
vpCameraParameters m_cam
Camera intrinsic parameters.
vpPlane m_planeObject
Plane equation described in the object frame.
unsigned int m_clippingFlag
Flags specifying which clipping to used.
void setTracked(bool tracked)
void setFeatureEstimationMethod(const vpFeatureEstimationType &method)
void setPclPlaneEstimationRansacMaxIter(int maxIter)
bool m_useScanLine
Scan line visibility.
bool computeDesiredFeatures(const vpHomogeneousMatrix &cMo, unsigned int width, unsigned int height, const pcl::PointCloud< pcl::PointXYZ >::ConstPtr &point_cloud, vpColVector &desired_features, unsigned int stepX, unsigned int stepY, const vpImage< bool > *mask=nullptr)
Implementation of a polygon of the model used by the model-based tracker.
std::string getName() const
int getIndex() const
Parse an Xml file to extract configuration parameters of a mbtConfig object.
void setDepthNormalPclPlaneEstimationRansacMaxIter(int maxIter)
int getDepthNormalPclPlaneEstimationRansacMaxIter() const
unsigned int getDepthNormalSamplingStepX() const
void getCameraParameters(vpCameraParameters &cam) const
unsigned int getDepthNormalSamplingStepY() const
void setDepthNormalFeatureEstimationMethod(const vpMbtFaceDepthNormal::vpFeatureEstimationType &method)
vpMbtFaceDepthNormal::vpFeatureEstimationType getDepthNormalFeatureEstimationMethod() const
void setDepthNormalPclPlaneEstimationMethod(int method)
void setDepthNormalSamplingStepX(unsigned int stepX)
void setAngleDisappear(const double &adisappear)
int getDepthNormalPclPlaneEstimationMethod() const
void setAngleAppear(const double &aappear)
double getDepthNormalPclPlaneEstimationRansacThreshold() const
void parse(const std::string &filename)
void setDepthNormalPclPlaneEstimationRansacThreshold(double threshold)
void setCameraParameters(const vpCameraParameters &cam)
void setDepthNormalSamplingStepY(unsigned int stepY)
This class defines the container for a plane geometrical structure.
Definition vpPlane.h:56
@ object_frame
Definition vpPlane.h:64
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition vpPoint.h:79
unsigned int nbpt
Number of points used to define the polygon.
Definition vpPolygon3D.h:74
unsigned int getNbPoint() const
vpPoint * p
corners in the object frame
Definition vpPolygon3D.h:79
vpVelocityTwistMatrix & buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)