34#include <visp3/core/vpConfig.h>
37#if defined(VISP_HAVE_OPENCV) && \
38 (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
39 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES)))
44#include <visp3/core/vpIoTools.h>
45#include <visp3/vision/vpKeyPoint.h>
47#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
48#include <opencv2/3d.hpp>
49#include <opencv2/features.hpp>
52#if (VISP_HAVE_OPENCV_VERSION <0x050000)
53#include <opencv2/calib3d/calib3d.hpp>
56#if defined(HAVE_OPENCV_XFEATURES2D)
57#include <opencv2/xfeatures2d.hpp>
60#if defined(VISP_HAVE_PUGIXML)
66#ifndef DOXYGEN_SHOULD_SKIP_THIS
70inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
72 if (knnMatches.size() > 0) {
79inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
90 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
91 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
92 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
93 m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
94 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
95 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
96 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
97 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
98 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
99 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
100#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
101 m_useBruteForceCrossCheck(true),
103 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
104 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
108 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
109 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
116 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
117 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
118 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
119 m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
120 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
121 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
122 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
123 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
124 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
125 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
126#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
127 m_useBruteForceCrossCheck(true),
129 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
130 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
134 m_detectorNames.push_back(detectorName);
135 m_extractorNames.push_back(extractorName);
142 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
143 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
144 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
145 m_filterType(filterType), m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
146 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
147 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
148 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
149 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
150 m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
151 m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
152 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
153#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
154 m_useBruteForceCrossCheck(true),
156 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
157 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// NOTE(review): fragment view — interior source lines are missing and the
// original line numbers are fused into the text. Comments describe only the
// statements visible here.
// Computes an affine-simulated view of 'img': a rotation by angle 'phi'
// followed by a tilt (anisotropic 1/tilt subsampling along x). Outputs the
// warped 'img', a 'mask' of valid pixels, and 'Ai', the inverse 2x3 affine
// transform mapping simulated coordinates back to the original image.
163void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
// Mask starts fully valid; 'w'/'h' presumably the input image dimensions —
// their declarations are not visible in this fragment, confirm upstream.
168 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
// 'A' accumulates the forward affine transform, starting as identity (2x3).
170 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation step, applied only when 'phi' is non-zero.
173 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
// 2x2 rotation matrix; 'c'/'s' presumably cos(phi)/sin(phi), computed in lines
// missing from this view.
178 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
// Rotate the four image corners to obtain the bounding box of the rotated image.
180 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
181 cv::Mat tcorners = corners * A.t();
182 cv::Mat tcorners_x, tcorners_y;
183 tcorners.col(0).copyTo(tcorners_x);
184 tcorners.col(1).copyTo(tcorners_y);
185 std::vector<cv::Mat> channels;
186 channels.push_back(tcorners_x);
187 channels.push_back(tcorners_y);
188 cv::merge(channels, tcorners);
190 cv::Rect rect = cv::boundingRect(tcorners);
// Rebuild A as rotation + translation so the rotated image starts at (0, 0).
191 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
193 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt step: anti-alias along x with a Gaussian (sigma = 0.8*sqrt(tilt^2 - 1))
// before subsampling the x axis by 1/tilt; A's first row is scaled to match.
196 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
197 double s = 0.8 * sqrt(tilt * tilt - 1);
198 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
199 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
200 A.row(0) = A.row(0) / tilt;
// If any warp was applied, transform the mask the same way (nearest-neighbor
// interpolation keeps the mask binary).
203 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
204 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
207 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
// Ai = inverse of the accumulated forward transform (cv::invertAffineTransform).
209 cv::invertAffineTransform(A, Ai);
// NOTE(review): fragment view — interior source lines are missing; comments
// describe only the statements visible here.
// Builds the reference (training) data from an image: resets previous training
// state, detects/extracts keypoints and descriptors (optionally via affine view
// simulation), registers them, and feeds the matcher.
232 m_trainPoints.clear();
233 m_mapOfImageId.clear();
234 m_mapOfImages.clear();
235 m_currentImageId = 1;
// Affine-detection branch: keypoints/descriptors are gathered per simulated
// view (the detection call itself is in lines missing from this view), then
// concatenated into the flat training containers.
237 if (m_useAffineDetection) {
238 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
239 std::vector<cv::Mat> listOfTrainDescriptors;
245 m_trainKeyPoints.clear();
246 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
247 it != listOfTrainKeyPoints.end(); ++it) {
248 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
// Stack per-view descriptor matrices row-wise; the first one is copied, the
// rest appended (the branch condition is not visible here).
252 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
256 it->copyTo(m_trainDescriptors);
259 m_trainDescriptors.push_back(*it);
// Plain (non-affine) path: single detect + extract on the input image.
264 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
265 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
// Remember which training image each keypoint belongs to (keyed by class_id)
// so matches can later be displayed against the right training image.
270 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
271 m_mapOfImageId[it->class_id] = m_currentImageId;
275 m_mapOfImages[m_currentImageId] = I;
// Feed the matcher with the training descriptors.
284 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
286 return static_cast<unsigned int>(m_trainKeyPoints.size());
// NOTE(review): fragment view — interior source lines are missing; comments
// describe only the statements visible here.
// Builds the reference from already-detected keypoints with associated 3D
// points: extracts descriptors and, because extraction can discard keypoints,
// re-aligns 'points3f' with the survivors before delegating to the
// descriptor-based overload.
296 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
298 cv::Mat trainDescriptors;
// Keep a copy of the keypoint list to detect removals performed by extract().
300 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
302 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
// Some keypoints were dropped by the extractor: rebuild the 3D point list so
// it stays index-aligned with the remaining keypoints.
304 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Map keypoint hash -> original index in the pre-extraction list.
308 std::map<size_t, size_t> mapOfKeypointHashes;
310 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
312 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
// Keep only the 3D points whose keypoint survived extraction.
315 std::vector<cv::Point3f> trainPoints_tmp;
316 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
317 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
318 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
323 points3f = trainPoints_tmp;
// Delegate to the overload taking keypoints + descriptors + 3D points.
326 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// NOTE(review): fragment view — interior source lines are missing; comments
// describe only the statements visible here.
// Color-image variant of the keypoints + 3D points reference builder: same
// logic as the grayscale overload — extract descriptors, re-align 'points3f'
// with keypoints that survived extraction, then delegate.
330 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
332 cv::Mat trainDescriptors;
// Copy of the keypoint list to detect removals performed by extract().
334 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
336 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
// Extraction discarded some keypoints: rebuild the 3D point list so it stays
// index-aligned with the remaining keypoints.
338 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Map keypoint hash -> original index in the pre-extraction list.
342 std::map<size_t, size_t> mapOfKeypointHashes;
344 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
346 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
// Keep only the 3D points whose keypoint survived extraction.
349 std::vector<cv::Point3f> trainPoints_tmp;
350 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
351 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
352 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
357 points3f = trainPoints_tmp;
// Delegate to the overload taking keypoints + descriptors + 3D points.
360 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// NOTE(review): fragment view — interior source lines are missing (including
// the guard that presumably makes the clears conditional on '!append' — TODO
// confirm); comments describe only the statements visible here.
// Core reference builder: registers pre-computed keypoints, descriptors and 3D
// points as the training data and feeds the matcher. Returns the total number
// of training keypoints.
365 const std::vector<cv::KeyPoint> &trainKeyPoints,
366 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
367 bool append,
int class_id)
// Reset all previous training state.
370 m_trainPoints.clear();
371 m_mapOfImageId.clear();
372 m_mapOfImages.clear();
373 m_currentImageId = 0;
374 m_trainKeyPoints.clear();
// Work on a copy so the caller's keypoints are not mutated when overriding
// class ids.
379 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
// class_id == -1 means "keep the keypoints' own class ids"; otherwise stamp
// every keypoint with the requested id.
381 if (class_id != -1) {
382 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
383 it->class_id = class_id;
// Remember which training image each keypoint belongs to (keyed by class_id).
389 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
391 m_mapOfImageId[it->class_id] = m_currentImageId;
395 m_mapOfImages[m_currentImageId] = I;
// Append keypoints; descriptors are either copied fresh or appended row-wise
// (the branch condition is in lines missing from this view).
398 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
400 trainDescriptors.copyTo(m_trainDescriptors);
403 m_trainDescriptors.push_back(trainDescriptors);
405 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
// Feed the matcher with the accumulated training descriptors.
413 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
417 return static_cast<unsigned int>(m_trainKeyPoints.size());
421 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
422 bool append,
int class_id)
425 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
432 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
439 vpPlane Po(pts[0], pts[1], pts[2]);
440 double xc = 0.0, yc = 0.0;
451 point_obj = cMo.inverse() * point_cam;
452 point = cv::Point3f(
static_cast<float>(point_obj[0]),
static_cast<float>(point_obj[1]),
static_cast<float>(point_obj[2]));
459 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
466 vpPlane Po(pts[0], pts[1], pts[2]);
467 double xc = 0.0, yc = 0.0;
478 point_obj = cMo.inverse() * point_cam;
483 std::vector<cv::KeyPoint> &candidates,
484 const std::vector<vpPolygon> &polygons,
485 const std::vector<std::vector<vpPoint> > &roisPt,
486 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
488 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
495 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
496 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
497 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
501 std::vector<vpPolygon> polygons_tmp = polygons;
502 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
503 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
505 while (it2 != pairOfCandidatesToCheck.end()) {
506 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
507 if (it1->isInside(imPt)) {
508 candidates.push_back(it2->first);
510 points.push_back(pt);
512 if (descriptors !=
nullptr) {
513 desc.push_back(descriptors->row(
static_cast<int>(it2->second)));
517 it2 = pairOfCandidatesToCheck.erase(it2);
525 if (descriptors !=
nullptr) {
526 desc.copyTo(*descriptors);
531 std::vector<vpImagePoint> &candidates,
532 const std::vector<vpPolygon> &polygons,
533 const std::vector<std::vector<vpPoint> > &roisPt,
534 std::vector<vpPoint> &points, cv::Mat *descriptors)
536 std::vector<vpImagePoint> candidatesToCheck = candidates;
542 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
543 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
544 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
548 std::vector<vpPolygon> polygons_tmp = polygons;
549 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
550 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
552 while (it2 != pairOfCandidatesToCheck.end()) {
553 if (it1->isInside(it2->first)) {
554 candidates.push_back(it2->first);
556 points.push_back(pt);
558 if (descriptors !=
nullptr) {
559 desc.push_back(descriptors->row(
static_cast<int>(it2->second)));
563 it2 = pairOfCandidatesToCheck.erase(it2);
574 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpCylinder> &cylinders,
575 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
576 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
578 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
584 size_t cpt_keypoint = 0;
585 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
586 ++it1, cpt_keypoint++) {
587 size_t cpt_cylinder = 0;
590 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
591 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
594 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
596 candidates.push_back(*it1);
600 double xm = 0.0, ym = 0.0;
602 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
604 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
606 point_cam[0] = xm * Z;
607 point_cam[1] = ym * Z;
611 point_obj = cMo.inverse() * point_cam;
614 points.push_back(cv::Point3f(
static_cast<float>(pt.
get_oX()),
static_cast<float>(pt.
get_oY()),
static_cast<float>(pt.
get_oZ())));
616 if (descriptors !=
nullptr) {
617 desc.push_back(descriptors->row(
static_cast<int>(cpt_keypoint)));
627 if (descriptors !=
nullptr) {
628 desc.copyTo(*descriptors);
633 std::vector<vpImagePoint> &candidates,
const std::vector<vpCylinder> &cylinders,
634 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
635 std::vector<vpPoint> &points, cv::Mat *descriptors)
637 std::vector<vpImagePoint> candidatesToCheck = candidates;
643 size_t cpt_keypoint = 0;
644 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
645 ++it1, cpt_keypoint++) {
646 size_t cpt_cylinder = 0;
649 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
650 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
653 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
655 candidates.push_back(*it1);
659 double xm = 0.0, ym = 0.0;
661 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
663 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
665 point_cam[0] = xm * Z;
666 point_cam[1] = ym * Z;
670 point_obj = cMo.inverse() * point_cam;
673 points.push_back(pt);
675 if (descriptors !=
nullptr) {
676 desc.push_back(descriptors->row(
static_cast<int>(cpt_keypoint)));
686 if (descriptors !=
nullptr) {
687 desc.copyTo(*descriptors);
// NOTE(review): fragment view — interior source lines are missing; comments
// describe only the statements visible here.
// Pose estimation from 2D/3D correspondences using OpenCV's RANSAC PnP solver,
// then conversion of the resulting rvec/tvec into ViSP pose types.
// At least 4 paired correspondences are required.
697 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
699 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
// Pinhole camera matrix built from the ViSP camera parameters.
706 cv::Mat cameraMatrix =
707 (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
// Zero distortion: image points are presumably already undistorted — TODO
// confirm against the caller.
716 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
// OpenCV >= 3.0 API (confidence-based); the pre-3.0 branch below uses the
// older inlier-count-based signature.
719#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
721 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
722 static_cast<float>(m_ransacReprojectionError),
725 inlierIndex, cv::SOLVEPNP_ITERATIVE);
// Pre-3.0 branch: consensus expressed as an absolute inlier count, optionally
// derived from a percentage of the filtered query keypoints.
745 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
746 if (m_useConsensusPercentage) {
747 nbInlierToReachConsensus =
static_cast<int>(m_ransacConsensusPercentage / 100.0 *
static_cast<double>(m_queryFilteredKeyPoints.size()));
750 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
751 static_cast<float>(m_ransacReprojectionError), nbInlierToReachConsensus, inlierIndex);
754 catch (cv::Exception &e) {
755 std::cerr << e.what() << std::endl;
// Convert the OpenCV Rodrigues rotation vector + translation vector into ViSP
// pose representation.
759 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
760 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
// Optional user callback to validate the estimated pose.
763 if (func !=
nullptr) {
777 std::vector<vpPoint> &inliers,
double &elapsedTime,
780 std::vector<unsigned int> inlierIndex;
781 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
// NOTE(review): fragment view — interior source lines are missing; comments
// describe only the statements visible here.
// Pose estimation from vpPoint correspondences using ViSP's RANSAC pose solver
// (the vpPose configuration/solve calls are in lines missing from this view).
// Returns whether the RANSAC estimation succeeded; fills inliers and their
// indices.
785 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
// At least 4 points are required for pose computation.
790 if (objectVpPoints.size() < 4) {
// Register every correspondence with the pose solver (the addPoint call is in
// lines missing from this view).
800 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
// Consensus threshold: absolute inlier count, optionally derived from a
// percentage of the filtered query keypoints.
804 unsigned int nbInlierToReachConsensus =
static_cast<unsigned int>(m_nbRansacMinInlierCount);
805 if (m_useConsensusPercentage) {
806 nbInlierToReachConsensus =
807 static_cast<unsigned int>(m_ransacConsensusPercentage / 100.0 *
static_cast<double>(m_queryFilteredKeyPoints.size()));
817 bool isRansacPoseEstimationOk =
false;
// Optionally compute the pose covariance matrix after estimation.
824 if (m_computeCovariance) {
829 std::cerr <<
"e=" << e.what() << std::endl;
847 return isRansacPoseEstimationOk;
// NOTE(review): fragment view — interior source lines are missing (including
// the projection of each 3D point into (u, v), presumably done between lines
// 868 and 870 — TODO confirm); comments describe only the statements visible.
// Returns the mean reprojection error in pixels over all keypoint/3D-point
// matches: Euclidean distance between each detected keypoint and its projected
// 3D point, averaged.
850double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
// Guard against an empty match list (would otherwise divide by zero below).
853 if (matchKeyPoints.size() == 0) {
859 std::vector<double> errors(matchKeyPoints.size());
862 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
863 it != matchKeyPoints.end(); ++it, cpt++) {
868 double u = 0.0,
v = 0.0;
// Per-match pixel distance between the keypoint and the projected (u, v).
870 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
// Arithmetic mean of the per-match errors.
873 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
900 unsigned int nbImg =
static_cast<unsigned int>(m_mapOfImages.size() + 1);
902 if (m_mapOfImages.empty()) {
903 std::cerr <<
"There is no training image loaded !" << std::endl;
913 unsigned int nbImgSqrt =
static_cast<unsigned int>(
vpMath::round(std::sqrt(
static_cast<double>(nbImg))));
916 unsigned int nbWidth = nbImgSqrt;
918 unsigned int nbHeight = nbImgSqrt;
921 if (nbImgSqrt * nbImgSqrt < nbImg) {
925 unsigned int maxW = ICurrent.
getWidth();
926 unsigned int maxH = ICurrent.
getHeight();
927 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
929 if (maxW < it->second.getWidth()) {
930 maxW = it->second.getWidth();
933 if (maxH < it->second.getHeight()) {
934 maxH = it->second.getHeight();
946 unsigned int nbImg =
static_cast<unsigned int>(m_mapOfImages.size() + 1);
948 if (m_mapOfImages.empty()) {
949 std::cerr <<
"There is no training image loaded !" << std::endl;
959 unsigned int nbImgSqrt =
static_cast<unsigned int>(
vpMath::round(std::sqrt(
static_cast<double>(nbImg))));
962 unsigned int nbWidth = nbImgSqrt;
964 unsigned int nbHeight = nbImgSqrt;
967 if (nbImgSqrt * nbImgSqrt < nbImg) {
971 unsigned int maxW = ICurrent.
getWidth();
972 unsigned int maxH = ICurrent.
getHeight();
973 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
975 if (maxW < it->second.getWidth()) {
976 maxW = it->second.getWidth();
979 if (maxH < it->second.getHeight()) {
980 maxH = it->second.getHeight();
991 detect(I, keyPoints, elapsedTime, rectangle);
997 detect(I_color, keyPoints, elapsedTime, rectangle);
1003 detect(matImg, keyPoints, elapsedTime, mask);
1011 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1014#if VISP_HAVE_OPENCV_VERSION >= 0x030000
1015 int filled = cv::FILLED;
1017 int filled = CV_FILLED;
1019 cv::Point leftTop(
static_cast<int>(rectangle.
getLeft()),
static_cast<int>(rectangle.
getTop())),
1020 rightBottom(
static_cast<int>(rectangle.
getRight()),
static_cast<int>(rectangle.
getBottom()));
1021 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), filled);
1024 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1027 detect(matImg, keyPoints, elapsedTime, mask);
1035 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1038#if VISP_HAVE_OPENCV_VERSION >= 0x030000
1039 int filled = cv::FILLED;
1041 int filled = CV_FILLED;
1043 cv::Point leftTop(
static_cast<int>(rectangle.
getLeft()),
static_cast<int>(rectangle.
getTop()));
1044 cv::Point rightBottom(
static_cast<int>(rectangle.
getRight()),
static_cast<int>(rectangle.
getBottom()));
1045 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), filled);
1048 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1051 detect(matImg, keyPoints, elapsedTime, mask);
1055 const cv::Mat &mask)
1060 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1061 it != m_detectors.end(); ++it) {
1062 std::vector<cv::KeyPoint> kp;
1064 it->second->detect(matImg, kp, mask);
1065 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1073 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1075 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1078 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1086 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1088 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1091 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1099 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1102 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1109 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1112 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1118 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1121 srand(
static_cast<unsigned int>(time(
nullptr)));
1124 std::vector<vpImagePoint> queryImageKeyPoints;
1126 std::vector<vpImagePoint> trainImageKeyPoints;
1130 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1132 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1135 leftPt = trainImageKeyPoints[
static_cast<size_t>(it->trainIdx)];
1136 rightPt =
vpImagePoint(queryImageKeyPoints[
static_cast<size_t>(it->queryIdx)].get_i(),
1137 queryImageKeyPoints[
static_cast<size_t>(it->queryIdx)].get_j() + IRef.
getWidth());
1145 unsigned int lineThickness,
const vpColor &color)
1148 srand(
static_cast<unsigned int>(time(
nullptr)));
1151 std::vector<vpImagePoint> queryImageKeyPoints;
1153 std::vector<vpImagePoint> trainImageKeyPoints;
1157 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1159 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1162 leftPt = trainImageKeyPoints[
static_cast<size_t>(it->trainIdx)];
1163 rightPt =
vpImagePoint(queryImageKeyPoints[
static_cast<size_t>(it->queryIdx)].get_i(),
1164 queryImageKeyPoints[
static_cast<size_t>(it->queryIdx)].get_j() + IRef.
getWidth());
1172 unsigned int lineThickness,
const vpColor &color)
1175 srand(
static_cast<unsigned int>(time(
nullptr)));
1178 std::vector<vpImagePoint> queryImageKeyPoints;
1180 std::vector<vpImagePoint> trainImageKeyPoints;
1184 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1186 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1189 leftPt = trainImageKeyPoints[
static_cast<size_t>(it->trainIdx)];
1190 rightPt =
vpImagePoint(queryImageKeyPoints[
static_cast<size_t>(it->queryIdx)].get_i(),
1191 queryImageKeyPoints[
static_cast<size_t>(it->queryIdx)].get_j() + IRef.
getWidth());
1200 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1201 unsigned int lineThickness)
1203 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1205 std::cerr <<
"There is no training image loaded !" << std::endl;
1211 int nbImg =
static_cast<int>(m_mapOfImages.size() + 1);
1219 int nbImgSqrt =
vpMath::round(std::sqrt(
static_cast<double>(nbImg)));
1220 int nbWidth = nbImgSqrt;
1221 int nbHeight = nbImgSqrt;
1223 if (nbImgSqrt * nbImgSqrt < nbImg) {
1227 std::map<int, int> mapOfImageIdIndex;
1230 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1232 mapOfImageIdIndex[it->first] = cpt;
1234 if (maxW < it->second.getWidth()) {
1235 maxW = it->second.getWidth();
1238 if (maxH < it->second.getHeight()) {
1239 maxH = it->second.getHeight();
1245 int medianI = nbHeight / 2;
1246 int medianJ = nbWidth / 2;
1247 int medianIndex = medianI * nbWidth + medianJ;
1248 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1250 int current_class_id_index = 0;
1251 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1252 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1257 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1260 int indexI = current_class_id_index / nbWidth;
1261 int indexJ = current_class_id_index - (indexI * nbWidth);
1262 topLeftCorner.
set_ij(
static_cast<int>(maxH) * indexI,
static_cast<int>(maxW) * indexJ);
1269 vpImagePoint topLeftCorner(
static_cast<int>(maxH) * medianI,
static_cast<int>(maxW) * medianJ);
1270 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1275 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1280 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1286 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1287 int current_class_id = 0;
1288 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].class_id]] < medianIndex) {
1289 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].class_id]];
1294 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].class_id]] + 1;
1297 int indexI = current_class_id / nbWidth;
1298 int indexJ = current_class_id - (indexI * nbWidth);
1300 vpImagePoint end(
static_cast<int>(maxH) * indexI + m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].pt.y,
1301 static_cast<int>(maxW) * indexJ + m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].pt.x);
1302 vpImagePoint start(
static_cast<int>(maxH) * medianI + m_queryFilteredKeyPoints[
static_cast<size_t>(it->queryIdx)].pt.y,
1303 static_cast<int>(maxW) * medianJ + m_queryFilteredKeyPoints[
static_cast<size_t>(it->queryIdx)].pt.x);
// Overload of vpKeyPoint::displayMatching that also draws RANSAC inliers/outliers.
// NOTE(review): this chunk is a lossy extraction — the signature's first line and
// several statement lines are elided; embedded numbers are original file line numbers.
// Lays the training images out in a square mosaic around the current image (placed at
// the median cell) and draws train keypoints, query keypoints, inliers, outliers and
// match lines between the mosaic cells.
1313 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1314 unsigned int lineThickness)
// Guard: nothing to display without a training database.
1316 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1318 std::cerr <<
"There is no training image loaded !" << std::endl;
// +1 cell for the current image placed among the training images.
1324 int nbImg =
static_cast<int>(m_mapOfImages.size() + 1);
// Mosaic is the smallest square grid that can hold all images.
1332 int nbImgSqrt =
vpMath::round(std::sqrt(
static_cast<double>(nbImg)));
1333 int nbWidth = nbImgSqrt;
1334 int nbHeight = nbImgSqrt;
1336 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map image id -> position in the mosaic; also track the max cell size.
1340 std::map<int, int> mapOfImageIdIndex;
1343 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1345 mapOfImageIdIndex[it->first] = cpt;
1347 if (maxW < it->second.getWidth()) {
1348 maxW = it->second.getWidth();
1351 if (maxH < it->second.getHeight()) {
1352 maxH = it->second.getHeight();
// The current image occupies the median cell of the grid.
1358 int medianI = nbHeight / 2;
1359 int medianJ = nbWidth / 2;
1360 int medianIndex = medianI * nbWidth + medianJ;
// Draw each training keypoint in the cell of the image it belongs to
// (cells past the median are shifted by one to skip the current image's cell).
1361 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1363 int current_class_id_index = 0;
1364 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1365 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1370 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1373 int indexI = current_class_id_index / nbWidth;
1374 int indexJ = current_class_id_index - (indexI * nbWidth);
1375 topLeftCorner.
set_ij(
static_cast<int>(maxH) * indexI,
static_cast<int>(maxW) * indexJ);
// Offset of the current image's (median) cell in the mosaic.
1382 vpImagePoint topLeftCorner(
static_cast<int>(maxH) * medianI,
static_cast<int>(maxW) * medianJ);
// Query keypoints, RANSAC inliers and RANSAC outliers, all in the median cell.
1383 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1388 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1393 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Draw a line per filtered match, from the query keypoint (median cell)
// to the matched train keypoint in its own cell.
1399 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1400 int current_class_id = 0;
1401 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].class_id]] < medianIndex) {
1402 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].class_id]];
1407 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].class_id]] + 1;
1410 int indexI = current_class_id / nbWidth;
1411 int indexJ = current_class_id - (indexI * nbWidth);
// vpImagePoint is (i=row, j=col), hence (pt.y, pt.x) ordering.
1413 vpImagePoint end(
static_cast<int>(maxH) * indexI + m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].pt.y,
1414 static_cast<int>(maxW) * indexJ + m_trainKeyPoints[
static_cast<size_t>(it->trainIdx)].pt.x);
1415 vpImagePoint start(
static_cast<int>(maxH) * medianI + m_queryFilteredKeyPoints[
static_cast<size_t>(it->queryIdx)].pt.y,
1416 static_cast<int>(maxW) * medianJ + m_queryFilteredKeyPoints[
static_cast<size_t>(it->queryIdx)].pt.x);
// Thin convenience overloads of vpKeyPoint::extract (signature heads elided by the
// extraction). Each one forwards to the core extract() that takes a cv::Mat plus an
// elapsed-time out-parameter; the vpImage variants presumably convert to cv::Mat first
// — TODO confirm against the full source.
1426 std::vector<cv::Point3f> *trainPoints)
1429 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1433 std::vector<cv::Point3f> *trainPoints)
1436 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1440 std::vector<cv::Point3f> *trainPoints)
1443 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1447 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1451 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1455 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1459 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// Core vpKeyPoint::extract: runs every configured descriptor extractor over the
// detected keypoints and builds the final descriptor matrix.
// NOTE(review): lossy extraction — loop increments, else-branches and closing braces
// are elided; embedded numbers are original file line numbers.
// Extractors may drop keypoints they cannot describe; when that happens the associated
// 3D train points (and previously computed descriptor rows) are re-aligned by matching
// keypoint hashes (myKeypointHash) between the before/after keypoint lists.
1463 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1468 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1469 itd != m_extractors.end(); ++itd) {
// First extractor: compute directly into `descriptors`.
1473 if (trainPoints !=
nullptr && !trainPoints->empty()) {
// Keep a copy of the keypoints to detect removals made by compute().
1476 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1479 itd->second->compute(matImg, keyPoints, descriptors);
1481 if (keyPoints.size() != keyPoints_tmp.size()) {
// Hash each original keypoint to its index so surviving keypoints can be
// mapped back to their original position (and thus their 3D point).
1485 std::map<size_t, size_t> mapOfKeypointHashes;
1487 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1489 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1492 std::vector<cv::Point3f> trainPoints_tmp;
1493 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1494 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1495 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
// Replace the train points with the filtered, re-aligned list.
1500 *trainPoints = trainPoints_tmp;
1505 itd->second->compute(matImg, keyPoints, descriptors);
// Subsequent extractors: compute into `desc`, then re-align and concatenate.
1511 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1515 itd->second->compute(matImg, keyPoints, desc);
1517 if (keyPoints.size() != keyPoints_tmp.size()) {
1521 std::map<size_t, size_t> mapOfKeypointHashes;
1523 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1525 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1528 std::vector<cv::Point3f> trainPoints_tmp;
1529 cv::Mat descriptors_tmp;
1530 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1531 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1532 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1533 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
// Keep only descriptor rows whose keypoint survived this extractor.
1536 if (!descriptors.empty()) {
1537 descriptors_tmp.push_back(descriptors.row(
static_cast<int>(mapOfKeypointHashes[myKeypointHash(*it)])));
1542 if (trainPoints !=
nullptr) {
1544 *trainPoints = trainPoints_tmp;
1547 descriptors_tmp.copyTo(descriptors);
// Append this extractor's descriptors column-wise to the accumulated matrix.
1551 if (descriptors.empty()) {
1552 desc.copyTo(descriptors);
1555 cv::hconcat(descriptors, desc, descriptors);
// Sanity check: one descriptor row per keypoint.
1560 if (keyPoints.size() !=
static_cast<size_t>(descriptors.rows)) {
1561 std::cerr <<
"keyPoints.size() != static_cast<size_t>(descriptors.rows)" << std::endl;
// Filters raw matches (knn or plain, depending on m_filterType — branch conditions
// elided by the extraction) into m_filteredMatches / m_objectFilteredPoints /
// m_queryFilteredKeyPoints.
// NOTE(review): lossy extraction — several statements (mean accumulation, threshold
// selection, else-branches, braces) are missing; embedded numbers are original lines.
1566void vpKeyPoint::filterMatches()
1568 std::vector<cv::KeyPoint> queryKpts;
1569 std::vector<cv::Point3f> trainPts;
1570 std::vector<cv::DMatch> m;
// --- knn branch: collect distances of each best match to build statistics. ---
1576 double min_dist = DBL_MAX;
1578 std::vector<double> distance_vec(m_knnMatches.size());
1581 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1582 double dist = m_knnMatches[i][0].distance;
1584 distance_vec[i] = dist;
1586 if (dist < min_dist) {
1593 mean /= m_queryDescriptors.rows;
// stdev via E[X^2] - E[X]^2; threshold = min distance + one stdev.
1596 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1597 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1598 double threshold = min_dist + stdev;
// Keep a knn match if it passes the (elided) ratio/threshold tests.
1600 for (
size_t i = 0;
i < m_knnMatches.size();
i++) {
1601 if (m_knnMatches[i].size() >= 2) {
// Lowe-style ratio between best and second-best match distances.
1604 float ratio = m_knnMatches[
i][0].distance / m_knnMatches[
i][1].distance;
1609 double dist = m_knnMatches[
i][0].distance;
// queryIdx of the stored match is re-indexed to the filtered list position.
1612 m.push_back(cv::DMatch(
static_cast<int>(queryKpts.size()), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1614 if (!m_trainPoints.empty()) {
1615 trainPts.push_back(m_trainPoints[
static_cast<size_t>(m_knnMatches[i][0].trainIdx)]);
1617 queryKpts.push_back(m_queryKeyPoints[
static_cast<size_t>(m_knnMatches[i][0].queryIdx)]);
// --- non-knn branch: same statistics over m_matches. ---
1626 double min_dist = DBL_MAX;
1628 std::vector<double> distance_vec(m_matches.size());
1629 for (
size_t i = 0;
i < m_matches.size();
i++) {
1630 double dist = m_matches[
i].distance;
1632 distance_vec[
i] = dist;
1634 if (dist < min_dist) {
1641 mean /= m_queryDescriptors.rows;
1643 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1644 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Threshold computation for this branch is elided; keeps matches under it.
1654 for (
size_t i = 0;
i < m_matches.size();
i++) {
1655 if (m_matches[i].distance <= threshold) {
1656 m.push_back(cv::DMatch(
static_cast<int>(queryKpts.size()), m_matches[i].trainIdx, m_matches[i].distance));
1658 if (!m_trainPoints.empty()) {
1659 trainPts.push_back(m_trainPoints[
static_cast<size_t>(m_matches[i].trainIdx)]);
1661 queryKpts.push_back(m_queryKeyPoints[
static_cast<size_t>(m_matches[i].queryIdx)]);
// Optional second pass: drop train points matched by more than one query point.
1666 if (m_useSingleMatchFilter) {
1669 std::vector<cv::DMatch> mTmp;
1670 std::vector<cv::Point3f> trainPtsTmp;
1671 std::vector<cv::KeyPoint> queryKptsTmp;
// Count how many times each trainIdx appears.
1673 std::map<int, int> mapOfTrainIdx;
1675 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1676 mapOfTrainIdx[it->trainIdx]++;
// Keep only uniquely-matched train points, re-indexing queryIdx again.
1680 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1681 if (mapOfTrainIdx[it->trainIdx] == 1) {
1682 mTmp.push_back(cv::DMatch(
static_cast<int>(queryKptsTmp.size()), it->trainIdx, it->distance));
1684 if (!m_trainPoints.empty()) {
1685 trainPtsTmp.push_back(m_trainPoints[
static_cast<size_t>(it->trainIdx)]);
1687 queryKptsTmp.push_back(queryKpts[
static_cast<size_t>(it->queryIdx)]);
1691 m_filteredMatches = mTmp;
1692 m_objectFilteredPoints = trainPtsTmp;
1693 m_queryFilteredKeyPoints = queryKptsTmp;
// No single-match filtering: publish the first-pass results directly.
1696 m_filteredMatches = m;
1697 m_objectFilteredPoints = trainPts;
1698 m_queryFilteredKeyPoints = queryKpts;
// Fragments of simple accessors (signatures elided by the extraction):
// getObjectPoints copies the RANSAC-filtered 3D object points; the keypoint accessor
// returns either the filtered or the raw query keypoints — presumably selected by a
// boolean parameter; TODO confirm against the full source.
1704 objectPoints = m_objectFilteredPoints;
1715 keyPoints = m_queryFilteredKeyPoints;
1718 keyPoints = m_queryKeyPoints;
// Initializes the keypoint machinery: on old OpenCV (2.4.x with nonfree) the nonfree
// module must be loaded for SURF; then the configured detectors and extractors are
// instantiated. Matcher initialization is elided from this view.
1740void vpKeyPoint::init()
1743#if defined(HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
// Warning only: SURF simply stays unavailable if the nonfree module fails to load.
1745 if (!cv::initModule_nonfree()) {
1746 std::cerr <<
"Cannot init module non free, SURF cannot be used." << std::endl;
1756 initDetectors(m_detectorNames);
1757 initExtractors(m_extractorNames);
// Instantiates one OpenCV feature detector by name and stores it in m_detectors.
// A "Pyramid" prefix in the name selects a PyramidAdaptedFeatureDetector wrapper
// (several detectors warn that the pyramid wrapper should not be used with them).
// Throws (elided throw sites) when the detector is unavailable in this OpenCV build.
// NOTE(review): lossy extraction — else-branches, #else/#endif lines and braces are
// elided; embedded numbers are original file line numbers.
1761void vpKeyPoint::initDetector(
const std::string &detectorName)
// OpenCV < 3.0: the generic string factory still exists.
1763#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1764 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1766 if (m_detectors[detectorName] ==
nullptr) {
1767 std::stringstream ss_msg;
1768 ss_msg <<
"Fail to initialize the detector: " << detectorName
1769 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3.0: parse an optional "Pyramid" prefix, then dispatch on the base name.
1773 std::string detectorNameTmp = detectorName;
1774 std::string pyramid =
"Pyramid";
1775 std::size_t pos = detectorName.find(pyramid);
1776 bool usePyramid =
false;
1777 if (pos != std::string::npos) {
1778 detectorNameTmp = detectorName.substr(pos + pyramid.size());
1782 if (detectorNameTmp ==
"SIFT") {
1783#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) \
1784 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1785# if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1786 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1788 m_detectors[detectorNameTmp] = siftDetector;
1791 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1793# elif (VISP_HAVE_OPENCV_VERSION >= 0x030411)
// m_maxFeatures > 0 caps the number of SIFT features retained.
1795 cv::Ptr<cv::FeatureDetector> siftDetector;
1796 if (m_maxFeatures > 0) {
1797 siftDetector = cv::SIFT::create(m_maxFeatures);
1800 siftDetector = cv::SIFT::create();
// Older builds: SIFT lives in xfeatures2d.
1803 cv::Ptr<cv::FeatureDetector> siftDetector;
1804 siftDetector = cv::xfeatures2d::SIFT::create();
1807 m_detectors[detectorNameTmp] = siftDetector;
1810 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
1811 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1814 std::stringstream ss_msg;
1815 ss_msg <<
"Failed to initialize the SIFT 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// SURF requires the nonfree xfeatures2d build.
1819 else if (detectorNameTmp ==
"SURF") {
1820#if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
1821 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1823 m_detectors[detectorNameTmp] = surfDetector;
1826 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
1827 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1830 std::stringstream ss_msg;
1831 ss_msg <<
"Failed to initialize the SURF 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1835 else if (detectorNameTmp ==
"FAST") {
1836#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1837 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1839 m_detectors[detectorNameTmp] = fastDetector;
1842 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1845 std::stringstream ss_msg;
1846 ss_msg <<
"Failed to initialize the FAST 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1850 else if (detectorNameTmp ==
"MSER") {
1851#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1852 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1854 m_detectors[detectorNameTmp] = fastDetector;
1857 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1860 std::stringstream ss_msg;
1861 ss_msg <<
"Failed to initialize the MSER 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1865 else if (detectorNameTmp ==
"ORB") {
1866#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
// m_maxFeatures > 0 caps the number of ORB features retained.
1867 cv::Ptr<cv::FeatureDetector> orbDetector;
1868 if (m_maxFeatures > 0) {
1869 orbDetector = cv::ORB::create(m_maxFeatures);
1872 orbDetector = cv::ORB::create();
1875 m_detectors[detectorNameTmp] = orbDetector;
1878 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
1879 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1882 std::stringstream ss_msg;
1883 ss_msg <<
"Failed to initialize the ORB 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// BRISK moved to xfeatures2d in OpenCV 5.
1887 else if (detectorNameTmp ==
"BRISK") {
1888#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1889#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1890 cv::Ptr<cv::FeatureDetector> briskDetector = cv::xfeatures2d::BRISK::create();
1892 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1895 m_detectors[detectorNameTmp] = briskDetector;
1898 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
1899 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1902 std::stringstream ss_msg;
1904 ss_msg <<
"Failed to initialize the BRISK 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1908 else if (detectorNameTmp ==
"KAZE") {
1909#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1910#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1911 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::xfeatures2d::KAZE::create();
1913 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1916 m_detectors[detectorNameTmp] = kazeDetector;
1919 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
1920 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1923 std::stringstream ss_msg;
1924 ss_msg <<
"Failed to initialize the KAZE 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1928 else if (detectorNameTmp ==
"AKAZE") {
1929#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1930#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1931 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::xfeatures2d::AKAZE::create();
1933 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1936 m_detectors[detectorNameTmp] = akazeDetector;
1939 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
1940 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1943 std::stringstream ss_msg;
1944 ss_msg <<
"Failed to initialize the AKAZE 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1948 else if (detectorNameTmp ==
"GFTT") {
1949#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1950 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1952 m_detectors[detectorNameTmp] = gfttDetector;
1955 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1958 std::stringstream ss_msg;
1959 ss_msg <<
"Failed to initialize the GFTT 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1963 else if (detectorNameTmp ==
"SimpleBlob") {
1964#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1965 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1967 m_detectors[detectorNameTmp] = simpleBlobDetector;
1970 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1973 std::stringstream ss_msg;
1974 ss_msg <<
"Failed to initialize the SimpleBlob 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// STAR has only ever been available through xfeatures2d (or OpenCV 2.x core).
1978 else if (detectorNameTmp ==
"STAR") {
1979#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))) \
1980 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1981 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1983 m_detectors[detectorNameTmp] = starDetector;
1986 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1989 std::stringstream ss_msg;
1990 ss_msg <<
"Failed to initialize the STAR 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
1994 else if (detectorNameTmp ==
"AGAST") {
1995#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1996#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1997 cv::Ptr<cv::FeatureDetector> agastDetector = cv::xfeatures2d::AgastFeatureDetector::create();
1999 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2002 m_detectors[detectorNameTmp] = agastDetector;
2005 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2008 std::stringstream ss_msg;
// NOTE(review): message says "STAR" in the AGAST branch — likely a copy-paste
// slip in the original; left untouched since the full context is not visible.
2009 ss_msg <<
"Failed to initialize the STAR 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2013 else if (detectorNameTmp ==
"MSD") {
2014#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2015 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2017 m_detectors[detectorNameTmp] = msdDetector;
2020 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2021 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2024 std::stringstream ss_msg;
2025 ss_msg <<
"Failed to initialize the MSD 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// Unknown detector name: warn only; the initialization check below will throw.
2030 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Final verification that the detector was actually created (key depends on
// whether the pyramid wrapper was requested).
2033 bool detectorInitialized =
false;
2036 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2040 detectorInitialized = !m_detectors[detectorName].empty();
2043 if (!detectorInitialized) {
2044 std::stringstream ss_msg;
2045 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2046 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initializes every detector in the given list (loop body, presumably a call to
// initDetector(*it), is elided by the extraction).
2052void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2054 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Instantiates one OpenCV descriptor extractor by name and stores it in m_extractors.
// Throws (elided throw sites) when the extractor is unavailable in this OpenCV build.
// NOTE(review): lossy extraction — #else/#endif lines, braces and throw statements
// are elided; embedded numbers are original file line numbers.
2059void vpKeyPoint::initExtractor(
const std::string &extractorName)
// OpenCV < 3.0: generic string factory.
2061#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2062 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2064 if (extractorName ==
"SIFT") {
2065#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2067# if (VISP_HAVE_OPENCV_VERSION >= 0x030411)
2068 m_extractors[extractorName] = cv::SIFT::create();
2070 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2073 std::stringstream ss_msg;
2074 ss_msg <<
"Fail to initialize the SIFT 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2078 else if (extractorName ==
"SURF") {
2079#if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
// Last argument requests extended (128-element) SURF descriptors.
2081 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2083 std::stringstream ss_msg;
2084 ss_msg <<
"Fail to initialize the SURF 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2088 else if (extractorName ==
"ORB") {
2089#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2090 m_extractors[extractorName] = cv::ORB::create();
2092 std::stringstream ss_msg;
2093 ss_msg <<
"Fail to initialize the ORB 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// BRISK moved to xfeatures2d in OpenCV 5.
2097 else if (extractorName ==
"BRISK") {
2098#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2099#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
2100 m_extractors[extractorName] = cv::xfeatures2d::BRISK::create();
2102 m_extractors[extractorName] = cv::BRISK::create();
2105 std::stringstream ss_msg;
2106 ss_msg <<
"Fail to initialize the BRISK 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2110 else if (extractorName ==
"FREAK") {
2111#if defined(HAVE_OPENCV_XFEATURES2D)
2112 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2114 std::stringstream ss_msg;
2115 ss_msg <<
"Fail to initialize the FREAK 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2119 else if (extractorName ==
"BRIEF") {
2120#if defined(HAVE_OPENCV_XFEATURES2D)
2121 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2123 std::stringstream ss_msg;
2124 ss_msg <<
"Fail to initialize the BRIEF 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2128 else if (extractorName ==
"KAZE") {
2129#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2130#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
2131 m_extractors[extractorName] = cv::xfeatures2d::KAZE::create();
2133 m_extractors[extractorName] = cv::KAZE::create();
2136 std::stringstream ss_msg;
2137 ss_msg <<
"Fail to initialize the KAZE 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2141 else if (extractorName ==
"AKAZE") {
2142#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2143#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
2144 m_extractors[extractorName] = cv::xfeatures2d::AKAZE::create();
2146 m_extractors[extractorName] = cv::AKAZE::create();
2149 std::stringstream ss_msg;
2150 ss_msg <<
"Fail to initialize the AKAZE 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2154 else if (extractorName ==
"DAISY") {
2155#if defined(HAVE_OPENCV_XFEATURES2D)
2156 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2158 std::stringstream ss_msg;
2159 ss_msg <<
"Fail to initialize the DAISY 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2163 else if (extractorName ==
"LATCH") {
2164#if defined(HAVE_OPENCV_XFEATURES2D)
2165 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2167 std::stringstream ss_msg;
2168 ss_msg <<
"Fail to initialize the LATCH 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2172 else if (extractorName ==
"VGG") {
2173#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(HAVE_OPENCV_XFEATURES2D)
2174 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2176 std::stringstream ss_msg;
2177 ss_msg <<
"Fail to initialize the VGG 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
2181 else if (extractorName ==
"BoostDesc") {
2182#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(HAVE_OPENCV_XFEATURES2D)
2183 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2185 std::stringstream ss_msg;
2186 ss_msg <<
"Fail to initialize the BoostDesc 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// Unknown extractor name: warn only; the check below will throw.
2191 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
2195 if (!m_extractors[extractorName]) {
2196 std::stringstream ss_msg;
2197 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2198 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x only: force 128-element extended SURF descriptors via set().
2202#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2203 if (extractorName ==
"SURF") {
2205 m_extractors[extractorName]->set(
"extended", 1);
// Initializes every extractor in the list, then checks that all configured extractors
// report the same descriptor type (branch bodies — the initExtractor call and the
// mismatch handling — are elided by the extraction).
2210void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2212 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Record the first extractor's descriptor type and compare the rest against it.
2216 int descriptorType = CV_32F;
2217 bool firstIteration =
true;
2218 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2219 it != m_extractors.end(); ++it) {
2220 if (firstIteration) {
2221 firstIteration =
false;
2222 descriptorType = it->second->descriptorType();
2225 if (descriptorType != it->second->descriptorType()) {
// Populates m_mapOfDetectorNames / m_mapOfDescriptorNames with the names that are
// actually available in this OpenCV build (many map assignments are elided by the
// extraction — only the conditional ones remain visible).
2232void vpKeyPoint::initFeatureNames()
2235#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2242#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2244#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2250#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))) \
2251 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2252 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2254#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2255 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
2257#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) \
2258 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2259 m_mapOfDetectorNames[DETECTOR_SIFT] =
"SIFT";
2261#if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
2262 m_mapOfDetectorNames[DETECTOR_SURF] =
"SURF";
// Descriptor names follow, guarded by the same build-availability conditions.
2265#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2268#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2271#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2275#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) \
2276 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2277 m_mapOfDescriptorNames[DESCRIPTOR_SIFT] =
"SIFT";
2279#if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
2280 m_mapOfDescriptorNames[DESCRIPTOR_SURF] =
"SURF";
2282#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2283#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2288#if defined(HAVE_OPENCV_XFEATURES2D)
2292#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(HAVE_OPENCV_XFEATURES2D)
2293 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2294 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// Body of vpKeyPoint::initMatcher (signature head elided by the extraction): creates
// the descriptor matcher named `matcherName`. For "FlannBased" it picks the FLANN
// index by descriptor type — LSH for binary (CV_8U) descriptors, KD-tree otherwise;
// any other name goes through cv::DescriptorMatcher::create.
// Determine the descriptor type produced by the configured extractors.
2300 int descriptorType = CV_32F;
2301 bool firstIteration =
true;
2302 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2303 it != m_extractors.end(); ++it) {
2304 if (firstIteration) {
2305 firstIteration =
false;
2306 descriptorType = it->second->descriptorType();
2309 if (descriptorType != it->second->descriptorType()) {
2315 if (matcherName ==
"FlannBased") {
// Without extractors, fall back to assuming CV_32F descriptors.
2316 if (m_extractors.empty()) {
2317 std::cout <<
"Warning: No extractor initialized, by default use "
2318 "floating values (CV_32F) "
2319 "for descriptor type !"
// Binary descriptors need an LSH index; float descriptors use a KD-tree.
2323 if (descriptorType == CV_8U) {
2324#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2325 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2327 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2331#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2332 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2334 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2339 m_matcher = cv::DescriptorMatcher::create(matcherName);
// OpenCV 2.4.x BruteForce matcher: propagate the cross-check option (knn matching
// is incompatible with cross-check, hence the !m_useKnn condition).
2342#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2343 if (m_matcher !=
nullptr && !m_useKnn && matcherName ==
"BruteForce") {
2344 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2349 std::stringstream ss_msg;
2350 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2351 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Fragments of two simple insertImageMatching overloads (signatures elided): each
// pastes the reference image and the current image side by side into IMatching —
// topLeftCorner computation is not visible here; TODO confirm against the full source.
2360 IMatching.
insert(IRef, topLeftCorner);
2362 IMatching.
insert(ICurrent, topLeftCorner);
2369 IMatching.
insert(IRef, topLeftCorner);
2371 IMatching.
insert(ICurrent, topLeftCorner);
// insertImageMatching (mosaic variant, signature elided): pastes every training image
// into a square mosaic and puts the current image in the median cell — same layout as
// displayMatching. Embedded numbers are original file line numbers; some lines elided.
2378 int nbImg =
static_cast<int>(m_mapOfImages.size() + 1);
2380 if (m_mapOfImages.empty()) {
2381 std::cerr <<
"There is no training image loaded !" << std::endl;
// Smallest square grid holding all training images plus the current one.
2391 int nbImgSqrt =
vpMath::round(std::sqrt(
static_cast<double>(nbImg)));
2392 int nbWidth = nbImgSqrt;
2393 int nbHeight = nbImgSqrt;
2395 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size is the maximum training-image width/height.
2400 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2402 if (maxW < it->second.getWidth()) {
2403 maxW = it->second.getWidth();
2406 if (maxH < it->second.getHeight()) {
2407 maxH = it->second.getHeight();
// Median cell is reserved for the current image.
2413 int medianI = nbHeight / 2;
2414 int medianJ = nbWidth / 2;
2415 int medianIndex = medianI * nbWidth + medianJ;
// Paste each training image; cells at/after the median are shifted by one.
2418 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2420 int local_cpt = cpt;
2421 if (cpt >= medianIndex) {
2426 int indexI = local_cpt / nbWidth;
2427 int indexJ = local_cpt - (indexI * nbWidth);
2428 vpImagePoint topLeftCorner(
static_cast<int>(maxH) * indexI,
static_cast<int>(maxW) * indexJ);
2430 IMatching.
insert(it->second, topLeftCorner);
// Finally paste the current image into the median cell.
2433 vpImagePoint topLeftCorner(
static_cast<int>(maxH) * medianI,
static_cast<int>(maxW) * medianJ);
2434 IMatching.
insert(ICurrent, topLeftCorner);
// Second mosaic variant of insertImageMatching (signature elided): identical layout
// to the previous overload, but the visible insert at the grid cells uses IRef —
// presumably the color/reference flavor of the routine; TODO confirm against the
// full source. Embedded numbers are original file line numbers; some lines elided.
2442 int nbImg =
static_cast<int>(m_mapOfImages.size() + 1);
2444 if (m_mapOfImages.empty()) {
2445 std::cerr <<
"There is no training image loaded !" << std::endl;
// Smallest square grid holding all images.
2457 int nbImgSqrt =
vpMath::round(std::sqrt(
static_cast<double>(nbImg)));
2458 int nbWidth = nbImgSqrt;
2459 int nbHeight = nbImgSqrt;
2461 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size is the maximum training-image width/height.
2466 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2468 if (maxW < it->second.getWidth()) {
2469 maxW = it->second.getWidth();
2472 if (maxH < it->second.getHeight()) {
2473 maxH = it->second.getHeight();
// Median cell is reserved for the current image.
2479 int medianI = nbHeight / 2;
2480 int medianJ = nbWidth / 2;
2481 int medianIndex = medianI * nbWidth + medianJ;
// Walk the training images; cells at/after the median are shifted by one.
2484 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2486 int local_cpt = cpt;
2487 if (cpt >= medianIndex) {
2492 int indexI = local_cpt / nbWidth;
2493 int indexJ = local_cpt - (indexI * nbWidth);
2494 vpImagePoint topLeftCorner(
static_cast<int>(maxH) * indexI,
static_cast<int>(maxW) * indexJ);
2498 IMatching.
insert(IRef, topLeftCorner);
// Current image goes into the median cell.
2501 vpImagePoint topLeftCorner(
static_cast<int>(maxH) * medianI,
static_cast<int>(maxW) * medianJ);
2502 IMatching.
insert(ICurrent, topLeftCorner);
// loadConfigFile (signature elided): resets the configured detectors/extractors and
// re-reads the settings from an XML configuration file via the pugixml-backed parser.
// Only compiled when pugixml support is available.
2508#if defined(VISP_HAVE_PUGIXML)
// Drop current configuration before parsing the new one.
2513 m_detectorNames.clear();
2514 m_extractorNames.clear();
2515 m_detectors.clear();
2516 m_extractors.clear();
2518 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
2521 xmlp.
parse(configFile);
// NOTE(review): fragment of vpKeyPoint learning-data loading, binary file
// format. The enclosing signature and many statements (braces, file reads)
// are missing from this view; comments are limited to what is visible.
2583 int startClassId = 0;
2584 int startImageId = 0;
// When not appending, forget everything learned so far.
2586 m_trainKeyPoints.clear();
2587 m_trainPoints.clear();
2588 m_mapOfImageId.clear();
2589 m_mapOfImages.clear();
// Compute the next free class id / image id so freshly loaded data does not
// collide with entries already present (append mode).
2593 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2594 if (startClassId < it->first) {
2595 startClassId = it->first;
2600 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2602 if (startImageId < it->first) {
2603 startImageId = it->first;
// Open the learning file in binary mode.
2610 if (!parent.empty()) {
2615 std::ifstream file(filename.c_str(), std::ifstream::binary);
2616 if (!file.is_open()) {
2624#if !defined(VISP_HAVE_MODULE_IO)
2626 std::cout <<
"Warning: The learning file contains image data that will "
2627 "not be loaded as visp_io module "
2628 "is not available !"
// Read each stored training image: a path of `length` characters, read one
// char at a time, then NUL-terminated — presumably followed by an image id
// used as key below; TODO confirm the record layout.
2633 for (
int i = 0; i < nbImgs; i++) {
2641 char *path =
new char[length + 1];
2643 for (
int cpt = 0; cpt < length; cpt++) {
2645 file.read((
char *)(&c),
sizeof(c));
2648 path[length] =
'\0';
2651#ifdef VISP_HAVE_MODULE_IO
2660 m_mapOfImages[
id + startImageId] = I;
// Flag telling whether 3D object coordinates accompany each keypoint.
2668 int have3DInfoInt = 0;
2670 bool have3DInfo = have3DInfoInt != 0;
// 5 == CV_32F in OpenCV's depth encoding; used as the default here.
2681 int descriptorType = 5;
2684 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
// One record per descriptor row: keypoint fields, optional 3D point, then
// nCols descriptor values whose binary width depends on descriptorType.
2685 for (
int i = 0; i < nRows; i++) {
2687 float u, v, size, angle, response;
2688 int octave, class_id, image_id;
// Shift class ids so they stay unique across appended learning files.
2697 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2698 m_trainKeyPoints.push_back(keyPoint);
2700 if (image_id != -1) {
2701#ifdef VISP_HAVE_MODULE_IO
// Remember which training image this keypoint came from.
2703 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2713 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2716 for (
int j = 0; j < nCols; j++) {
// Read each descriptor element with the width matching its OpenCV type.
2718 switch (descriptorType) {
2720 unsigned char value;
2721 file.read((
char *)(&value),
sizeof(value));
2722 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2727 file.read((
char *)(&value),
sizeof(value));
2728 trainDescriptorsTmp.at<
char>(i, j) = value;
2732 unsigned short int value;
2734 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2740 trainDescriptorsTmp.at<
short int>(i, j) = value;
2746 trainDescriptorsTmp.at<
int>(i, j) = value;
2752 trainDescriptorsTmp.at<
float>(i, j) = value;
2758 trainDescriptorsTmp.at<
double>(i, j) = value;
2764 trainDescriptorsTmp.at<
float>(i, j) = value;
// Either replace or append to the already known descriptors.
2770 if (!append || m_trainDescriptors.empty()) {
2771 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2774 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// NOTE(review): fragment of vpKeyPoint learning-data loading, XML format via
// pugixml. Missing lines (braces, stream setup, index bookkeeping) make the
// fragment non-contiguous; comments cover only what is visible.
2780#if defined(VISP_HAVE_PUGIXML)
2781 pugi::xml_document doc;
2784 if (!doc.load_file(filename.c_str())) {
2788 pugi::xml_node root_element = doc.document_element();
2790 int descriptorType = CV_32F;
2791 int nRows = 0, nCols = 0;
2794 cv::Mat trainDescriptorsTmp;
// Walk the top-level children of <LearningData>: image info, descriptor
// matrix metadata, then one node per descriptor/keypoint record.
2796 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2797 first_level_node = first_level_node.next_sibling()) {
2799 std::string name(first_level_node.name());
2800 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
2802 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2803 image_info_node = image_info_node.next_sibling()) {
2804 name = std::string(image_info_node.name());
2806 if (name ==
"trainImg") {
// Each <trainImg> carries an image_id attribute and the image path as text.
2808 int id = image_info_node.attribute(
"image_id").as_int();
2811#ifdef VISP_HAVE_MODULE_IO
2812 std::string path(image_info_node.text().as_string());
2822 m_mapOfImages[
id + startImageId] = I;
2827 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
// Descriptor-matrix metadata: dimensions and OpenCV element type.
2828 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2829 descriptors_info_node = descriptors_info_node.next_sibling()) {
2830 if (descriptors_info_node.type() == pugi::node_element) {
2831 name = std::string(descriptors_info_node.name());
2833 if (name ==
"nrows") {
2834 nRows = descriptors_info_node.text().as_int();
2836 else if (name ==
"ncols") {
2837 nCols = descriptors_info_node.text().as_int();
2839 else if (name ==
"type") {
2840 descriptorType = descriptors_info_node.text().as_int();
2845 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2847 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
2848 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2849 int octave = 0, class_id = 0, image_id = 0;
2850 double oX = 0.0, oY = 0.0, oZ = 0.0;
2852 std::stringstream ss;
2854 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2855 point_node = point_node.next_sibling()) {
2856 if (point_node.type() == pugi::node_element) {
2857 name = std::string(point_node.name());
2861 u = point_node.text().as_double();
2863 else if (name ==
"v") {
2864 v = point_node.text().as_double();
2866 else if (name ==
"size") {
2867 size = point_node.text().as_double();
2869 else if (name ==
"angle") {
2870 angle = point_node.text().as_double();
2872 else if (name ==
"response") {
2873 response = point_node.text().as_double();
2875 else if (name ==
"octave") {
2876 octave = point_node.text().as_int();
2878 else if (name ==
"class_id") {
2879 class_id = point_node.text().as_int();
// The keypoint is built when class_id is read — presumably the last of the
// scalar fields in the file layout; class ids are shifted for append mode.
2880 cv::KeyPoint keyPoint(cv::Point2f(
static_cast<float>(u),
static_cast<float>(v)),
static_cast<float>(size),
2881 static_cast<float>(angle),
static_cast<float>(response), octave, (class_id + startClassId));
2882 m_trainKeyPoints.push_back(keyPoint);
2884 else if (name ==
"image_id") {
2885 image_id = point_node.text().as_int();
2886 if (image_id != -1) {
2887#ifdef VISP_HAVE_MODULE_IO
2889 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2893 else if (name ==
"oX") {
2894 oX = point_node.text().as_double();
2896 else if (name ==
"oY") {
2897 oY = point_node.text().as_double();
2899 else if (name ==
"oZ") {
// oZ is the last 3D coordinate: the cv::Point3f is completed here.
2900 oZ = point_node.text().as_double();
2901 m_trainPoints.push_back(cv::Point3f(
static_cast<float>(oX),
static_cast<float>(oY),
static_cast<float>(oZ)));
2903 else if (name ==
"desc") {
// <desc> holds one <val> child per descriptor column, parsed as text and
// converted according to the descriptor element type.
2906 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2907 descriptor_value_node = descriptor_value_node.next_sibling()) {
2909 if (descriptor_value_node.type() == pugi::node_element) {
2911 std::string parseStr(descriptor_value_node.text().as_string());
2916 switch (descriptorType) {
2921 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
2928 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
2932 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2936 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2940 ss >> trainDescriptorsTmp.at<
int>(i, j);
2944 ss >> trainDescriptorsTmp.at<
float>(i, j);
2948 ss >> trainDescriptorsTmp.at<
double>(i, j);
2952 ss >> trainDescriptorsTmp.at<
float>(i, j);
2957 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
// Either replace or append to the already known descriptors.
2970 if (!append || m_trainDescriptors.empty()) {
2971 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2974 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// Feed the (re)built descriptor set to the matcher and account for the
// newly loaded images.
2987 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2993 m_currentImageId =
static_cast<int>(m_mapOfImages.size());
// NOTE(review): fragment of vpKeyPoint::match(trainDescriptors,
// queryDescriptors, matches, elapsedTime) — the first parameters of the
// signature are missing from this view.
2997 std::vector<cv::DMatch> &matches,
double &elapsedTime)
3002 m_knnMatches.clear();
// knn branch. With m_useMatchTrainToQuery the matching direction is
// reversed (train -> query) on a cloned matcher, then each match's
// query/train indices are swapped so results look like query -> train.
3004 if (m_useMatchTrainToQuery) {
3005 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3008 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3009 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3011 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3012 it1 != knnMatchesTmp.end(); ++it1) {
3013 std::vector<cv::DMatch> tmp;
3014 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
// Swap query/train indices of each knn match.
3015 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3017 m_knnMatches.push_back(tmp);
// Collapse each knn pair to its best neighbour (knnToDMatch helper).
3020 matches.resize(m_knnMatches.size());
3021 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3025 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3026 matches.resize(m_knnMatches.size());
3027 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Plain (non-knn) matching branch, with the same optional direction swap.
3033 if (m_useMatchTrainToQuery) {
3034 std::vector<cv::DMatch> matchesTmp;
3036 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3037 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3039 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3040 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3045 m_matcher->match(queryDescriptors, matches);
// NOTE(review): fragments of two matchPoint() overloads — the one taking an
// image (detect + extract + delegate) and the one taking already-computed
// keypoints/descriptors. Signatures and several braces are missing here.
3069 if (m_trainDescriptors.empty()) {
3070 std::cerr <<
"Reference is empty." << std::endl;
3072 std::cerr <<
"Reference is not computed." << std::endl;
3074 std::cerr <<
"Matching is not possible." << std::endl;
// Affine-simulation detection (ASIFT-style): detect/extract under several
// simulated viewpoints, then flatten keypoints and descriptors.
3079 if (m_useAffineDetection) {
3080 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3081 std::vector<cv::Mat> listOfQueryDescriptors;
3087 m_queryKeyPoints.clear();
3088 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3089 it != listOfQueryKeyPoints.end(); ++it) {
3090 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3094 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3098 it->copyTo(m_queryDescriptors);
3101 m_queryDescriptors.push_back(*it);
3106 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3107 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
// Delegate to the keypoints/descriptors overload below.
3110 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3115 m_queryKeyPoints = queryKeyPoints;
3116 m_queryDescriptors = queryDescriptors;
3118 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3121 m_queryFilteredKeyPoints.clear();
3122 m_objectFilteredPoints.clear();
3123 m_filteredMatches.clear();
// In train-to-query mode matches are renumbered so queryIdx indexes the
// filtered keypoint list being built.
3128 if (m_useMatchTrainToQuery) {
3130 m_queryFilteredKeyPoints.clear();
3131 m_filteredMatches.clear();
3132 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3133 m_filteredMatches.push_back(cv::DMatch(
static_cast<int>(m_queryFilteredKeyPoints.size()), it->trainIdx, it->distance));
3134 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[
static_cast<size_t>(it->queryIdx)]);
3138 m_queryFilteredKeyPoints = m_queryKeyPoints;
3139 m_filteredMatches = m_matches;
// When 3D training points exist, keep the 3D point matched to each match.
3142 if (!m_trainPoints.empty()) {
3143 m_objectFilteredPoints.clear();
3147 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3149 m_objectFilteredPoints.push_back(m_trainPoints[
static_cast<size_t>(it->trainIdx)]);
// Return the number of matches kept after filtering.
3158 return static_cast<unsigned int>(m_filteredMatches.size());
// NOTE(review): two thin delegating overloads (grayscale and color image);
// they discard the error/elapsedTime outputs of the full overload.
3170 double error, elapsedTime;
3171 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3177 double error, elapsedTime;
3178 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
// NOTE(review): fragment of the matchPoint() overload that also estimates
// the object pose cMo with RANSAC. Signature and several braces/else
// keywords are missing from this view.
3186 if (m_trainDescriptors.empty()) {
3187 std::cerr <<
"Reference is empty." << std::endl;
3189 std::cerr <<
"Reference is not computed." << std::endl;
3191 std::cerr <<
"Matching is not possible." << std::endl;
// Detection/extraction, optionally through affine view simulation.
3196 if (m_useAffineDetection) {
3197 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3198 std::vector<cv::Mat> listOfQueryDescriptors;
3204 m_queryKeyPoints.clear();
3205 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3206 it != listOfQueryKeyPoints.end(); ++it) {
3207 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3211 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3215 it->copyTo(m_queryDescriptors);
3218 m_queryDescriptors.push_back(*it);
3223 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3224 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3227 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3229 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3232 m_queryFilteredKeyPoints.clear();
3233 m_objectFilteredPoints.clear();
3234 m_filteredMatches.clear();
// Same match renumbering as in the plain matchPoint() overload.
3239 if (m_useMatchTrainToQuery) {
3241 m_queryFilteredKeyPoints.clear();
3242 m_filteredMatches.clear();
3243 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3244 m_filteredMatches.push_back(cv::DMatch(
static_cast<int>(m_queryFilteredKeyPoints.size()), it->trainIdx, it->distance));
3245 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[
static_cast<size_t>(it->queryIdx)]);
3249 m_queryFilteredKeyPoints = m_queryKeyPoints;
3250 m_filteredMatches = m_matches;
3253 if (!m_trainPoints.empty()) {
3254 m_objectFilteredPoints.clear();
3258 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3260 m_objectFilteredPoints.push_back(m_trainPoints[
static_cast<size_t>(it->trainIdx)]);
3272 m_ransacInliers.clear();
3273 m_ransacOutliers.clear();
// Pose branch 1: ViSP virtual visual servoing (vpPose-based RANSAC).
3275 if (m_useRansacVVS) {
3276 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3280 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3281 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3285 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3287 double x = 0.0, y = 0.0;
3292 objectVpPoints[cpt] = pt;
3295 std::vector<vpPoint> inliers;
3296 std::vector<unsigned int> inlierIndex;
3298 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3300 std::map<unsigned int, bool> mapOfInlierIndex;
3301 m_matchRansacKeyPointsToPoints.clear();
// Record the inlier (keypoint, 3D point) pairs and mark their indices.
3303 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3304 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3305 m_queryFilteredKeyPoints[
static_cast<size_t>(*it)], m_objectFilteredPoints[
static_cast<size_t>(*it)]));
3306 mapOfInlierIndex[*it] =
true;
// Everything not marked inlier becomes an outlier image point.
3309 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3310 if (mapOfInlierIndex.find(
static_cast<unsigned int>(i)) == mapOfInlierIndex.end()) {
3311 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3315 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3317 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3318 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3319 m_ransacInliers.begin(), matchRansacToVpImage);
3321 elapsedTime += m_poseTime;
// Pose branch 2: OpenCV-based pose from 2D/3D correspondences.
3326 std::vector<cv::Point2f> imageFilteredPoints;
3327 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3328 std::vector<int> inlierIndex;
3329 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3331 std::map<int, bool> mapOfInlierIndex;
3332 m_matchRansacKeyPointsToPoints.clear();
3334 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3335 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3336 m_queryFilteredKeyPoints[
static_cast<size_t>(*it)], m_objectFilteredPoints[
static_cast<size_t>(*it)]));
3337 mapOfInlierIndex[*it] =
true;
3340 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3341 if (mapOfInlierIndex.find(
static_cast<int>(i)) == mapOfInlierIndex.end()) {
3342 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3346 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3348 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3349 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3350 m_ransacInliers.begin(), matchRansacToVpImage);
3352 elapsedTime += m_poseTime;
// Color overload presumably converts I_color into m_I before delegating.
3363 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
// NOTE(review): fragment of matchPointAndDetect(): decides whether the
// learned object is present and, when it is, computes the matched region's
// center of gravity. "¢erOfGravity" below looks like mojibake for
// "&centerOfGravity" ("&cent" rendered as the cent sign) — left byte-identical
// here; TODO fix the encoding at the source.
3367 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3368 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3371 if (imPts1 !=
nullptr && imPts2 !=
nullptr) {
// Mean descriptor distance over the filtered matches; also the basis of the
// "score" detection criterion (more matches / smaller distances = higher).
3378 double meanDescriptorDistanceTmp = 0.0;
3379 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3380 meanDescriptorDistanceTmp +=
static_cast<double>(it->distance);
3383 meanDescriptorDistanceTmp /=
static_cast<double>(m_filteredMatches.size());
3384 double score =
static_cast<double>(m_filteredMatches.size()) / meanDescriptorDistanceTmp;
3386 if (meanDescriptorDistance !=
nullptr) {
3387 *meanDescriptorDistance = meanDescriptorDistanceTmp;
// Geometric validation needs at least 4 correspondences (homography).
3393 if (m_filteredMatches.size() >= 4) {
3395 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3397 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3399 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3400 points1[i] = cv::Point2f(m_trainKeyPoints[
static_cast<size_t>(m_filteredMatches[i].trainIdx)].pt);
3401 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[
static_cast<size_t>(m_filteredMatches[i].queryIdx)].pt);
3404 std::vector<vpImagePoint> inliers;
// Planar object: validate matches against a RANSAC homography by checking
// the reprojection error of each train point into the query image.
3405 if (isPlanarObject) {
3406#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3407 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3409 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3412 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3414 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3415 realPoint.at<
double>(0, 0) = points1[i].x;
3416 realPoint.at<
double>(1, 0) = points1[i].y;
3417 realPoint.at<
double>(2, 0) = 1.f;
3419 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3420 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3421 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3422 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
// 6.0 px reprojection threshold — hard-coded here.
3424 if (reprojectionError < 6.0) {
3425 inliers.push_back(
vpImagePoint(
static_cast<double>(points2[i].y),
static_cast<double>(points2[i].x)));
3426 if (imPts1 !=
nullptr) {
3427 imPts1->push_back(
vpImagePoint(
static_cast<double>(points1[i].y),
static_cast<double>(points1[i].x)));
3430 if (imPts2 !=
nullptr) {
3431 imPts2->push_back(
vpImagePoint(
static_cast<double>(points2[i].y),
static_cast<double>(points2[i].x)));
// Non-planar object: fall back to a fundamental-matrix RANSAC (needs >= 8
// correspondences); inliers come back in the fundamentalInliers mask.
3436 else if (m_filteredMatches.size() >= 8) {
3437 cv::Mat fundamentalInliers;
3438 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3440 for (
size_t i = 0; i < static_cast<size_t>(fundamentalInliers.rows); i++) {
3441 if (fundamentalInliers.at<uchar>(
static_cast<int>(i), 0)) {
3442 inliers.push_back(
vpImagePoint(
static_cast<double>(points2[i].y),
static_cast<double>(points2[i].x)));
3444 if (imPts1 !=
nullptr) {
3445 imPts1->push_back(
vpImagePoint(
static_cast<double>(points1[i].y),
static_cast<double>(points1[i].x)));
3448 if (imPts2 !=
nullptr) {
3449 imPts2->push_back(
vpImagePoint(
static_cast<double>(points2[i].y),
static_cast<double>(points2[i].x)));
// Center of gravity = mean (u, v) of the geometric inliers.
3455 if (!inliers.empty()) {
3462 double meanU = 0.0, meanV = 0.0;
3463 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3464 meanU += it->get_u();
3465 meanV += it->get_v();
3468 meanU /=
static_cast<double>(inliers.size());
3469 meanV /=
static_cast<double>(inliers.size());
3471 centerOfGravity.
set_u(meanU);
3472 centerOfGravity.
set_v(meanV);
// Detection verdict: either mean-distance threshold or score threshold,
// depending (presumably) on m_detectionMethod — the selecting condition is
// not visible in this fragment.
3481 return meanDescriptorDistanceTmp < m_detectionThreshold;
3484 return score > m_detectionScore;
// NOTE(review): fragment of the matchPointAndDetect() overload that also
// estimates the pose, then derives the matched region's center of gravity
// from the RANSAC inliers.
3493 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Project the learned 3D model points into the image (projection itself is
// in lines missing from this view).
3498 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3500 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3504 modelImagePoints[cpt] = imPt;
// Center of gravity = mean (u, v) of the RANSAC inliers.
3513 double meanU = 0.0, meanV = 0.0;
3514 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3515 meanU += it->get_u();
3516 meanV += it->get_v();
3519 meanU /=
static_cast<double>(m_ransacInliers.size());
3520 meanV /=
static_cast<double>(m_ransacInliers.size());
3522 centerOfGravity.
set_u(meanU);
3523 centerOfGravity.
set_v(meanV);
// NOTE(review): fragment of detectExtractAffine(): ASIFT-style detection —
// simulate a grid of affine viewpoints (tilt t, rotation phi), detect and
// extract in each warped image, then map keypoints back with the inverse
// affine transform Ai. A sequential version is followed by an OpenMP
// parallel version over precomputed (t, phi) parameters. The selecting
// condition between the two is in lines missing from this view.
3530 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3531 std::vector<cv::Mat> &listOfDescriptors,
3537 listOfKeypoints.clear();
3538 listOfDescriptors.clear();
// Tilt levels t = 2^(tl/2), rotation step shrinks as tilt grows.
3540 for (
int tl = 1; tl < 6; tl++) {
3541 double t = pow(2, 0.5 * tl);
3542 for (
int phi = 0; phi < 180; phi += static_cast<int>(72.0 / t)) {
3543 std::vector<cv::KeyPoint> keypoints;
3544 cv::Mat descriptors;
3546 cv::Mat timg, mask, Ai;
// Warp the image for this simulated viewpoint; Ai maps back to the original.
3549 affineSkew(t, phi, timg, mask, Ai);
3552 if (listOfAffineI !=
nullptr) {
3554 bitwise_and(mask, timg, img_disp);
3557 listOfAffineI->push_back(tI);
3561 cv::bitwise_and(mask, timg, img_disp);
3562 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3563 cv::imshow(
"Skew", img_disp);
// Run every configured detector on the warped image (mask limits detection
// to valid warped pixels).
3567 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3568 it != m_detectors.end(); ++it) {
3569 std::vector<cv::KeyPoint> kp;
3570 it->second->detect(timg, kp, mask);
3571 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3575 extract(timg, keypoints, descriptors, elapsedTime);
// Map keypoint coordinates back into the original image frame via Ai.
3577 for (
unsigned int i = 0; i < keypoints.size(); i++) {
3578 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3579 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3580 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3581 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3584 listOfKeypoints.push_back(keypoints);
3585 listOfDescriptors.push_back(descriptors);
// Parallel variant: precompute all (t, phi) pairs so iterations are
// independent and can be distributed with OpenMP.
3594 std::vector<std::pair<double, int> > listOfAffineParams;
3595 for (
int tl = 1; tl < 6; tl++) {
3596 double t = pow(2, 0.5 * tl);
3597 for (
int phi = 0; phi < 180; phi += static_cast<int>(72.0 / t)) {
3598 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3602 listOfKeypoints.resize(listOfAffineParams.size());
3603 listOfDescriptors.resize(listOfAffineParams.size());
3605 if (listOfAffineI !=
nullptr) {
3606 listOfAffineI->resize(listOfAffineParams.size());
3609#ifdef VISP_HAVE_OPENMP
3610#pragma omp parallel for
3612 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3613 std::vector<cv::KeyPoint> keypoints;
3614 cv::Mat descriptors;
3616 cv::Mat timg, mask, Ai;
3619 affineSkew(listOfAffineParams[
static_cast<size_t>(cpt)].first, listOfAffineParams[
static_cast<size_t>(cpt)].second, timg, mask, Ai);
3621 if (listOfAffineI !=
nullptr) {
3623 bitwise_and(mask, timg, img_disp);
// Results are written to preallocated slots, so parallel iterations never
// touch the same element.
3626 (*listOfAffineI)[
static_cast<size_t>(cpt)] = tI;
3631 cv::bitwise_and(mask, timg, img_disp);
3632 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3633 cv::imshow(
"Skew", img_disp);
3637 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3638 it != m_detectors.end(); ++it) {
3639 std::vector<cv::KeyPoint> kp;
3640 it->second->detect(timg, kp, mask);
3641 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3645 extract(timg, keypoints, descriptors, elapsedTime);
3647 for (
size_t i = 0; i < keypoints.size(); i++) {
3648 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3649 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3650 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3651 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3654 listOfKeypoints[
static_cast<size_t>(cpt)] = keypoints;
3655 listOfDescriptors[
static_cast<size_t>(cpt)] = descriptors;
// NOTE(review): fragment of a reset()/init routine restoring every member
// to its default value. Most defaults mirror the constructor initializer
// list visible at the top of this file, EXCEPT m_ransacParallel: the
// constructor uses false while this reset uses true — TODO confirm which
// default is intended.
3668 m_computeCovariance =
false;
3670 m_currentImageId = 0;
3672 m_detectionScore = 0.15;
3673 m_detectionThreshold = 100.0;
3674 m_detectionTime = 0.0;
3675 m_detectorNames.clear();
3676 m_detectors.clear();
3677 m_extractionTime = 0.0;
3678 m_extractorNames.clear();
3679 m_extractors.clear();
3680 m_filteredMatches.clear();
3683 m_knnMatches.clear();
3684 m_mapOfImageId.clear();
3685 m_mapOfImages.clear();
// Release the matcher; presumably re-created lazily from m_matcherName.
3686 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3687 m_matcherName =
"BruteForce-Hamming";
3689 m_matchingFactorThreshold = 2.0;
3690 m_matchingRatioThreshold = 0.85;
3691 m_matchingTime = 0.0;
3692 m_matchRansacKeyPointsToPoints.clear();
3693 m_nbRansacIterations = 200;
3694 m_nbRansacMinInlierCount = 100;
3695 m_objectFilteredPoints.clear();
3697 m_queryDescriptors = cv::Mat();
3698 m_queryFilteredKeyPoints.clear();
3699 m_queryKeyPoints.clear();
3700 m_ransacConsensusPercentage = 20.0;
3702 m_ransacInliers.clear();
3703 m_ransacOutliers.clear();
3704 m_ransacParallel =
true;
3705 m_ransacParallelNbThreads = 0;
3706 m_ransacReprojectionError = 6.0;
3707 m_ransacThreshold = 0.01;
3708 m_trainDescriptors = cv::Mat();
3709 m_trainKeyPoints.clear();
3710 m_trainPoints.clear();
3711 m_trainVpPoints.clear();
3712 m_useAffineDetection =
false;
// Cross-check option only exists for the OpenCV 2.4.x matcher API.
3713#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3714 m_useBruteForceCrossCheck =
true;
3716 m_useConsensusPercentage =
false;
3718 m_useMatchTrainToQuery =
false;
3719 m_useRansacVVS =
true;
3720 m_useSingleMatchFilter =
true;
// Default detector/extractor pair is ORB.
3722 m_detectorNames.push_back(
"ORB");
3723 m_extractorNames.push_back(
"ORB");
// NOTE(review): fragment of saveLearningData(): optionally writes the
// training images to disk, then serializes keypoints / 3D points /
// descriptors either in a binary format or in XML via pugixml. Signature,
// several writes and the binary/XML selection are missing from this view.
3731 if (!parent.empty()) {
3735 std::map<int, std::string> mapOfImgPath;
// Save each training image under a zero-padded sequential name whose
// extension depends on m_imageFormat, remembering id -> path.
3736 if (saveTrainingImages) {
3737#ifdef VISP_HAVE_MODULE_IO
3739 unsigned int cpt = 0;
3741 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3747 std::stringstream ss;
3748 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
3750 switch (m_imageFormat) {
3772 std::string imgFilename = ss.str();
3773 mapOfImgPath[it->first] = imgFilename;
3774 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3777 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
3778 "are not saved because "
3779 "visp_io module is not available !"
// Sanity: 3D info is only stored when each keypoint has a matching point.
3784 bool have3DInfo = m_trainPoints.size() > 0;
3785 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
// ---- Binary format ----
3791 std::ofstream file(filename.c_str(), std::ofstream::binary);
3792 if (!file.is_open()) {
3797 int nbImgs =
static_cast<int>(mapOfImgPath.size());
3800#ifdef VISP_HAVE_MODULE_IO
// Image paths are written character by character, prefixed (presumably) by
// their length and id — several writes are in lines missing here.
3801 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3807 std::string path = it->second;
3808 int length =
static_cast<int>(path.length());
3811 for (
int cpt = 0; cpt < length; cpt++) {
3812 file.write((
char *)(&path[
static_cast<size_t>(cpt)]),
sizeof(path[
static_cast<size_t>(cpt)]));
3818 int have3DInfoInt = have3DInfo ? 1 : 0;
3821 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3822 int descriptorType = m_trainDescriptors.type();
// One record per keypoint: u, v, size, angle, response, octave, class_id,
// image_id, optional 3D point, then the descriptor row.
3833 for (
int i = 0; i < nRows; i++) {
3834 unsigned int i_ =
static_cast<unsigned int>(i);
3836 float u = m_trainKeyPoints[i_].pt.x;
3840 float v = m_trainKeyPoints[i_].pt.y;
3844 float size = m_trainKeyPoints[i_].size;
3848 float angle = m_trainKeyPoints[i_].angle;
3852 float response = m_trainKeyPoints[i_].response;
3856 int octave = m_trainKeyPoints[i_].octave;
3860 int class_id = m_trainKeyPoints[i_].class_id;
3864#ifdef VISP_HAVE_MODULE_IO
3865 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
// -1 marks "no source image" for this keypoint.
3866 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3875 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3886 for (
int j = 0; j < nCols; j++) {
// Write each descriptor element with its native width.
3888 switch (descriptorType) {
3890 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
3891 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3895 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
// ---- XML format (pugixml) ----
3927#if defined(VISP_HAVE_PUGIXML)
3928 pugi::xml_document doc;
3929 pugi::xml_node node = doc.append_child(pugi::node_declaration);
3930 node.append_attribute(
"version") =
"1.0";
3931 node.append_attribute(
"encoding") =
"UTF-8";
3937 pugi::xml_node root_node = doc.append_child(
"LearningData");
3940 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
3942#ifdef VISP_HAVE_MODULE_IO
3943 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3944 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
3945 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3946 std::stringstream ss;
3948 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
3953 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
3955 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3956 int descriptorType = m_trainDescriptors.type();
3959 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
3962 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
3965 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
// One <DescriptorInfo> element per keypoint, mirroring the binary record.
3967 for (
int i = 0; i < nRows; i++) {
3968 unsigned int i_ =
static_cast<unsigned int>(i);
3969 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
3971 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3972 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3973 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3974 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3975 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3976 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3977 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3979#ifdef VISP_HAVE_MODULE_IO
3980 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3981 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
3982 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3984 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
3988 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3989 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3990 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3993 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
3995 for (
int j = 0; j < nCols; j++) {
// 8-bit values are widened to int first so they serialize as numbers, not
// as raw characters.
3996 switch (descriptorType) {
4002 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4003 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4011 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4012 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4016 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4017 m_trainDescriptors.at<
unsigned short int>(i, j);
4021 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
short int>(i, j);
4025 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
int>(i, j);
4029 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
float>(i, j);
4033 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
double>(i, j);
// Pretty-print with a single-space indent, UTF-8 encoded.
4042 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
4049#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4050#ifndef DOXYGEN_SHOULD_SKIP_THIS
4052struct KeypointResponseGreaterThanThreshold
4054 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) { }
4055 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
4059struct KeypointResponseGreater
4061 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
4065void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4069 if (n_points >= 0 && keypoints.size() >
static_cast<size_t>(n_points)) {
4070 if (n_points == 0) {
4076 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4078 float ambiguous_response = keypoints[
static_cast<size_t>(n_points - 1)].response;
4081 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4082 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4085 keypoints.resize(
static_cast<size_t>(new_end - keypoints.begin()));
4091 RoiPredicate(
const cv::Rect &_r) :
r(_r) { }
4093 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !
r.contains(keyPt.pt); }
4098void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4101 if (borderSize > 0) {
4102 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4105 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4106 RoiPredicate(cv::Rect(
4107 cv::Point(borderSize, borderSize),
4108 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4115 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
4117 bool operator()(
const cv::KeyPoint &keyPt)
const
4119 float size = keyPt.size;
4120 return (size < minSize) || (size > maxSize);
4123 float minSize, maxSize;
4126void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4128 CV_Assert(minSize >= 0);
4129 CV_Assert(maxSize >= 0);
4130 CV_Assert(minSize <= maxSize);
4132 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4138 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) { }
4139 bool operator()(
const cv::KeyPoint &key_pt)
const
4141 return mask.at<uchar>(
static_cast<int>(key_pt.pt.y + 0.5f),
static_cast<int>(key_pt.pt.x + 0.5f)) == 0;
4148void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4153 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4156struct KeyPoint_LessThan
4158 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
4159 bool operator()(
size_t i,
size_t j)
const
4161 const cv::KeyPoint &kp1 = (*kp)[
i];
4162 const cv::KeyPoint &kp2 = (*kp)[
j];
4164 std::numeric_limits<float>::epsilon())) {
4166 return kp1.pt.x < kp2.pt.x;
4170 std::numeric_limits<float>::epsilon())) {
4172 return kp1.pt.y < kp2.pt.y;
4176 std::numeric_limits<float>::epsilon())) {
4178 return kp1.size > kp2.size;
4182 std::numeric_limits<float>::epsilon())) {
4184 return kp1.angle < kp2.angle;
4188 std::numeric_limits<float>::epsilon())) {
4190 return kp1.response > kp2.response;
4193 if (kp1.octave != kp2.octave) {
4194 return kp1.octave > kp2.octave;
4197 if (kp1.class_id != kp2.class_id) {
4198 return kp1.class_id > kp2.class_id;
4203 const std::vector<cv::KeyPoint> *kp;
4206void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4208 size_t i,
j, n = keypoints.size();
4209 std::vector<size_t> kpidx(n);
4210 std::vector<uchar> mask(n, (uchar)1);
4212 for (i = 0;
i < n;
i++) {
4215 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4216 for (i = 1, j = 0;
i < n;
i++) {
4217 cv::KeyPoint &kp1 = keypoints[kpidx[
i]];
4218 cv::KeyPoint &kp2 = keypoints[kpidx[
j]];
4221 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4222 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4223 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4224 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4232 for (i = j = 0;
i < n;
i++) {
4235 keypoints[
j] = keypoints[
i];
4240 keypoints.resize(j);
4246vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
4248 : m_detector(detector), m_maxLevel(maxLevel)
4251bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4253 return m_detector.empty() || (cv::FeatureDetector *)m_detector->empty();
4256void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4257 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4259 detectImpl(image.getMat(), keypoints, mask.getMat());
4262void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4263 const cv::Mat &mask)
const
4265 cv::Mat src = image;
4266 cv::Mat src_mask = mask;
4268 cv::Mat dilated_mask;
4269 if (!mask.empty()) {
4270 cv::dilate(mask, dilated_mask, cv::Mat());
4271 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4272 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4273 dilated_mask = mask255;
4276 for (
int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4278 std::vector<cv::KeyPoint> new_pts;
4279 m_detector->detect(src, new_pts, src_mask);
4280 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4281 for (; it != end; ++it) {
4282 it->pt.x *= multiplier;
4283 it->pt.y *= multiplier;
4284 it->size *= multiplier;
4287 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4290 if (l < m_maxLevel) {
4296#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
4297 resize(dilated_mask, src_mask, src.size(), 0, 0, cv::INTER_AREA);
4299 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4305 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4312#elif !defined(VISP_BUILD_SHARED_LIBS)
4314void dummy_vpKeyPoint() { }
std::vector< unsigned int > m_matchedReferencePoints
std::vector< vpImagePoint > m_currentImagePointsList
bool m_reference_computed
std::vector< vpImagePoint > m_referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
error that can be emitted by ViSP classes.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
Definition of the vpImage class member functions.
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
unsigned int buildReference(const vpImage< unsigned char > &I) VP_OVERRIDE
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=nullptr)
void initMatcher(const std::string &matcherName)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
@ DETECTOR_KAZE
KAZE detector.
@ DETECTOR_BRISK
BRISK detector.
@ DETECTOR_AKAZE
AKAZE detector.
@ DETECTOR_MSER
MSER detector.
@ DETECTOR_AGAST
AGAST detector.
@ DETECTOR_FAST
FAST detector.
@ DETECTOR_GFTT
GFTT detector.
@ DETECTOR_ORB
ORB detector.
@ DETECTOR_SimpleBlob
SimpleBlob detector.
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=nullptr)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=nullptr, std::vector< vpImagePoint > *imPts2=nullptr, double *meanDescriptorDistance=nullptr, double *detectionScore=nullptr, const vpRect &rectangle=vpRect())
@ DESCRIPTOR_LATCH
LATCH descriptor.
@ DESCRIPTOR_AKAZE
AKAZE descriptor.
@ DESCRIPTOR_BRIEF
BRIEF descriptor.
@ DESCRIPTOR_FREAK
FREAK descriptor.
@ DESCRIPTOR_ORB
ORB descriptor.
@ DESCRIPTOR_KAZE
KAZE descriptor.
@ DESCRIPTOR_DAISY
DAISY descriptor.
@ DESCRIPTOR_BRISK
BRISK descriptor.
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3) VP_OVERRIDE
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
unsigned int matchPoint(const vpImage< unsigned char > &I) VP_OVERRIDE
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double threshold=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
std::vector< unsigned int > getRansacInlierIndex() const
void setCovarianceComputation(const bool &flag)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, FuncCheckValidityPose func=nullptr)
std::vector< vpPoint > getRansacInliers() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
@ NO_FILTER
No filter is applied.
void setUseParallelRansac(bool use)
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()