313#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
314# if defined(HAVE_OPENCV_FEATURES)
322# if defined(HAVE_OPENCV_XFEATURES2D)
330# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
334# if defined(HAVE_OPENCV_FEATURES2D)
341# if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
347# if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
350# if ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)
353#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
356# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
367#if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
368# if defined(HAVE_OPENCV_FEATURES)
372# if defined(HAVE_OPENCV_XFEATURES2D)
375 DESCRIPTOR_BoostDesc,
383# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
387# if defined(HAVE_OPENCV_FEATURES2D)
390# if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
395# if defined(HAVE_OPENCV_XFEATURES2D)
401# if ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)
404# if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
407#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
408 DESCRIPTOR_BoostDesc,
425 vpKeyPoint(
const vpFeatureDetectorType &detectorType,
const vpFeatureDescriptorType &descriptorType,
426 const std::string &matcherName,
const vpFilterMatchingType &filterType = ratioDistanceThreshold);
437 vpKeyPoint(
const std::string &detectorName =
"ORB",
const std::string &extractorName =
"ORB",
438 const std::string &matcherName =
"BruteForce-Hamming",
439 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
450 vpKeyPoint(
const std::vector<std::string> &detectorNames,
const std::vector<std::string> &extractorNames,
451 const std::string &matcherName =
"BruteForce",
452 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
472 unsigned int width) VP_OVERRIDE;
495 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
511 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
512 bool append =
false,
int class_id = -1);
555 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
570 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
571 bool append =
false,
int class_id = -1);
588 static void compute3D(
const cv::KeyPoint &candidate,
const std::vector<vpPoint> &roi,
const vpCameraParameters &cam,
626 std::vector<cv::KeyPoint> &candidates,
627 const std::vector<vpPolygon> &polygons,
628 const std::vector<std::vector<vpPoint> > &roisPt,
629 std::vector<cv::Point3f> &points, cv::Mat *descriptors =
nullptr);
648 std::vector<vpImagePoint> &candidates,
649 const std::vector<vpPolygon> &polygons,
650 const std::vector<std::vector<vpPoint> > &roisPt,
651 std::vector<vpPoint> &points, cv::Mat *descriptors =
nullptr);
670 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpCylinder> &cylinders,
671 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
672 std::vector<cv::Point3f> &points, cv::Mat *descriptors =
nullptr);
691 std::vector<vpImagePoint> &candidates,
const std::vector<vpCylinder> &cylinders,
692 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
693 std::vector<vpPoint> &points, cv::Mat *descriptors =
nullptr);
708 bool computePose(
const std::vector<cv::Point2f> &imagePoints,
const std::vector<cv::Point3f> &objectPoints,
724 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
740 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
741 std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
813 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask = cv::Mat());
834 void detect(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
845 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
846 const cv::Mat &mask = cv::Mat());
862 void detectExtractAffine(
const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
863 std::vector<cv::Mat> &listOfDescriptors,
882 void display(const
vpImage<
unsigned char> &ICurrent,
unsigned int size = 3, const
vpColor &color =
vpColor::green) VP_OVERRIDE;
913 void displayMatching(const
vpImage<
unsigned char> &IRef,
vpImage<
unsigned char> &IMatching,
unsigned int crossSize,
914 unsigned int lineThickness = 1, const
vpColor &color =
vpColor::green);
927 void displayMatching(const
vpImage<
unsigned char> &ICurrent,
vpImage<
unsigned char> &IMatching,
929 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
942 void displayMatching(const
vpImage<
unsigned char> &IRef,
vpImage<
vpRGBa> &IMatching,
unsigned int crossSize,
943 unsigned int lineThickness = 1, const
vpColor &color =
vpColor::green);
957 unsigned int lineThickness = 1, const
vpColor &color =
vpColor::green);
972 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
984 void extract(const
vpImage<
unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
985 std::vector<cv::Point3f> *trainPoints =
nullptr);
997 void extract(const
vpImage<
vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
998 std::vector<cv::Point3f> *trainPoints =
nullptr);
1010 void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1011 std::vector<cv::Point3f> *trainPoints =
nullptr);
1024 void extract(const
vpImage<
unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1025 double &elapsedTime, std::vector<cv::Point3f> *trainPoints =
nullptr);
1038 void extract(const
vpImage<
vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1039 double &elapsedTime, std::vector<cv::Point3f> *trainPoints =
nullptr);
1052 void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
double &elapsedTime,
1053 std::vector<cv::Point3f> *trainPoints =
nullptr);
1066 if (!m_computeCovariance) {
1067 std::cout <<
"Warning : The covariance matrix has not been computed. "
1068 <<
"See setCovarianceComputation() to do it."
1073 if (m_computeCovariance && !m_useRansacVVS) {
1074 std::cout <<
"Warning : The covariance matrix can only be computed "
1075 <<
"with a Virtual Visual Servoing approach." << std::endl
1076 <<
"Use setUseRansacVVS(true) to choose to use a pose "
1077 <<
"estimation method based on a Virtual Visual Servoing approach." << std::endl;
1081 return m_covarianceMatrix;
1100 std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
1101 if (it_name == m_mapOfDetectorNames.end()) {
1102 std::cerr <<
"Internal problem with the feature type and the corresponding name!" << std::endl;
1105 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
1106 m_detectors.find(it_name->second);
1107 if (findDetector != m_detectors.end()) {
1108 return findDetector->second;
1111 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
1112 return cv::Ptr<cv::FeatureDetector>();
1122 inline cv::Ptr<cv::FeatureDetector>
getDetector(
const std::string &name)
const
1124 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
1125 if (findDetector != m_detectors.end()) {
1126 return findDetector->second;
1129 std::cerr <<
"Cannot find: " << name << std::endl;
1130 return cv::Ptr<cv::FeatureDetector>();
1136 inline std::map<vpFeatureDetectorType, std::string>
getDetectorNames()
const {
return m_mapOfDetectorNames; }
1154 std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
1155 if (it_name == m_mapOfDescriptorNames.end()) {
1156 std::cerr <<
"Internal problem with the feature type and the corresponding name!" << std::endl;
1159 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
1160 m_extractors.find(it_name->second);
1161 if (findExtractor != m_extractors.end()) {
1162 return findExtractor->second;
1165 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
1166 return cv::Ptr<cv::DescriptorExtractor>();
1176 inline cv::Ptr<cv::DescriptorExtractor>
getExtractor(
const std::string &name)
const
1178 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
1179 if (findExtractor != m_extractors.end()) {
1180 return findExtractor->second;
1183 std::cerr <<
"Cannot find: " << name << std::endl;
1184 return cv::Ptr<cv::DescriptorExtractor>();
1190 inline std::map<vpFeatureDescriptorType, std::string>
getExtractorNames()
const {
return m_mapOfDescriptorNames; }
1211 inline cv::Ptr<cv::DescriptorMatcher>
getMatcher()
const {
return m_matcher; }
1219 inline std::vector<cv::DMatch>
getMatches()
const {
return m_filteredMatches; }
1230 std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
1231 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
1232 matchQueryToTrainKeyPoints.push_back(
1233 std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[
static_cast<size_t>(m_filteredMatches[i].queryIdx)],
1234 m_trainKeyPoints[
static_cast<size_t>(m_filteredMatches[i].trainIdx)]));
1236 return matchQueryToTrainKeyPoints;
1244 inline unsigned int getNbImages()
const {
return static_cast<unsigned int>(m_mapOfImages.size()); }
1253 void getObjectPoints(std::vector<cv::Point3f> &objectPoints)
const;
1262 void getObjectPoints(std::vector<vpPoint> &objectPoints)
const;
1287 void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints,
bool matches =
true)
const;
1297 void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints,
bool matches =
true)
const;
1326 void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints)
const;
1333 void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints)
const;
1341 void getTrainPoints(std::vector<cv::Point3f> &points)
const;
1349 void getTrainPoints(std::vector<vpPoint> &points)
const;
1356 void initMatcher(
const std::string &matcherName);
1402 void loadConfigFile(
const std::string &configFile);
1412 void loadLearningData(
const std::string &filename,
bool binaryMode =
false,
bool append =
false);
1422 void match(
const cv::Mat &trainDescriptors,
const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
1423 double &elapsedTime);
1445 unsigned int width) VP_OVERRIDE;
1465 unsigned int matchPoint(
const std::vector<cv::KeyPoint> &queryKeyPoints,
const cv::Mat &queryDescriptors);
1522 const bool isPlanarObject =
true, std::vector<vpImagePoint> *imPts1 =
nullptr,
1523 std::vector<vpImagePoint> *imPts2 =
nullptr,
double *meanDescriptorDistance =
nullptr,
1524 double *detectionScore =
nullptr,
const vpRect &rectangle =
vpRect());
1546 double &error,
double &elapsedTime,
vpRect &boundingBox,
vpImagePoint ¢erOfGravity,
1569 unsigned int width);
1628 void saveLearningData(
const std::string &filename,
bool binaryMode =
false,
bool saveTrainingImages =
true);
1638 m_computeCovariance = flag;
1639 if (!m_useRansacVVS) {
1640 std::cout <<
"Warning : The covariance matrix can only be computed "
1641 <<
"with a Virtual Visual Servoing approach." << std::endl
1642 <<
"Use setUseRansacVVS(true) to choose to use a pose "
1643 <<
"estimation method based on a Virtual "
1644 <<
"Visual Servoing approach." << std::endl;
1662 m_detectorNames.clear();
1663 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
1664 m_detectors.clear();
1665 initDetector(m_mapOfDetectorNames[detectorType]);
1675 m_detectorNames.clear();
1676 m_detectorNames.push_back(detectorName);
1677 m_detectors.clear();
1678 initDetector(detectorName);
1681#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1690 template <
typename T1,
typename T2,
typename T3>
1691 inline void setDetectorParameter(
const T1 detectorName,
const T2 parameterName,
const T3 value)
1693 if (m_detectors.find(detectorName) != m_detectors.end()) {
1694 m_detectors[detectorName]->set(parameterName, value);
1707 m_detectorNames.clear();
1708 m_detectors.clear();
1709 m_detectorNames = detectorNames;
1710 initDetectors(m_detectorNames);
1720 m_extractorNames.clear();
1721 m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
1722 m_extractors.clear();
1723 initExtractor(m_mapOfDescriptorNames[extractorType]);
1734 m_extractorNames.clear();
1735 m_extractorNames.push_back(extractorName);
1736 m_extractors.clear();
1737 initExtractor(extractorName);
1740#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1749 template <
typename T1,
typename T2,
typename T3>
1750 inline void setExtractorParameter(
const T1 extractorName,
const T2 parameterName,
const T3 value)
1752 if (m_extractors.find(extractorName) != m_extractors.end()) {
1753 m_extractors[extractorName]->set(parameterName, value);
1766 m_extractorNames.clear();
1767 m_extractorNames = extractorNames;
1768 m_extractors.clear();
1769 initExtractors(m_extractorNames);
1796 m_matcherName = matcherName;
1824 m_filterType = filterType;
1831#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1832 if (m_matcher !=
nullptr && m_matcherName ==
"BruteForce") {
1835 m_matcher->set(
"crossCheck",
false);
1842#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1843 if (m_matcher !=
nullptr && m_matcherName ==
"BruteForce") {
1846 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
1861 m_matchingFactorThreshold = factor;
1875 if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
1876 m_matchingRatioThreshold = ratio;
1891 if (percentage > 0.0 &&
1892 (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1893 m_ransacConsensusPercentage = percentage;
1914 m_nbRansacIterations = nbIter;
1945 if (reprojectionError > 0.0) {
1946 m_ransacReprojectionError = reprojectionError;
1950 "threshold must be positive "
1951 "as we deal with distance.");
1963 m_nbRansacMinInlierCount = minCount;
1978 if (threshold > 0.0) {
1979 m_ransacThreshold = threshold;
1995#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2002 inline void setUseBruteForceCrossCheck(
bool useCrossCheck)
2006 if (m_matcher !=
nullptr && !m_useKnn && m_matcherName ==
"BruteForce") {
2007 m_matcher->set(
"crossCheck", useCrossCheck);
2009 else if (m_matcher !=
nullptr && m_useKnn && m_matcherName ==
"BruteForce") {
2010 std::cout <<
"Warning, you try to set the crossCheck parameter with a "
2011 <<
"BruteForce matcher but knn is enabled"
2012 <<
" (the filtering method uses a ratio constraint)" << std::endl;
  //! If true, compute the covariance matrix of the estimated pose (only with the VVS pose method — see setCovarianceComputation()).
2054 bool m_computeCovariance;
  //! Identifier of the current training image — presumably incremented as reference images are added; TODO confirm.
2058 int m_currentImageId;
  //! Method used to decide whether the object is detected — semantics depend on vpDetectionMethodType; TODO confirm.
2061 vpDetectionMethodType m_detectionMethod;
  //! Detection score of the last matching.
2063 double m_detectionScore;
  //! Threshold used by the detection decision.
2066 double m_detectionThreshold;
  //! Elapsed time of the last keypoint detection step.
2068 double m_detectionTime;
  //! Names of the feature detectors in use.
2070 std::vector<std::string> m_detectorNames;
  //! Map of detector name -> OpenCV feature detector instance.
2074 std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
  //! Elapsed time of the last descriptor extraction step.
2076 double m_extractionTime;
  //! Names of the descriptor extractors in use.
2078 std::vector<std::string> m_extractorNames;
  //! Map of extractor name -> OpenCV descriptor extractor instance.
2082 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
  //! Matches remaining after filtering (returned by getMatches()).
2084 std::vector<cv::DMatch> m_filteredMatches;
  //! Match filtering method in use.
2086 vpFilterMatchingType m_filterType;
  //! Image format — presumably used when saving/loading the learning data; TODO confirm.
2088 vpImageFormatType m_imageFormat;
  //! k-nearest-neighbour matches (filled when a knn-based filter is used — TODO confirm).
2091 std::vector<std::vector<cv::DMatch> > m_knnMatches;
  //! Map of feature descriptor type -> OpenCV descriptor name.
2093 std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
  //! Map of feature detector type -> OpenCV detector name.
2095 std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
  //! Map of int -> int — presumably keypoint index to training image id; TODO confirm.
2098 std::map<int, int> m_mapOfImageId;
  //! Map of image identifier -> training image (its size is reported by getNbImages()).
2101 std::map<int, vpImage<unsigned char> > m_mapOfImages;
  //! Descriptor matcher instance (returned by getMatcher(), created by initMatcher()).
2104 cv::Ptr<cv::DescriptorMatcher> m_matcher;
  //! Name of the matcher, e.g. "BruteForce" or "BruteForce-Hamming".
2106 std::string m_matcherName;
  //! Raw (unfiltered) matches.
2108 std::vector<cv::DMatch> m_matches;
  //! Factor used by the distance-threshold match filter.
2110 double m_matchingFactorThreshold;
  //! Ratio used by the ratio-test match filter; its setter validates it to lie in ]0, 1].
2112 double m_matchingRatioThreshold;
  //! Elapsed time of the last matching step.
2114 double m_matchingTime;
  //! Pairs (query keypoint, 3D object point) kept for the RANSAC pose estimation.
2116 std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
  //! Maximum number of RANSAC iterations.
2118 int m_nbRansacIterations;
  //! Minimum number of inliers required by RANSAC.
2120 int m_nbRansacMinInlierCount;
  //! 3D object points corresponding to the filtered matches.
2123 std::vector<cv::Point3f> m_objectFilteredPoints;
  //! Descriptors extracted from the query image.
2128 cv::Mat m_queryDescriptors;
  //! Query keypoints that survived match filtering.
2130 std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
  //! All keypoints detected in the query image.
2132 std::vector<cv::KeyPoint> m_queryKeyPoints;
  //! RANSAC consensus percentage; its setter validates it to lie in ]0, 100].
2135 double m_ransacConsensusPercentage;
  //! Image points flagged as inliers by the last RANSAC pose estimation.
2139 std::vector<vpImagePoint> m_ransacInliers;
  //! Image points flagged as outliers by the last RANSAC pose estimation.
2141 std::vector<vpImagePoint> m_ransacOutliers;
  //! If true, run the RANSAC pose estimation in parallel — TODO confirm.
2143 bool m_ransacParallel;
  //! Number of threads used when m_ransacParallel is true — TODO confirm.
2145 unsigned int m_ransacParallelNbThreads;
  //! RANSAC reprojection error threshold; its setter enforces a strictly positive value.
2148 double m_ransacReprojectionError;
  //! RANSAC inlier distance threshold; its setter enforces a strictly positive value.
2151 double m_ransacThreshold;
  //! Descriptors of the training (reference) keypoints.
2155 cv::Mat m_trainDescriptors;
  //! Training (reference) keypoints.
2157 std::vector<cv::KeyPoint> m_trainKeyPoints;
  //! 3D training points as OpenCV points.
2160 std::vector<cv::Point3f> m_trainPoints;
  //! 3D training points as ViSP points.
2163 std::vector<vpPoint> m_trainVpPoints;
  //! If true, use the affine-simulation detection scheme (see detectExtractAffine()).
2166 bool m_useAffineDetection;
2167#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  //! Cross-check flag forwarded to the BruteForce matcher (OpenCV 2.x only — see setUseBruteForceCrossCheck()).
2171 bool m_useBruteForceCrossCheck;
  //! If true, RANSAC uses the consensus percentage instead of a fixed inlier count.
2175 bool m_useConsensusPercentage;
  //! If true, match train descriptors against query descriptors instead of the reverse — TODO confirm.
2182 bool m_useMatchTrainToQuery;
  //! If true, estimate the pose with the Virtual Visual Servoing approach (required for covariance computation).
2184 bool m_useRansacVVS;
  //! If true, apply an additional single-match (one-to-one) filter — TODO confirm.
2187 bool m_useSingleMatchFilter;
2201 void affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
2217 double computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
2223 void filterMatches();
2229 void init() VP_OVERRIDE;
2236 void initDetector(const std::
string &detectorNames);
2244 void initDetectors(const std::vector<std::
string> &detectorNames);
2251 void initExtractor(const std::
string &extractorName);
2259 void initExtractors(const std::vector<std::
string> &extractorNames);
2264 void initFeatureNames();
2266 inline
size_t myKeypointHash(const cv::KeyPoint &kp)
2268 size_t _val = 2166136261U, scale = 16777619U;
2271 _val = (scale * _val) ^ u.u;
2273 _val = (scale * _val) ^ u.u;
2275 _val = (scale * _val) ^ u.u;
2281 _val = (scale * _val) ^ u.u;
2282 _val = (scale * _val) ^ (
static_cast<size_t>(kp.octave));
2283 _val = (scale * _val) ^ (
static_cast<size_t>(kp.class_id));
2287#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2293 class PyramidAdaptedFeatureDetector :
public cv::FeatureDetector
2297 PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
int maxLevel = 2);
2300 virtual bool empty() const VP_OVERRIDE;
2303 virtual
void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
2304 cv::InputArray mask = cv::noArray()) VP_OVERRIDE;
2305 virtual
void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
2306 const cv::Mat &mask = cv::Mat()) const;
2308 cv::Ptr<cv::FeatureDetector> m_detector;
2318 class KeyPointsFilter
2321 KeyPointsFilter() { }
2326 static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
int borderSize);
2330 static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize = FLT_MAX);
2334 static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask);
2338 static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
2344 static void retainBest(std::vector<cv::KeyPoint> &keypoints,
int npoints);