AUV-Coop-Assembly
Master's thesis in Robotics Engineering: cooperative peg-in-hole assembly with two underwater vehicles guided by vision.
vision.cpp
#include "header/vision.h"


cv::Mat src;
int thresh = 10;
int thresh2 = 30;
int thresh_poly = 3;
cv::RNG rng(12345);
void thresh_callback(int, void* );



void MatchingMethod(int match_method, cv::Mat img, cv::Mat templ,
                    cv::Point *bestMatch, double *minMaxVal);

int main(int argc, char** argv){


    //TODO check whether this overwrites the old w2D file
//    std::ifstream srcL(initFileClickLeft, std::ios::binary);
//    std::ofstream dstL(initFileClickLeft_w2D, std::ios::binary);
//    std::ifstream srcR(initFileClickRight, std::ios::binary);
//    std::ofstream dstR(initFileClickRight_w2D, std::ios::binary);
//    dstL << srcL.rdbuf();
//    dstR << srcR.rdbuf();
//    srcL.close();
//    srcR.close();


    if (argc < 2){
        std::cout << "[VISION] Please provide the robot name for Vision" << std::endl;
        return -1;
    }

    std::string robotName = argv[1];
    std::string pathLog;
    if (LOG && (argc > 2)){ //if the LOG flag is set to 1 and a log path is given
        pathLog = argv[2];
    }
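
    // Example invocation (the package and executable names here are only illustrative):
    //   rosrun peg vision_node g500_A /tmp/visionLog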

    boost::filesystem::path sourcePath(__FILE__);
    sourcePath.remove_filename();
    std::cout << sourcePath << "\n";


    ros::init(argc, argv, robotName + "_Vision");
    std::cout << "[" << robotName << "][VISION] Start" << std::endl;
    ros::NodeHandle nh;

    InfosVision robVisInfo;

    RobotVisionInterface robotVisInterface(nh, robotName);
    robotVisInterface.init();

    std::string cameraLName = "bowtechL_C";
    std::string cameraRName = "bowtechR_C";
    std::string holeName = "hole";
    WorldInterface worldInterface("Vision2");
    worldInterface.waitReady(holeName);


    robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));

    worldInterface.getT(&robVisInfo.robotStruct.vTcameraL, robotName, cameraLName);
    worldInterface.getT(&robVisInfo.robotStruct.vTcameraR, robotName, cameraRName);
    worldInterface.getT(&robVisInfo.robotStruct.cLTcR, cameraLName, cameraRName);
    worldInterface.getT(&robVisInfo.robotStruct.cRTcL, cameraRName, cameraLName);

    // real hole pose, used to log the errors of the pose estimation
    worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);


    Logger logger;
    if (pathLog.size() > 0){
        logger = Logger(robotName, pathLog);
        logger.createDirectoryForNode();
    }


    vpImage<unsigned char> imageL_vp, imageR_vp;
    cv::Mat imageL_cv, imageR_cv;

    robotVisInterface.getLeftImage(&imageL_cv);
    robotVisInterface.getRightImage(&imageR_cv);
    //imageL_cv.convertTo(imageL_cv, CV_8U); //TODO check whether conversion to 8U is necessary
    //imageR_cv.convertTo(imageR_cv, CV_8U); //TODO check whether conversion to 8U is necessary

    vpImageConvert::convert(imageL_cv, imageL_vp);
    vpImageConvert::convert(imageR_cv, imageR_vp);

    std::vector<int> trackerTypes;
//    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
//    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
//    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);
//    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);


    vpMbGenericTracker tracker(trackerTypes);
    initTracker(&tracker, trackerTypes.size(), robVisInfo.robotStruct.cRTcL);

    vpDisplayOpenCV display_left;
    vpDisplayOpenCV display_right;
    display_left.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display_right.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display_left.init(imageL_vp, 100, 100, "Model-based tracker (Left)");
    display_right.init(imageR_vp, 110 + (int)imageL_vp.getWidth(), 100,
                       "Model-based tracker (Right)");
    bool initByClick = false;
    if (initByClick){ //init by click
        switch (trackerTypes.size()){
        case 1:
            tracker.initClick(imageL_vp, initFileClickLeft, true);
            break;
        case 2:
            tracker.initClick(imageL_vp, imageR_vp, initFileClickLeft, initFileClickRight, true);
            break;
        default:
            std::cerr << "WRONG NUMBER OF TRACKERS\n";
        }

    } else { //find the points automatically: HOW??



//        /// cv corner detection attempt (note: it does not work well) ***********************************************
//        // coordinates of the rect are its top-left corner
//        src = imageL_cv;
//        cv::namedWindow( source_window );
//        cv::imshow( source_window, src );
//        goodFeaturesToTrack_Demo( 0, 0 );
//        cv::waitKey(0);

//        cv::Mat dst, cdst, cdstP;
//        // Loads an image
//        cv::Mat src = imageL_cv;
//        // Check if image is loaded fine

//        // Edge detection
//        cv::Canny(src, dst, 50, 200, 3);
//        // Copy edges to the images that will display the results in BGR
//        cv::cvtColor(dst, cdst, cv::COLOR_GRAY2BGR);
//        cdstP = cdst.clone();
//        // Standard Hough Line Transform
//        std::vector<cv::Vec2f> lines; // will hold the results of the detection
//        cv::HoughLines(dst, lines, 1, CV_PI/180, 60, 0, 0 ); // runs the actual detection
//        // Draw the lines
//        for( size_t i = 0; i < lines.size(); i++ )
//        {
//            float rho = lines[i][0], theta = lines[i][1];
//            cv::Point pt1, pt2;
//            double a = cos(theta), b = sin(theta);
//            double x0 = a*rho, y0 = b*rho;
//            pt1.x = cvRound(x0 + 1000*(-b));
//            pt1.y = cvRound(y0 + 1000*(a));
//            pt2.x = cvRound(x0 - 1000*(-b));
//            pt2.y = cvRound(y0 - 1000*(a));
//            cv::line( cdst, pt1, pt2, cv::Scalar(0,0,255), 3, cv::LINE_AA);
//        }
//        // Probabilistic Line Transform
//        std::vector<cv::Vec4i> linesP; // will hold the results of the detection
//        cv::HoughLinesP(dst, linesP, 1, CV_PI/180, 50, 50, 10 ); // runs the actual detection
//        // Draw the lines
//        for( size_t i = 0; i < linesP.size(); i++ )
//        {
//            cv::Vec4i l = linesP[i];
//            cv::line( cdstP, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(0,0,255), 3, cv::LINE_AA);
//        }
//        // Show results
//        cv::imshow("Source", src);
//        cv::imshow("Standard Hough Line Transform", cdst);
//        cv::imshow("Probabilistic Line Transform", cdstP);
//        // Wait and Exit
//        cv::waitKey();


//        src = imageL_cv.clone();
//        //cv::blur( src, src, cv::Size(3,3) );
//        std::string source_window = "Source";

//        cv::namedWindow( source_window );
//        cv::imshow( source_window, src );
//        const int max_thresh = 255;
//        cv::createTrackbar( "Canny thresh:", source_window, &thresh, max_thresh, thresh_callback );
//        cv::createTrackbar("Canny thresh 2:", source_window, &thresh2, 500, thresh_callback );

//        cv::createTrackbar("poly thresh:", source_window, &thresh_poly, 30, thresh_callback );
//        thresh_callback( 0, 0 );
//        cv::waitKey(0);


//        std::vector<std::vector<cv::Point> > squares, orderedSquares;
//        cv::Mat image;
//        cv::cvtColor(imageL_cv, image, cv::COLOR_GRAY2BGR); //not a real color conversion, it only replicates the gray channel
//        findSquares(image, squares);
//        orderedSquares.resize(squares.size());
//        orderAngles(squares, &orderedSquares);
//        drawSquares(image, squares);


        // https://docs.opencv.org/3.4.6/de/da9/tutorial_template_matching.html

//        const char* image_window = "Source Image";

//        cv::Mat templ;
//        //= cv::imread(execPath + "templateFrontLittle.jpg", cv::IMREAD_COLOR);
//        std::cout << templ.size() << "\n";
//        cv::Mat img;
//        //img = cv::imread("/home/tori/UWsim/Peg/source10.png", cv::IMREAD_COLOR);
//        cv::cvtColor(imageL_cv, img, cv::COLOR_GRAY2BGR); //not a real color conversion, it only replicates the gray channel

//        cv::Mat img_display = img.clone();

//        cv::namedWindow( image_window, cv::WINDOW_AUTOSIZE );

//        int templ_method = cv::TM_CCOEFF_NORMED;
//        std::vector<double>scaleFactors = {1, 0.9, 0.8, 0.7, 0.75, 0.6, 0.65, 0.5,
//                                           0.48, 0.45, 0.42, 0.4, 0.38, 0.38, 0.32, 0.3,
//                                           0.28, 0.25, 0.22, 0.2, 0.15, 0.1};
//        //std::vector<double>scaleFactors = {0.3};


//        std::vector<double> minMaxValue(scaleFactors.size()),
//                            scaleUpY(scaleFactors.size()),
//                            scaleUpX(scaleFactors.size());

//        std::vector<cv::Point> bestMatch(scaleFactors.size());
//        cv::Mat imgScaled = img.clone();

//        //lastScale is the index of the last scaling factor actually used: if the loop
//        //breaks early it is updated, so only the min/max values up to it are considered
//        int lastScale=scaleFactors.size();
//        for (int i=0; i<scaleFactors.size(); i++){

//            cv::resize(img, imgScaled, cv::Size(), scaleFactors[i], scaleFactors[i]);
//            std::cout << "img size y, x: " << img.rows << " " << img.cols << "\n";
//            std::cout << "scaled img y, x: " << imgScaled.rows << " " << imgScaled.cols << "\n";
//            // the actual scale may be rounded, so computing scaleUp from the image sizes
//            // is more accurate than using 1/scaleFactors[i]
//            scaleUpY[i] = ((double)img.rows) / ((double)imgScaled.rows);
//            scaleUpX[i] = ((double)img.cols) / ((double)imgScaled.cols);

//            //if the scaled image is smaller than the template, break the loop
//            if (imgScaled.rows < templ.rows || imgScaled.cols < templ.cols){
//                std::cout << "template bigger\n";
//                lastScale = i-1;
//                break;
//            }


//            MatchingMethod(templ_method, imgScaled, templ, &(bestMatch[i]), &(minMaxValue[i]));




//        }

//        int indexBest;
//        if( templ_method == cv::TM_SQDIFF || templ_method == cv::TM_SQDIFF_NORMED ){
//            indexBest = std::distance(minMaxValue.begin(),
//                                      std::min_element(minMaxValue.begin(),
//                                                       minMaxValue.begin() + lastScale));

//        } else {
//            indexBest = std::distance(minMaxValue.begin(),
//                                      std::max_element(minMaxValue.begin(),
//                                                       minMaxValue.begin() + lastScale));
//        }

//        std::cout << "BEST ITERATION: scaling factor " << scaleFactors[indexBest]
//                  << "\n VALUE:" << minMaxValue.at(indexBest) << "\n\n";

//        cv::Point topLeft, bottomRight;
//        topLeft.x = (int)(bestMatch[indexBest].x * scaleUpX[indexBest]);
//        topLeft.y = (int)(bestMatch[indexBest].y * scaleUpY[indexBest]);
//        bottomRight.x = (int)( (bestMatch[indexBest].x + templ.cols) * scaleUpX[indexBest]);
//        bottomRight.y = (int)( (bestMatch[indexBest].y + templ.rows) * scaleUpY[indexBest]);

//        cv::rectangle( img_display, topLeft, bottomRight,
//                       cv::Scalar::all(0), 1, 8, 0 );
//        cv::imshow( image_window, img_display);
//        cv::waitKey(0);


        //original not scaled
        // cv::createTrackbar( trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod );
        //MatchingMethod( 0, 0 ,);
        //cv::waitKey(0);



        //-- Step 1: Detect the keypoints using the SURF detector, compute the descriptors
        cv::Mat img_object = cv::imread("/home/tori/UWsim/Peg/src/peg/src/vision/data/templateSideBorder.jpg",
                                        cv::IMREAD_GRAYSCALE );

        cv::Mat img_scene = imageL_cv;
        //cv::Canny(img_object, img_object, 50, 200, 3);
        //cv::Canny(img_scene, img_scene, 50, 200, 3);

        cv::Mat imgd_scene = img_scene; //for intermediate display
        cv::Mat imgd_obj = img_object; //for intermediate display

        cv::Ptr<cv::xfeatures2d::SURF> detectorSURF = cv::xfeatures2d::SURF::create(300);
        cv::Ptr<cv::xfeatures2d::SURF> detectorSURF2 = cv::xfeatures2d::SURF::create(
                    100, 4, 3, true);
        cv::Ptr<cv::xfeatures2d::SIFT> detectorSIFT = cv::xfeatures2d::SIFT::create(
                    0, 3, 0.04, 10);
        cv::Ptr<cv::xfeatures2d::SIFT> detectorSIFT2 = cv::xfeatures2d::SIFT::create(
                    0, 3, 0.02, 15);
        std::vector<cv::KeyPoint> keypoints_object, keypoints_scene;
        cv::Mat descriptors_object, descriptors_scene;
        detectorSURF2->detectAndCompute( img_object, cv::noArray(), keypoints_object, descriptors_object );
        detectorSURF2->detectAndCompute( img_scene, cv::noArray(), keypoints_scene, descriptors_scene );
        std::cout << "points in template: " << keypoints_object.size() << "\n\n";
        std::cout << "points in scene: " << keypoints_scene.size() << "\n\n";
        cv::drawKeypoints(img_scene, keypoints_scene, imgd_scene, cv::Scalar(255,0,0));
        cv::drawKeypoints(img_object, keypoints_object, imgd_obj, cv::Scalar(255,0,0));
        cv::imshow("scene keypoints", imgd_scene);
        cv::imshow("template keypoints", imgd_obj);
        cv::waitKey();


        //-- Step 2: Matching descriptor vectors with a FLANN based matcher
        // Since SURF is a floating-point descriptor, NORM_L2 is used
        cv::Ptr<cv::DescriptorMatcher> matcher =
                cv::DescriptorMatcher::create(cv::DescriptorMatcher::FLANNBASED);
        std::vector< std::vector<cv::DMatch> > knn_matches;
        matcher->knnMatch( descriptors_object, descriptors_scene, knn_matches, 2 );

        //-- Filter matches using Lowe's ratio test
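        // A candidate match is kept only when the distance to its best match is clearly
        // smaller than the distance to the second-best one (here within 95%); ambiguous
        // descriptors that match several scene keypoints almost equally well are discarded.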
        const float ratio_thresh = 0.95f;
        std::vector<cv::DMatch> good_matches;
        std::cout << "knn matches: " << knn_matches.size() << "\n\n";

        for (size_t i = 0; i < knn_matches.size(); i++)
        {
            if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
            {
                good_matches.push_back(knn_matches[i][0]);
            }
        }

        std::cout << "good matches: " << good_matches.size() << "\n\n";

        //-- Draw matches
        cv::Mat img_matches;
        cv::drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                         good_matches, img_matches, cv::Scalar::all(-1),
                         cv::Scalar::all(-1), std::vector<char>(),
                         cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        //-- Localize the object
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;
        for( size_t i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
        }
        std::cout << "template good matches used for homography: " << obj.size() << "\n\n";
        std::cout << "scene good matches used for homography: " << scene.size() << "\n\n";

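        // The homography template->scene is estimated from the good matches (RANSAC rejects
        // outliers); it is then used to project the template corners into the scene image,
        // giving the outline of the detected object.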
        cv::Mat H = cv::findHomography( obj, scene, cv::RANSAC, 3 );
        std::cout << "mat result of findhomog: " << H.size() << "\n\n";
        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector<cv::Point2f> obj_corners(4);
        obj_corners[0] = cv::Point2f(0, 0);
        obj_corners[1] = cv::Point2f( (float)img_object.cols, 0 );
        obj_corners[2] = cv::Point2f( (float)img_object.cols, (float)img_object.rows );
        obj_corners[3] = cv::Point2f( 0, (float)img_object.rows );
        std::vector<cv::Point2f> scene_corners(4);
        cv::perspectiveTransform( obj_corners, scene_corners, H);
        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        cv::line( img_matches, scene_corners[0] + cv::Point2f((float)img_object.cols, 0),
                  scene_corners[1] + cv::Point2f((float)img_object.cols, 0), cv::Scalar(0, 255, 0), 4 );
        cv::line( img_matches, scene_corners[1] + cv::Point2f((float)img_object.cols, 0),
                  scene_corners[2] + cv::Point2f((float)img_object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        cv::line( img_matches, scene_corners[2] + cv::Point2f((float)img_object.cols, 0),
                  scene_corners[3] + cv::Point2f((float)img_object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        cv::line( img_matches, scene_corners[3] + cv::Point2f((float)img_object.cols, 0),
                  scene_corners[0] + cv::Point2f((float)img_object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        //-- Show detected matches
        cv::imshow("Good Matches & Object detection", img_matches );
        cv::waitKey();




        // common to all the ViSP methods
        //vpDisplayOpenCV d(imageL_vp, 500, 500, "Tracking");
        //vpDisplay::display(imageL_vp);
        //vpDisplay::flush(imageL_vp);

//        /// method: KLT tracker *++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//        vpKltOpencv tracker;
//        tracker.setMaxFeatures(200);
//        tracker.setWindowSize(10);
//        tracker.setQuality(0.015);
//        tracker.setMinDistance(15);
//        tracker.setHarrisFreeParameter(0.04);
//        tracker.setBlockSize(9);
//        tracker.setUseHarris(1);
//        tracker.setPyramidLevels(3);
//        tracker.initTracking(imageL_cv);

//        /// method: keypoint ***********************************************************************
//        vpKeyPoint keypoint_learning;
//        keypoint_learning.loadConfigFile(configFileDetector);

//        while(ros::ok()){
//            /// TRACKING keypoint 1
//            //tracker.track(imageL_cv);
//            //tracker.display(imageL_vp, vpColor::red);


//            vpDisplay::flush(imageL_vp);
//            vpDisplay::getClick(imageL_vp);
//        }



    }



    objDetectionInit(imageL_vp, &tracker);
    vpKeyPoint keypoint_detection;
    keypoint_detection.loadConfigFile(configFileDetector);
    keypoint_detection.loadLearningData(learnData, true);


    ros::Rate loop_rate(10);
    while (ros::ok()) {
        robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));
        worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);

        robotVisInterface.getLeftImage(&imageL_cv);
        robotVisInterface.getRightImage(&imageR_cv);
        imageL_cv.convertTo(imageL_cv, CV_8U); //TODO check whether conversion to 8U is necessary
        imageR_cv.convertTo(imageR_cv, CV_8U); //TODO check whether conversion to 8U is necessary
        //cut the top part of the image, where a piece of the AUV is visible and can distract the CV algorithms
        imageL_cv = imageL_cv(cv::Rect(0, 60, imageL_cv.cols, imageL_cv.rows-60));
        imageR_cv = imageR_cv(cv::Rect(0, 60, imageR_cv.cols, imageR_cv.rows-60));
        vpImageConvert::convert(imageL_cv, imageL_vp);
        vpImageConvert::convert(imageR_cv, imageR_vp);

        vpDisplay::display(imageL_vp);
        vpDisplay::display(imageR_vp);

        CMAT::TransfMatrix wThole_cmat =
                CONV::matrix_eigen2cmat(robVisInfo.transforms.wTh_eigen);

//        vpDisplay::displayText(imageL_vp, 10, 10, "Detection and localization in process...", vpColor::red);
//        vpHomogeneousMatrix cLThole;
//        double ransacError = 0.0;
//        double elapsedTime = 0.0;
//        objDetection(imageL_vp, &tracker, &keypoint_detection, &cLThole,
//                     &ransacError, &elapsedTime);

//        vpCameraParameters cam_left;
//        tracker.getCameraParameters(cam_left);

//        vpDisplay::displayFrame(imageL_vp, cLThole, cam_left, 0.25, vpColor::none, 3);


//        robVisInfo.transforms.wTh_estimated_eigen =
//                robVisInfo.robotState.wTv_eigen *
//                robVisInfo.robotStruct.vTcameraL *
//                CONV::matrix_visp2eigen(cLThole);

//        CMAT::TransfMatrix wTholeEstimated_cmat =
//                CONV::matrix_eigen2cmat(robVisInfo.transforms.wTh_estimated_eigen);

//        CMAT::Vect6 swappedError =
//                CMAT::CartError(wThole_cmat, wTholeEstimated_cmat);
//        CMAT::Vect6 error;
//        error.SetFirstVect3(swappedError.GetSecondVect3());
//        error.SetSecondVect3(swappedError.GetFirstVect3());
//        logger.logNumbers(error, "error");




        vpHomogeneousMatrix cLThole_st, cRThole_st;
        stereoTracking(imageL_vp, imageR_vp, &tracker, &cLThole_st, &cRThole_st);

        // display
        vpCameraParameters cam_left, cam_right;
        tracker.getCameraParameters(cam_left, cam_right);
        tracker.display(imageL_vp, imageR_vp, cLThole_st, cRThole_st, cam_left, cam_right, vpColor::red, 2);
        vpDisplay::displayFrame(imageL_vp, cLThole_st, cam_left, 0.25, vpColor::none, 2);
        vpDisplay::displayFrame(imageR_vp, cRThole_st, cam_right, 0.25, vpColor::none, 2);


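        // Express the estimated hole pose in the world frame by chaining the vehicle pose
        // (wTv), the fixed vehicle-to-camera transform (vTcamera) and the pose returned by
        // the tracker (camThole): wTh = wTv * vTcamera * camThole.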
        Eigen::Matrix4d wTh_estimated_stereo_left =
                robVisInfo.robotState.wTv_eigen *
                robVisInfo.robotStruct.vTcameraL *
                CONV::matrix_visp2eigen(cLThole_st);

        Eigen::Matrix4d wTh_estimated_stereo_right =
                robVisInfo.robotState.wTv_eigen *
                robVisInfo.robotStruct.vTcameraR *
                CONV::matrix_visp2eigen(cRThole_st);


        CMAT::TransfMatrix wTholeEstimated_stereoL_cmat =
                CONV::matrix_eigen2cmat(wTh_estimated_stereo_left);
        CMAT::TransfMatrix wTholeEstimated_stereoR_cmat =
                CONV::matrix_eigen2cmat(wTh_estimated_stereo_right);

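        // Compute the Cartesian error between the real and the estimated hole pose;
        // CMAT::CartError returns the two 3D components of the error in one order, and
        // the halves are swapped here into the ordering expected by the logger
        // (presumably [linear; angular]) before being written.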
        CMAT::Vect6 swappedError2 =
                CMAT::CartError(wThole_cmat, wTholeEstimated_stereoL_cmat);
        CMAT::Vect6 error2;
        error2.SetFirstVect3(swappedError2.GetSecondVect3());
        error2.SetSecondVect3(swappedError2.GetFirstVect3());
        logger.logNumbers(error2, "errorStereoL");

        CMAT::Vect6 swappedError3 =
                CMAT::CartError(wThole_cmat, wTholeEstimated_stereoR_cmat);
        CMAT::Vect6 error3;
        error3.SetFirstVect3(swappedError3.GetSecondVect3());
        error3.SetSecondVect3(swappedError3.GetFirstVect3());
        logger.logNumbers(error3, "errorStereoR");

        vpDisplay::displayText(imageL_vp, 30, 10, "A click to exit.", vpColor::red);
        vpDisplay::flush(imageL_vp);
        vpDisplay::flush(imageR_vp);


        if (vpDisplay::getClick(imageL_vp, false)) {
            return 1;
        }
        ros::spinOnce();
        loop_rate.sleep();
    }

    return 0;
}

int stereoTracking(vpImage<unsigned char> I_left, vpImage<unsigned char> I_right,
                   vpMbGenericTracker *tracker,
                   vpHomogeneousMatrix *cLeftTo, vpHomogeneousMatrix *cRightTo){
    try {

        tracker->track(I_left, I_right);
        tracker->getPose(*cLeftTo, *cRightTo);

    } catch (const vpException &e) {
        std::cerr << "Caught a ViSP exception: " << e.what() << std::endl;
    }
    return 0;
}




int initTracker(vpMbGenericTracker *tracker, int nCameras, Eigen::Matrix4d cRTcL){

    switch (nCameras){
    case 1: {
        tracker->loadConfigFile(configFileL);
        tracker->loadModel(caoModel);
    }
    break;
    case 2: {
        tracker->loadConfigFile(configFileL, configFileR);
        tracker->loadModel(caoModel, caoModel);

        std::map<std::string, vpHomogeneousMatrix> mapCameraTransf;
        mapCameraTransf["Camera1"] = vpHomogeneousMatrix(); //identity
        // note: the transform from RIGHT to LEFT must be set here, not vice versa
        mapCameraTransf["Camera2"] = CONV::transfMatrix_eigen2visp(cRTcL);

        tracker->setCameraTransformationMatrix(mapCameraTransf);

    }
    break;
    default:
        std::cerr << "WRONG NUMBER OF TRACKERS\n";
    }
    tracker->setOgreVisibilityTest(false);
    tracker->setDisplayFeatures(true);

    return 0;
}



// returns the sequence of squares detected in the image.
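// (findSquares follows the approach of the OpenCV squares.cpp sample: a square is a
// 4-vertex convex contour with a sufficiently large area whose corner angles are all
// close to 90 degrees.)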
void findSquares( const cv::Mat& image, std::vector<std::vector<cv::Point> >& squares,
                  int thresh, int N)
{

    using namespace std;
    using namespace cv;

    squares.clear();
    Mat pyr, timg, gray0(image.size(), CV_8U), gray;
    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;
    // find squares in every color plane of the image
    for( int c = 0; c < 3; c++ )
    {
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);
        // try several threshold levels
        for( int l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, thresh, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l+1)*255/N;
            }
            // find contours and store them all as a list
            findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
            vector<Point> approx;
            // test each contour
            for( size_t i = 0; i < contours.size(); i++ )
            {
                // approximate the contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(contours[i], approx, arcLength(contours[i], true)*0.02, true);
                // square contours should have 4 vertices after approximation,
                // a relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: the absolute value of the area is used because
                // the area may be positive or negative, in accordance with the
                // contour orientation
                if( approx.size() == 4 &&
                    fabs(contourArea(approx)) > 1000 &&
                    isContourConvex(approx) )
                {
                    double maxCosine = 0;
                    for( int j = 2; j < 5; j++ )
                    {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    // if the cosines of all angles are small
                    // (all angles are ~90 degrees) then write the quadrangle
                    // vertices to the resultant sequence
                    if( maxCosine < 0.3 )
                        squares.push_back(approx);
                }
            }
        }
    }
}


// the function draws all the squares in the image
void drawSquares( cv::Mat& image, const std::vector<std::vector<cv::Point> >& squares,
                  const char* wndname)
{

    using namespace std;
    using namespace cv;
    for( size_t i = 0; i < squares.size(); i++ )
    {
        const Point* p = &squares[i][0];
        int n = (int)squares[i].size();
        std::cout << "number of points: " << n << "\n";
        polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, LINE_AA);
        circle(image, squares[i][0], 5, Scalar(0,0,0), FILLED);
        circle(image, squares[i][1], 5, Scalar(255,255,0), FILLED);
        circle(image, squares[i][2], 5, Scalar(0,255,255), FILLED);
        circle(image, squares[i][3], 5, Scalar(255,255,255), FILLED);

    }
    imshow(wndname, image);
}

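// helper from the OpenCV squares sample: returns the cosine of the angle between the
// vectors pt0->pt1 and pt0->pt2; values close to 0 correspond to corners of ~90 degrees.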
double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 )
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

int orderAngles(std::vector<std::vector<cv::Point>> angles, std::vector<std::vector<cv::Point>> *orderedAngles){
    for (int i=0; i< angles.size(); i++){
        if (angles.at(i).size() != 4){
            std::cerr << "There must be exactly 4 corners\n";
            return -1;
        }
        orderAngles(angles.at(i), &(orderedAngles->at(i)));
    }
    return 0;
}

int orderAngles(std::vector<cv::Point> angles, std::vector<cv::Point> *orderedAngles){

    if (angles.size() != 4){
        std::cerr << "There must be exactly 4 corners\n";
        return -1;
    }

    cv::Point center = getCenter(angles);

    orderedAngles->resize(4);
    for (int i=0; i<4; i++){

        if (angles.at(i).x < center.x){
            if (angles.at(i).y < center.y){ //top left corner
                orderedAngles->at(0) = angles.at(i);
            } else { // bottom left corner
                orderedAngles->at(3) = angles.at(i);
            }

        } else {
            if (angles.at(i).y < center.y){ //top right corner
                orderedAngles->at(1) = angles.at(i);
            } else { // bottom right corner
                orderedAngles->at(2) = angles.at(i);
            }
        }
    }

    return 0;
}

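// getCenter: intersection of the line through A,B with the line through C,D, with each
// line written in the form a*x + b*y = c; it is used as an approximate center of the
// detected quadrilateral so that its corners can be classified as top/bottom, left/right.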
cv::Point getCenter(std::vector<cv::Point> points){

    cv::Point A = points.at(0);
    cv::Point B = points.at(1);
    cv::Point C = points.at(2);
    cv::Point D = points.at(3);

    // Line AB represented as a1x + b1y = c1
    double a1 = B.y - A.y;
    double b1 = A.x - B.x;
    double c1 = a1*(A.x) + b1*(A.y);

    // Line CD represented as a2x + b2y = c2
    double a2 = D.y - C.y;
    double b2 = C.x - D.x;
    double c2 = a2*(C.x) + b2*(C.y);

    double determinant = a1*b2 - a2*b1;

    if (determinant == 0)
    {
        // the two lines are parallel: fall back to the arithmetic mean of the four
        // points (the original handling of this case is not available here)
        return cv::Point((A.x + B.x + C.x + D.x) / 4, (A.y + B.y + C.y + D.y) / 4);
    }

    double x = (b2*c1 - b1*c2)/determinant;
    double y = (a1*c2 - a2*c1)/determinant;
    return cv::Point(x, y);

}


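// MatchingMethod: runs cv::matchTemplate on the given image with the chosen method and
// returns the location and score of the best match (the minimum for the SQDIFF variants,
// the maximum for all the other methods).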
void MatchingMethod(int match_method, cv::Mat img, cv::Mat templ,
                    cv::Point *bestMatch, double *minMaxVal){
    using namespace std;
    using namespace cv;

    Mat result;

    int result_cols = img.cols - templ.cols + 1;
    int result_rows = img.rows - templ.rows + 1;

    result.create( result_rows, result_cols, CV_32FC1 );

    matchTemplate( img, templ, result, match_method);

    double minVal; double maxVal; Point minLoc; Point maxLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc);
    if( match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED ) {
        *bestMatch = minLoc;
        *minMaxVal = minVal;
    } else {
        *bestMatch = maxLoc;
        *minMaxVal = maxVal;
    }


    return;
}


//void thresh_callback(int, void* )
//{
//    using namespace std;
//    using namespace cv;

//    Mat canny_output;
//    Canny( src, canny_output, thresh, thresh2, 3);

//    vector<vector<Point> > contours;
//    findContours( canny_output, contours, RETR_TREE, CHAIN_APPROX_SIMPLE );
//    vector<vector<Point> > contours_poly( contours.size() );
//    vector<Rect> boundRect( contours.size() );
//    vector<Point2f> centers( contours.size() );
//    vector<float> radius( contours.size() );
//    for( size_t i = 0; i < contours.size(); i++ )
//    {
//        approxPolyDP( contours[i], contours_poly[i], thresh_poly, true );
//        boundRect[i] = boundingRect( contours_poly[i] );
//    }
//    Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
//    std::cout << "found " << contours.size() << " contours\n";
//    for( size_t i = 0; i < contours.size(); i++ )
//    {
//        Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
//        drawContours( drawing, contours_poly, (int)i, color );
//        //rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2 );
//    }
//    imshow( "Contours", drawing );

//}




//void goodFeaturesToTrack_Demo( int, void* )
//{
//    maxCorners = MAX(maxCorners, 1);
//    std::vector<cv::Point2f> corners;
//    double qualityLevel = 0.018;
//    double minDistance = 50;
//    int blockSize = 3, gradientSize = 3;
//    bool useHarrisDetector = false;
//    double k = 0.04;
//    cv::Mat copy = src.clone();
//    cv::goodFeaturesToTrack( src,
//                             corners,
//                             maxCorners,
//                             qualityLevel,
//                             minDistance,
//                             cv::Mat(),
//                             blockSize,
//                             gradientSize,
//                             useHarrisDetector,
//                             k );
//    std::cout << "** Number of corners detected: " << corners.size() << std::endl;
//    int radius = 4;
//    for( size_t i = 0; i < corners.size(); i++ )
//    {
//        cv::circle( copy, corners[i], radius, cv::Scalar(
//                    rng.uniform(0,255), rng.uniform(0, 256), rng.uniform(0, 256)), cv::FILLED );
//    }
//    cv::namedWindow( source_window );
//    cv::imshow( source_window, copy );
//}


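// objDetectionInit: learning phase of the keypoint-based detection. Keypoints are detected
// on the current image, their 3D coordinates are computed from the tracker pose and the
// faces of the CAD model, and the result is saved as learning data for the detection phase.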
int objDetectionInit(vpImage<unsigned char> I, vpMbGenericTracker *tracker){

    try {

        vpCameraParameters cam;
        vpHomogeneousMatrix cMo;

        vpDisplayX display;

        display.init(I, 300, 300, "Model-based edge tracker");

        tracker->getCameraParameters(cam);

        tracker->track(I);

        vpKeyPoint keypoint_learning;
        keypoint_learning.loadConfigFile(configFileDetector);

        std::vector<cv::KeyPoint> trainKeyPoints;
        double elapsedTime;
        keypoint_learning.detect(I, trainKeyPoints, elapsedTime);
        // display the found points
        vpDisplay::display(I);
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
            vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 4, vpColor::red);
        }
        vpDisplay::displayText(I, 10, 10, "All Keypoints...", vpColor::red);
        vpDisplay::displayText(I, 30, 10, "Click to continue with detection...", vpColor::red);
        vpDisplay::flush(I);
        vpDisplay::getClick(I, true);

        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair
                = tracker->getPolygonFaces(false);
        polygons = pair.first;
        roisPt = pair.second;
        std::vector<cv::Point3f> points3f;
        tracker->getPose(cMo);
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints,
                                                 polygons, roisPt, points3f);
        keypoint_learning.buildReference(I, trainKeyPoints, points3f);
        keypoint_learning.saveLearningData(learnData, true);

        // display the found points
        vpDisplay::display(I);
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
            vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 4, vpColor::red);
        }
        vpDisplay::displayText(I, 10, 10, "Keypoints only on block...", vpColor::red);
        vpDisplay::displayText(I, 30, 10, "Click to continue with detection...", vpColor::red);
        vpDisplay::flush(I);
        vpDisplay::getClick(I, true);


    } catch (vpException &e) {
        std::cout << "Caught an exception: " << e << std::endl;
    }

    return 0;
}


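// objDetection: detection phase. The current image is matched against the learned keypoints;
// if the match succeeds, the resulting pose re-initializes the model-based tracker and the
// model is displayed at the estimated pose.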
int objDetection(vpImage<unsigned char> I, vpMbGenericTracker *tracker,
                 vpKeyPoint *keypoint_detection, vpHomogeneousMatrix *cMo,
                 double *error, double* elapsedTime){

    try {

        vpCameraParameters cam;
        tracker->getCameraParameters(cam);

        if (keypoint_detection->matchPoint(I, cam, *cMo, *error, *elapsedTime)) {
            tracker->setPose(I, *cMo);
            tracker->display(I, *cMo, cam, vpColor::red, 2);
        }

    } catch (vpException &e) {
        std::cout << "Caught an exception: " << e << std::endl;
    }

    return 0;

}




// /// KEYPOINT DETECTOR
//    vpImage<unsigned char> I;
//    vpImageIo::read(I, "/home/tori/UWsim/Peg/cameraL_front.png");



//    const std::string detectorName = "ORB";
//    const std::string extractorName = "ORB";
//    // Hamming distance must be used with ORB
//    const std::string matcherName = "BruteForce-Hamming";
//    vpKeyPoint::vpFilterMatchingType filterType = vpKeyPoint::ratioDistanceThreshold;
//    vpKeyPoint keypoint(detectorName, extractorName, matcherName, filterType);
//    std::cout << "Reference keypoints=" << keypoint.buildReference(I) << std::endl;

//    vpImage<unsigned char> Idisp;
//    Idisp.resize(I.getHeight(), 2 * I.getWidth());
//    Idisp.insert(I, vpImagePoint(0, 0));
//    Idisp.insert(I, vpImagePoint(0, I.getWidth()));
//    vpDisplayOpenCV d(Idisp, 0, 0, "Matching keypoints with ORB keypoints");
//    vpDisplay::display(Idisp);
//    vpDisplay::flush(Idisp);

//    while (1) {
//        Idisp.insert(I, vpImagePoint(0, I.getWidth()));
//        vpDisplay::display(Idisp);
//        vpDisplay::displayLine(Idisp, vpImagePoint(0, I.getWidth()), vpImagePoint(I.getHeight(), I.getWidth()),
//                               vpColor::white, 2);
//        unsigned int nbMatch = keypoint.matchPoint(I);
//        std::cout << "Matches=" << nbMatch << std::endl;
//        vpImagePoint iPref, iPcur;
//        for (unsigned int i = 0; i < nbMatch; i++) {
//            keypoint.getMatchedPoints(i, iPref, iPcur);
//            vpDisplay::displayLine(Idisp, iPref, iPcur + vpImagePoint(0, I.getWidth()), vpColor::green);
//        }
//        vpDisplay::flush(Idisp);
//        if (vpDisplay::getClick(Idisp, false))
//            break;
//    }
//    vpDisplay::getClick(Idisp);







    // follow the object detection tutorial




//    try {
//        vpImage<unsigned char> I;

//        vpImageIo::read(I, "/home/tori/UWsim/Peg/cameraL_front.png");
//        vpDisplayX d(I, 0, 0, "Camera view");

//        vpDisplay::display(I);
//        vpDisplay::flush(I);
//        vpMe me;
//        me.setRange(25);
//        me.setThreshold(15000);
//        me.setSampleStep(10);
//        vpMeEllipse ellipse;
//        ellipse.setMe(&me);
//        ellipse.setDisplay(vpMeSite::RANGE_RESULT);
//        ellipse.initTracking(I);
//        while (1) {
//            vpDisplay::display(I);
//            ellipse.track(I);
//            ellipse.display(I, vpColor::red);
//            vpDisplay::flush(I);
//        }
//    } catch (const vpException &e) {
//        std::cout << "Catch an exception: " << e << std::endl;
//    }




//void MatchingMethod( int, void*)
//{
//    using namespace std;
//    using namespace cv;

//    Mat img_display;
//    img.copyTo( img_display );

//    int result_cols = img.cols - templ.cols + 1;
//    int result_rows = img.rows - templ.rows + 1;

//    result.create( result_rows, result_cols, CV_32FC1 );

//    bool method_accepts_mask = (TM_SQDIFF == match_method || match_method == TM_CCORR_NORMED);
//    if (use_mask && method_accepts_mask)
//    { matchTemplate( img, templ, result, match_method, mask); }
//    else
//    { matchTemplate( img, templ, result, match_method); }

//    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
//    double minVal; double maxVal; Point minLoc; Point maxLoc;
//    Point matchLoc;
//    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
//    if( match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED )
//    { matchLoc = minLoc; }
//    else
//    { matchLoc = maxLoc; }

//    rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
//    rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
//    imshow( image_window, img_display );
//    imshow( result_window, result );
//    return;
//}




//bool opt_init_by_click = true;

//vpDisplayOpenCV d(imageL_vp, 0, 0, "Klt tracking");
//vpDisplay::display(imageL_vp);
//vpDisplay::flush(imageL_vp);
//vpKltOpencv trackerEasy;
//trackerEasy.setMaxFeatures(200);
//trackerEasy.setWindowSize(10);
//trackerEasy.setQuality(0.01);
//trackerEasy.setMinDistance(15);
//trackerEasy.setHarrisFreeParameter(0.04);
//trackerEasy.setBlockSize(9);
//trackerEasy.setUseHarris(1);
//trackerEasy.setPyramidLevels(3);
//if (opt_init_by_click) {
//    vpMouseButton::vpMouseButtonType button = vpMouseButton::button1;

//    std::vector<cv::Point2f> feature;

//    vpImagePoint ip;
//    do {
//        vpDisplay::displayText(imageL_vp, 10, 10, "Left click to select a point, right to start tracking", vpColor::red);
//        if (vpDisplay::getClick(imageL_vp, ip, button, false)) {
//            if (button == vpMouseButton::button1) {
//                feature.push_back(cv::Point2f((float)ip.get_u(), (float)ip.get_v()));
//                vpDisplay::displayCross(imageL_vp, ip, 12, vpColor::green);
//            }
//        }
//        vpDisplay::flush(imageL_vp);
//        vpTime::wait(20);
//    } while (button != vpMouseButton::button3);

//    trackerEasy.initTracking(imageL_cv, feature);

//} else {
//    std::cout << "BEFORE: " << imageL_cv.size << "\n";
//    trackerEasy.initTracking(imageL_cv);
//}
//std::cout << "Tracker initialized with " << trackerEasy.getNbFeatures() << " features" << std::endl;



//    if (opt_init_by_click) { /// USED TO RE-INITIALIZE THE TRACKER
//        vpMouseButton::vpMouseButtonType button = vpMouseButton::button1;

//        std::vector<cv::Point2f> feature;
//        vpImagePoint ip;
//        do {
//            vpDisplay::displayText(I, 10, 10, "Left click to select a point, right to start tracking", vpColor::red);
//            if (vpDisplay::getClick(I, ip, button, false)) {
//                if (button == vpMouseButton::button1) {
//                    feature.push_back(cv::Point2f((float)ip.get_u(), (float)ip.get_v()));
//                    vpDisplay::displayCross(I, ip, 12, vpColor::green);
//                }
//            }
//            vpDisplay::flush(I);
//            vpTime::wait(20);
//        } while (button != vpMouseButton::button3);

//        tracker.initTracking(cvI, feature);
//    }


//    trackerEasy.track(imageL_cv);
//    trackerEasy.display(imageL_vp, vpColor::red);
//    vpDisplay::flush(imageL_vp);

