AUV-Coop-Assembly
Master's thesis in Robotics Engineering. Cooperative peg-in-hole assembly with two underwater vehicles guided by vision.
missionManagerVision.cpp
#include "header/missionManagerVision.h"


int main(int argc, char** argv){

  if (argc < 2){
    std::cout << "[MISSION_MANAGER] Please insert robot name for Vision" << std::endl;
    return -1;
  }

  boost::filesystem::path path(__FILE__);
  path.remove_filename();
  std::string sourcePath = path.string();

  std::string robotName = argv[1];
  std::string pathLog;
  if (LOG && (argc > 2)){ // if the LOG flag is set to 1 and a log path is given
    pathLog = argv[2];
  }

  ros::init(argc, argv, robotName + "_MissionManagerVision");
  std::cout << "[" << robotName << "][MISSION_MANAGER] Start" << std::endl;
  ros::NodeHandle nh;

  InfosVision robVisInfo;

  RobotVisionInterface robotVisInterface(nh, robotName);
  robotVisInterface.init();

  std::string cameraLName = "bowtechL_C";
  std::string cameraRName = "bowtechR_C";
  std::string cameraRangeRName = "bowtechRangeR_C";
  std::string holeName = "hole";
  WorldInterface worldInterface(robotName);
  worldInterface.waitReady(holeName);

  VisionInterfaceVision visionInterface(nh, robotName);

  robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));

  // fixed transforms between the vehicle frame and the camera frames
  worldInterface.getT(&robVisInfo.robotStruct.vTcameraL, robotName, cameraLName);
  worldInterface.getT(&robVisInfo.robotStruct.vTcameraR, robotName, cameraRName);
  worldInterface.getT(&robVisInfo.robotStruct.cLTcR, cameraLName, cameraRName);
  worldInterface.getT(&robVisInfo.robotStruct.cRTcL, cameraRName, cameraLName);
  worldInterface.getT(&robVisInfo.robotStruct.vTcameraRangeR, robotName, cameraRangeRName);
  worldInterface.getT(&robVisInfo.robotStruct.cRangeRTcL, cameraRangeRName, cameraLName);

  // real hole pose, used to log the errors of the pose estimation
  worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);


  Logger logger;
  if (pathLog.size() > 0){
    logger = Logger(robotName, pathLog);
    logger.createDirectoryForNode();
  }

  vpImage<unsigned char> imageL_vp, imageR_vp, imageRangeR_vp;
  cv::Mat imageL_cv, imageR_cv, imageRangeR_cv;

  robotVisInterface.getLeftImage(&imageL_cv);
  robotVisInterface.getRightImage(&imageR_cv);
  imageL_cv.convertTo(imageL_cv, CV_8U); // TODO: check whether the conversion to 8U is necessary
  imageR_cv.convertTo(imageR_cv, CV_8U); // TODO: check whether the conversion to 8U is necessary

  vpImageConvert::convert(imageL_cv, imageL_vp);
  vpImageConvert::convert(imageR_cv, imageR_vp);

  // DEPTH camera
  pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
  if (USE_DEPTH){

    robotVisInterface.getRangeRightImage(&imageRangeR_cv);
    imageRangeR_cv.convertTo(imageRangeR_cv, CV_16UC1);
    // ViSP's convert function does not convert from cv::Mat to a uint16_t ViSP image,
    // so the copy is done element by element
    vpImage<uint16_t> imageRangeR_vp_raw;
    imageRangeR_vp_raw.resize(imageRangeR_cv.rows, imageRangeR_cv.cols);
    for (int i=0; i<imageRangeR_cv.rows; i++){
      for (int j=0; j<imageRangeR_cv.cols; j++){
        imageRangeR_vp_raw[i][j] = imageRangeR_cv.at<unsigned short>(i,j);
      }
    }
    //DCAM::makePointCloud(imageRangeR_vp_raw, pointcloud);
    vpImageConvert::createDepthHistogram(imageRangeR_vp_raw, imageRangeR_vp);

    // build the point cloud consumed by the dense-depth tracker
    // (a hedged back-projection sketch is given at the end of this file)
    DCAM::makePointCloud(imageRangeR_cv, pointcloud);
  }

  vpDisplayOpenCV display_left;
  vpDisplayOpenCV display_right;
  vpDisplayOpenCV display_rangeRight;
  display_left.setDownScalingFactor(vpDisplay::SCALE_AUTO);
  display_right.setDownScalingFactor(vpDisplay::SCALE_AUTO);
  display_rangeRight.setDownScalingFactor(vpDisplay::SCALE_AUTO);
  display_left.init(imageL_vp, 70, 750, "Model-based tracker (Left)");
  display_right.init(imageR_vp, 440, 750, "Model-based tracker (Right)");
  if (USE_DEPTH){
    display_rangeRight.init(imageRangeR_vp, 120+(int)imageL_vp.getWidth()+(int)imageR_vp.getWidth(),
                            100, "Model-based tracker (Range Right)");
    vpDisplay::display(imageRangeR_vp);
    vpDisplay::flush(imageRangeR_vp);
  }

  std::vector<std::string> cameraNames(2); // for the config files
  cameraNames.at(0) = "left";
  if (USE_DEPTH){
    cameraNames.at(1) = "rangeRight";
  } else {
    cameraNames.at(1) = "right";
  }

  MonoTracker* monoTrackerL;
  MonoTracker* monoTrackerR;
  StereoTracker* stereoTracker;
  std::map<std::string, vpHomogeneousMatrix> mapCameraTransf;
  mapCameraTransf[cameraNames.at(0)] = vpHomogeneousMatrix(); // identity

  // note: the transform from RIGHT to LEFT must be inserted here, not the other way around
  if (!USE_DEPTH){
    mapCameraTransf[cameraNames.at(1)] =
        CONV::transfMatrix_eigen2visp(robVisInfo.robotStruct.cRTcL);
  } else {
    mapCameraTransf[cameraNames.at(1)] =
        CONV::transfMatrix_eigen2visp(robVisInfo.robotStruct.cRangeRTcL);
  }

  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  mapOfImages[cameraNames.at(0)] = &imageL_vp;
  if (!USE_DEPTH){
    mapOfImages[cameraNames.at(1)] = &imageR_vp;
  } else {
    mapOfImages[cameraNames.at(1)] = &imageRangeR_vp;
  }

  // initialize the transformation matrices from camera to object
  std::map<std::string, vpHomogeneousMatrix> mapOfcameraToObj;
  mapOfcameraToObj[cameraNames.at(0)] = vpHomogeneousMatrix();
  mapOfcameraToObj[cameraNames.at(1)] = vpHomogeneousMatrix();

  if (TRACK_METHOD == 0){
    monoTrackerL = new MonoTracker(robotName, cameraNames.at(0),
        vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    monoTrackerR = new MonoTracker(robotName, cameraNames.at(1),
        vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);

  } else if (TRACK_METHOD == 1) {
    std::vector<int> trackerTypes;
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    if (!USE_DEPTH){
      trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    } else {
      trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
    }

    stereoTracker = new StereoTracker(robotName, cameraNames, mapCameraTransf, trackerTypes);
  } else {
    std::cerr << "[" << robotName << "][MISSION_MANAGER] NOT RECOGNIZED METHOD " << TRACK_METHOD <<
                 " for tracking Phase" << std::endl;
    return -1;
  }

  std::vector<std::vector<cv::Point>> found4CornersVectorL, found4CornersVectorR; // used if init-by-click is not chosen
  if (DETECT_METHOD == 0){ // init by click method
    if (TRACK_METHOD == 0){ // mono
      monoTrackerL->initTrackingByClick(&imageL_vp);
      monoTrackerR->initTrackingByClick(&imageR_vp);

    } else { // stereo (with or without depth it is the same here)
      stereoTracker->initTrackingByClick(mapOfImages);
    }


  } else { // find the points without clicking // TODO: also discuss the other methods that were tried

    if (USE_DEPTH){
      if (DETECT_METHOD == 1){
        Detector::findSquare(imageL_cv, &found4CornersVectorL);
        Detector::drawSquares(imageL_cv, found4CornersVectorL, "left");

      } else if (DETECT_METHOD == 2){
        cv::Mat templL1 = cv::imread(sourcePath + templName);
        found4CornersVectorL.resize(1);
        Detector::templateMatching(imageL_cv, templL1, &(found4CornersVectorL.at(0)));

      } else {
        std::cerr << "[" << robotName << "][MISSION_MANAGER] NOT RECOGNIZED METHOD " << DETECT_METHOD <<
                     " for detection Phase" << std::endl;
        return -1;

      }

      append2Dto3Dfile(found4CornersVectorL.at(0), sourcePath);

    } else { // depth not used

      if (DETECT_METHOD == 1){
        // https://docs.opencv.org/3.4/db/d00/samples_2cpp_2squares_8cpp-example.html#a20
        Detector::findSquare(imageL_cv, &found4CornersVectorL);
        Detector::findSquare(imageR_cv, &found4CornersVectorR);

        cv::imshow("Left Detection", imageL_cv);
        cv::moveWindow("Left Detection", 60, 80);
        cv::imshow("Right Detection", imageR_cv);
        cv::moveWindow("Right Detection", 430, 80);
        cv::waitKey(0);

        Detector::drawSquares(imageL_cv, found4CornersVectorL, "Left Detection");
        Detector::drawSquares(imageR_cv, found4CornersVectorR, "Right Detection");

      } else if (DETECT_METHOD == 2){

        // https://docs.opencv.org/3.4.6/de/da9/tutorial_template_matching.html
        // TODO: use several templates taken from different viewpoints...
        // Note: template matching works, but if we are skewed with respect to the
        // hole we cannot extract a good contour, because the visible region is not
        // a proper square and we cannot know the side lengths in pixels in the
        // images seen by the cameras... a homography would be needed to rectify
        // the images...
        // TODO rectify? (a hedged sketch of this idea, rectifySquareRegion, is
        // given at the end of this file)
        cv::Mat templL1 = cv::imread(sourcePath + templName);
        cv::Mat templR1 = cv::imread(sourcePath + templName);
        found4CornersVectorL.resize(1);
        found4CornersVectorR.resize(1);
        Detector::templateMatching(imageL_cv, templL1, &(found4CornersVectorL.at(0)));
        Detector::templateMatching(imageR_cv, templR1, &(found4CornersVectorR.at(0)));

      } else {
        std::cerr << "[" << robotName << "][MISSION_MANAGER] NOT RECOGNIZED METHOD " << DETECT_METHOD <<
                     " for detection Phase" << std::endl;
        return -1;
      }


      // TODO: if found4CornersVector contains more than one element, pick the best...
      if (found4CornersVectorR.size() < 1){
        std::cout << "[" << robotName << "][MISSION_MANAGER] ERROR: NOT FOUND ANY SQUARE IN RIGHT IMAGE" << std::endl;
        return -1;
      }
      if (found4CornersVectorL.size() < 1){
        std::cout << "[" << robotName << "][MISSION_MANAGER] ERROR: NOT FOUND ANY SQUARE IN LEFT IMAGE" << std::endl;
        return -1;
      }

      append2Dto3Dfile(found4CornersVectorL.at(0), found4CornersVectorR.at(0), sourcePath);
    }


    if (TRACK_METHOD == 0){
      monoTrackerL->initTrackingByPoint(&imageL_vp);
      monoTrackerR->initTrackingByPoint(&imageR_vp);

    } else { // stereo; with or without depth does not matter for this initTracking
      stereoTracker->initTrackingByPoint(mapOfImages);

    }
  }


  // get the camera parameters, for display purposes
  vpCameraParameters cam_left, cam_right;
  if (TRACK_METHOD == 0){
    monoTrackerL->getCameraParams(&cam_left);
    monoTrackerR->getCameraParams(&cam_right);

  } else { // stereo
    std::map<std::string, vpCameraParameters> mapOfCamParams;
    stereoTracker->getCamerasParams(&mapOfCamParams);
    cam_left = mapOfCamParams.at(cameraNames.at(0));
    cam_right = mapOfCamParams.at(cameraNames.at(1));
  }

  std::cout << "[" << robotName << "][MISSION_MANAGER] Starting loop" << std::endl;
  ros::Rate loop_rate(10);
  while (ros::ok()) {
    robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));

    robotVisInterface.getLeftImage(&imageL_cv);
    robotVisInterface.getRightImage(&imageR_cv);
    imageL_cv.convertTo(imageL_cv, CV_8U); // TODO: check whether the conversion to 8U is necessary
    imageR_cv.convertTo(imageR_cv, CV_8U); // TODO: check whether the conversion to 8U is necessary
    vpImageConvert::convert(imageL_cv, imageL_vp);
    vpImageConvert::convert(imageR_cv, imageR_vp);

    vpDisplay::display(imageL_vp);
    vpDisplay::display(imageR_vp);

    if (USE_DEPTH){
      robotVisInterface.getRangeRightImage(&imageRangeR_cv);
      imageRangeR_cv.convertTo(imageRangeR_cv, CV_16UC1);

      // ViSP's convert function does not convert from cv::Mat to a uint16_t ViSP image,
      // so the copy is done element by element
      vpImage<uint16_t> imageRangeR_vp_raw;
      imageRangeR_vp_raw.resize(imageRangeR_cv.rows, imageRangeR_cv.cols);
      for (int i=0; i<imageRangeR_cv.rows; i++){
        for (int j=0; j<imageRangeR_cv.cols; j++){
          imageRangeR_vp_raw[i][j] = imageRangeR_cv.at<unsigned short>(i,j);
        }
      }
      //DCAM::makePointCloud(imageRangeR_vp_raw, pointcloud);
      vpImageConvert::createDepthHistogram(imageRangeR_vp_raw, imageRangeR_vp);

      DCAM::makePointCloud(imageRangeR_cv, pointcloud);
      vpDisplay::display(imageRangeR_vp);
    }

    vpHomogeneousMatrix cLThole, cRThole;
    if (TRACK_METHOD == 0){

      double ransacErrorL = 0.0;
      double elapsedTimeL = 0.0;
      double ransacErrorR = 0.0;
      double elapsedTimeR = 0.0;

      monoTrackerL->monoTrack(&imageL_vp, &cLThole, &ransacErrorL, &elapsedTimeL);
      monoTrackerR->monoTrack(&imageR_vp, &cRThole, &ransacErrorR, &elapsedTimeR);

      monoTrackerL->display(&imageL_vp);
      monoTrackerR->display(&imageR_vp);


    } else {

      // update the map of images
      mapOfImages.at(cameraNames.at(0)) = &imageL_vp;

      if (!USE_DEPTH){
        mapOfImages.at(cameraNames.at(1)) = &imageR_vp;
        stereoTracker->stereoTrack(mapOfImages, &mapOfcameraToObj);
      } else {

        // the image of the second (range) camera is not needed: the
        // point cloud is provided instead
        std::map<std::string, pcl::PointCloud< pcl::PointXYZ >::ConstPtr> mapOfPointclouds;
        mapOfPointclouds[cameraNames.at(1)] = pointcloud;
        stereoTracker->stereoTrack(mapOfImages, mapOfPointclouds, &mapOfcameraToObj);
      }
      cLThole = mapOfcameraToObj.at(cameraNames.at(0));

      cRThole = mapOfcameraToObj.at(cameraNames.at(1));

      stereoTracker->display(mapOfImages);
    }


    vpDisplay::displayFrame(imageL_vp, cLThole, cam_left, 0.25, vpColor::none, 2);
    if (!USE_DEPTH){
      vpDisplay::displayFrame(imageR_vp, cRThole, cam_right, 0.25, vpColor::none, 2);
    } else {
      vpDisplay::displayFrame(imageRangeR_vp, cRThole, cam_right, 0.25, vpColor::none, 2);
    }

    // store the estimate in the struct.
    // TODO: average between the right and left estimates for the monoTracker?
    // (a hedged sketch of such an average, averageHolePoses, is given after main())
    Eigen::Matrix4d wTh_estimated_left =
        robVisInfo.robotState.wTv_eigen *
        robVisInfo.robotStruct.vTcameraL *
        CONV::matrix_visp2eigen(cLThole);
    Eigen::Matrix4d wTh_estimated_right =
        robVisInfo.robotState.wTv_eigen *
        robVisInfo.robotStruct.vTcameraR *
        CONV::matrix_visp2eigen(cRThole);


    robVisInfo.transforms.wTh_estimated_eigen = wTh_estimated_left;

    std::cout << "PUBLISHING:\n" << robVisInfo.transforms.wTh_estimated_eigen << "\n";
    visionInterface.publishHoleTransform(robVisInfo.transforms.wTh_estimated_eigen);

    vpDisplay::displayText(imageL_vp, 30, 10, "A click to exit.", vpColor::red);
    vpDisplay::flush(imageL_vp);
    vpDisplay::flush(imageR_vp);
    if (USE_DEPTH){
      vpDisplay::flush(imageRangeR_vp);
    }
    //vpDisplay::getKeyboardEvent(imageL_vp, 'd');
    if (vpDisplay::getClick(imageL_vp, false)) {
      return 1;
    }


    if (pathLog.size() != 0){
      worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);
      CMAT::TransfMatrix wThole_cmat =
          CONV::matrix_eigen2cmat(robVisInfo.transforms.wTh_eigen);

      if (TRACK_METHOD == 0){

        logger.logCartError(robVisInfo.transforms.wTh_eigen,
                            wTh_estimated_left, "errorMonoL");
        logger.logCartError(robVisInfo.transforms.wTh_eigen,
                            wTh_estimated_right, "errorMonoR");

      } else {

        logger.logCartError(robVisInfo.transforms.wTh_eigen,
                            wTh_estimated_left, "errorStereoL");
        logger.logCartError(robVisInfo.transforms.wTh_eigen,
                            wTh_estimated_right, "errorStereoR");
      }
    } // END LOGGING


    ros::spinOnce();
    loop_rate.sleep();
  }


  if (TRACK_METHOD == 0){
    delete monoTrackerL;
    delete monoTrackerR;
  } else {
    delete stereoTracker;

  }

  return 0;
}
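
// The TODO inside the loop asks whether the left and right hole estimates could be
// averaged instead of publishing only the left one. The helper below is a hedged,
// hypothetical sketch of one way to do that and is NOT part of the original code:
// translations are averaged component-wise and rotations are blended with a
// quaternion slerp at t = 0.5. The name averageHolePoses is illustrative only.
// Possible usage: robVisInfo.transforms.wTh_estimated_eigen =
//                     averageHolePoses(wTh_estimated_left, wTh_estimated_right);
static Eigen::Matrix4d averageHolePoses(const Eigen::Matrix4d &wTh_left,
                                        const Eigen::Matrix4d &wTh_right){

  // rotation part: interpolate halfway between the two orientations
  Eigen::Matrix3d R_left = wTh_left.topLeftCorner<3,3>();
  Eigen::Matrix3d R_right = wTh_right.topLeftCorner<3,3>();
  Eigen::Quaterniond q_left(R_left);
  Eigen::Quaterniond q_right(R_right);
  Eigen::Quaterniond q_mean = q_left.slerp(0.5, q_right).normalized();

  // translation part: simple arithmetic mean
  Eigen::Vector3d t_mean =
      0.5 * (wTh_left.topRightCorner<3,1>() + wTh_right.topRightCorner<3,1>());

  Eigen::Matrix4d wTh_mean = Eigen::Matrix4d::Identity();
  wTh_mean.topLeftCorner<3,3>() = q_mean.toRotationMatrix();
  wTh_mean.topRightCorner<3,1>() = t_mean;
  return wTh_mean;
}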



void append2Dto3Dfile(std::vector<cv::Point> found4CornersL, std::vector<cv::Point> found4CornersR,
                      std::string sourcePath){

  // copy the original files; if a destination file exists it is overwritten (which is what we want)
  std::ifstream srcL(sourcePath+"/vision/"+initFileClick+"left.init", std::ios::binary);
  std::ofstream dstL(sourcePath+"/vision/"+initFile_w2D+"left.init", std::ios::binary);
  dstL << srcL.rdbuf();
  srcL.close();

  std::ifstream srcR(sourcePath+"/vision/"+initFileClick+"right.init", std::ios::binary);
  std::ofstream dstR(sourcePath+"/vision/"+initFile_w2D+"right.init", std::ios::binary);
  dstR << srcR.rdbuf();
  srcR.close();

  // append the detected 2D points to the new files
  dstL << "#Generated by code: append 2D points" << std::endl
       << "4" << std::endl // number of points
       << found4CornersL.at(0).y << " " << found4CornersL.at(0).x << std::endl
       << found4CornersL.at(1).y << " " << found4CornersL.at(1).x << std::endl
       << found4CornersL.at(2).y << " " << found4CornersL.at(2).x << std::endl
       << found4CornersL.at(3).y << " " << found4CornersL.at(3).x << std::endl;
  dstL.close();
  dstR << "#Generated by code: append 2D points" << std::endl
       << "4" << std::endl // number of points
       << found4CornersR.at(0).y << " " << found4CornersR.at(0).x << std::endl
       << found4CornersR.at(1).y << " " << found4CornersR.at(1).x << std::endl
       << found4CornersR.at(2).y << " " << found4CornersR.at(2).x << std::endl
       << found4CornersR.at(3).y << " " << found4CornersR.at(3).x << std::endl;
  dstR.close();

  return;
}

void append2Dto3Dfile(std::vector<cv::Point> found4CornersL, std::string sourcePath){

  // copy the original file; if the destination file exists it is overwritten (which is what we want)
  std::ifstream srcL(sourcePath+"/vision/"+initFileClick+"left.init", std::ios::binary);
  std::ofstream dstL(sourcePath+"/vision/"+initFile_w2D+"left.init", std::ios::binary);
  dstL << srcL.rdbuf();
  srcL.close();

  // append the detected 2D points to the new file
  dstL << "#Generated by code: append 2D points" << std::endl
       << "4" << std::endl // number of points
       << found4CornersL.at(0).y << " " << found4CornersL.at(0).x << std::endl
       << found4CornersL.at(1).y << " " << found4CornersL.at(1).x << std::endl
       << found4CornersL.at(2).y << " " << found4CornersL.at(2).x << std::endl
       << found4CornersL.at(3).y << " " << found4CornersL.at(3).x << std::endl;
  dstL.close();

  return;
}
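
// Hypothetical sketch, NOT the project's DCAM::makePointCloud: how a 16-bit depth
// image like imageRangeR_cv could be back-projected into the pcl::PointCloud that the
// DEPTH_DENSE tracker consumes, assuming a pinhole model. The intrinsics (fx, fy, cx, cy)
// and the depth scale are assumptions and would have to come from the camera calibration.
static pcl::PointCloud<pcl::PointXYZ>::Ptr depthToPointCloudSketch(
    const cv::Mat &depth16U, double fx, double fy, double cx, double cy,
    double depthScale /* metres per depth unit, e.g. 0.001 for millimetres */){

  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
  cloud->width = depth16U.cols;
  cloud->height = depth16U.rows;
  cloud->is_dense = false;
  cloud->points.resize((size_t)depth16U.rows * depth16U.cols);

  for (int i = 0; i < depth16U.rows; i++){
    for (int j = 0; j < depth16U.cols; j++){
      double z = depth16U.at<unsigned short>(i, j) * depthScale;
      pcl::PointXYZ &p = cloud->points[(size_t)i * depth16U.cols + j];
      p.x = (float)((j - cx) * z / fx); // back-project the column index
      p.y = (float)((i - cy) * z / fy); // back-project the row index
      p.z = (float)z;                   // depth along the optical axis
    }
  }
  return cloud;
}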
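
// Hypothetical sketch, not present in the original code: the "rectify" idea mentioned
// in the template-matching TODO inside main(). Given the four detected corners of the
// hole face, the region can be warped to a fronto-parallel square of side sidePx so
// that template matching no longer suffers from the skewed viewpoint. The name
// rectifySquareRegion and the assumed corner order (tl, tr, br, bl) are illustrative only.
static cv::Mat rectifySquareRegion(const cv::Mat &image,
                                   const std::vector<cv::Point> &corners,
                                   int sidePx){

  // source corners as detected in the image, converted to float points
  std::vector<cv::Point2f> src;
  for (const cv::Point &p : corners){
    src.push_back(cv::Point2f((float)p.x, (float)p.y));
  }

  // destination corners of an ideal, fronto-parallel square
  std::vector<cv::Point2f> dst = {
      cv::Point2f(0.f, 0.f),
      cv::Point2f((float)(sidePx - 1), 0.f),
      cv::Point2f((float)(sidePx - 1), (float)(sidePx - 1)),
      cv::Point2f(0.f, (float)(sidePx - 1))};

  // 4-point homography and perspective warp
  cv::Mat H = cv::getPerspectiveTransform(src, dst);
  cv::Mat rectified;
  cv::warpPerspective(image, rectified, H, cv::Size(sidePx, sidePx));
  return rectified;
}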