#include "header/missionManagerVision.h"

// Forward declarations (definitions at the bottom of this file).
void append2Dto3Dfile(std::vector<cv::Point> found4CornersL,
                      std::vector<cv::Point> found4CornersR, std::string sourcePath);
void append2Dto3Dfile(std::vector<cv::Point> found4CornersL, std::string sourcePath);

// Interfaces, trackers and configuration flags (robotVisInterface, worldInterface,
// visionInterface, robVisInfo, logger, monoTrackerL/R, stereoTracker, TRACK_METHOD,
// DETECT_METHOD, LOG, pathLog, templName, initFileClick, initFile_w2D, ...) are
// assumed to be file-scope objects declared in the included header.
int main(int argc, char** argv){
    std::cout << "[MISSION_MANAGER] Please insert robot name for Vision" << std::endl;
    boost::filesystem::path path(__FILE__);
    path.remove_filename();
    std::string sourcePath = path.string();

    std::string robotName = argv[1];

    if (LOG && (argc > 2)){
        pathLog = argv[2];  // optional log directory (assumed from the later uses of pathLog)
    }
    ros::init(argc, argv, robotName + "_MissionManagerVision");
    std::cout << "[" << robotName << "][MISSION_MANAGER] Start" << std::endl;
    robotVisInterface.init();
    std::string cameraLName = "bowtechL_C";
    std::string cameraRName = "bowtechR_C";
    std::string cameraRangeRName = "bowtechRangeR_C";
    std::string holeName = "hole";
    worldInterface.waitReady(holeName);
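    // Frame-naming convention: aTb is the pose of frame b expressed in frame a
    // (wTv = vehicle in world, vTcameraL = left camera in vehicle,
    //  cLThole = hole in left camera).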
    robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));
    worldInterface.getT(&robVisInfo.robotStruct.vTcameraL, robotName, cameraLName);
    worldInterface.getT(&robVisInfo.robotStruct.vTcameraR, robotName, cameraRName);
    worldInterface.getT(&robVisInfo.robotStruct.cLTcR, cameraLName, cameraRName);
    worldInterface.getT(&robVisInfo.robotStruct.cRTcL, cameraRName, cameraLName);
    worldInterface.getT(&robVisInfo.robotStruct.vTcameraRangeR, robotName, cameraRangeRName);
    worldInterface.getT(&robVisInfo.robotStruct.cRangeRTcL, cameraRangeRName, cameraLName);
    worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);
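    // Ground-truth hole pose from the simulated world; it is re-read inside the
    // main loop to log the cartesian error of the vision estimates.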
    if (pathLog.size() > 0){
        logger = Logger(robotName, pathLog);
        logger.createDirectoryForNode();
    }
    vpImage<unsigned char> imageL_vp, imageR_vp, imageRangeR_vp;
    cv::Mat imageL_cv, imageR_cv, imageRangeR_cv;

    robotVisInterface.getLeftImage(&imageL_cv);
    robotVisInterface.getRightImage(&imageR_cv);
    imageL_cv.convertTo(imageL_cv, CV_8U);
    imageR_cv.convertTo(imageR_cv, CV_8U);

    vpImageConvert::convert(imageL_cv, imageL_vp);
    vpImageConvert::convert(imageR_cv, imageR_vp);
    pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
    robotVisInterface.getRangeRightImage(&imageRangeR_cv);
    imageRangeR_cv.convertTo(imageRangeR_cv, CV_16UC1);
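    // ViSP displays 8-bit images: the 16-bit depth map is copied into a
    // vpImage<uint16_t> and tone-mapped with createDepthHistogram (display
    // only; tracking consumes the depth through the point cloud below).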
    vpImage<uint16_t> imageRangeR_vp_raw;
    imageRangeR_vp_raw.resize(imageRangeR_cv.rows, imageRangeR_cv.cols);
    for (int i = 0; i < imageRangeR_cv.rows; i++){
        for (int j = 0; j < imageRangeR_cv.cols; j++){
            imageRangeR_vp_raw[i][j] = imageRangeR_cv.at<unsigned short>(i, j);
        }
    }
    vpImageConvert::createDepthHistogram(imageRangeR_vp_raw, imageRangeR_vp);
    DCAM::makePointCloud(imageRangeR_cv, pointcloud);
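    // The same depth frame as a PCL cloud: input for the depth-dense tracker
    // in the range-camera configuration.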
    vpDisplayOpenCV display_left;
    vpDisplayOpenCV display_right;
    vpDisplayOpenCV display_rangeRight;
    display_left.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display_right.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display_rangeRight.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display_left.init(imageL_vp, 70, 750, "Model-based tracker (Left)");
    display_right.init(imageR_vp, 440, 750, "Model-based tracker (Right)");
    display_rangeRight.init(imageRangeR_vp,
        120 + (int)imageL_vp.getWidth() + (int)imageR_vp.getWidth(), 100,
        "Model-based tracker (Range Right)");
    vpDisplay::display(imageRangeR_vp);
    vpDisplay::flush(imageRangeR_vp);
    std::vector<std::string> cameraNames(2);
    cameraNames.at(0) = "left";
    //cameraNames.at(1) = "rangeRight";
    cameraNames.at(1) = "right";
    std::map<std::string, vpHomogeneousMatrix> mapCameraTransf;
    mapCameraTransf[cameraNames.at(0)] = vpHomogeneousMatrix(); // identity: left is the reference camera
    mapCameraTransf[cameraNames.at(1)] =
        CONV::transfMatrix_eigen2visp(robVisInfo.robotStruct.cRTcL);
    //mapCameraTransf[cameraNames.at(1)] =
    //    CONV::transfMatrix_eigen2visp(robVisInfo.robotStruct.cRangeRTcL);
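    // Each entry is the transform from the reference (left) camera frame to
    // that camera's frame, as the multi-camera vpMbGenericTracker expects.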
    std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
    mapOfImages[cameraNames.at(0)] = &imageL_vp;
    mapOfImages[cameraNames.at(1)] = &imageR_vp;
    //mapOfImages[cameraNames.at(1)] = &imageRangeR_vp;
    std::map<std::string, vpHomogeneousMatrix> mapOfcameraToObj;
    mapOfcameraToObj[cameraNames.at(0)] = vpHomogeneousMatrix();
    mapOfcameraToObj[cameraNames.at(1)] = vpHomogeneousMatrix();
    if (TRACK_METHOD == 0){
        monoTrackerL = new MonoTracker(robotName, cameraNames.at(0),
            vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
        monoTrackerR = new MonoTracker(robotName, cameraNames.at(1),
            vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    } else if (TRACK_METHOD == 1) {
        std::vector<int> trackerTypes;
        trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
        trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
        //trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER); // range-camera alternative
        stereoTracker = new StereoTracker(robotName, cameraNames, mapCameraTransf, trackerTypes);
    } else {
        std::cerr << "[" << robotName << "][MISSION_MANAGER] UNRECOGNIZED METHOD "
                  << TRACK_METHOD << " for tracking phase" << std::endl;
    }
    std::vector<std::vector<cv::Point>> found4CornersVectorL, found4CornersVectorR;
    if (DETECT_METHOD == 0){
        // Manual initialization: the user clicks the model points.
        if (TRACK_METHOD == 0){
            monoTrackerL->initTrackingByClick(&imageL_vp);
            monoTrackerR->initTrackingByClick(&imageR_vp);
        } else {
            stereoTracker->initTrackingByClick(mapOfImages);
        }
    } else {
        // Automatic initialization: detect the square target, write its 2D
        // corners next to the 3D model points, then init the tracker by point.
        if (TRACK_METHOD == 1){
            // Stereo tracker: corners are needed on the reference (left) image
            // only; the other camera's pose follows from the known extrinsics.
            if (DETECT_METHOD == 1){
                Detector::findSquare(imageL_cv, &found4CornersVectorL); // default thresholds assumed
            } else if (DETECT_METHOD == 2){
                cv::Mat templL1 = cv::imread(sourcePath + templName);
                found4CornersVectorL.resize(1);
                // template matching against templL1 (Detector::templateMatching; call elided)
            } else {
                std::cerr << "[" << robotName << "][MISSION_MANAGER] UNRECOGNIZED METHOD "
                          << DETECT_METHOD << " for detection phase" << std::endl;
            }
            append2Dto3Dfile(found4CornersVectorL.at(0), sourcePath);
        } else {
            // Two mono trackers: corners are needed on both images.
            if (DETECT_METHOD == 1){
                Detector::findSquare(imageL_cv, &found4CornersVectorL); // default thresholds assumed
                Detector::findSquare(imageR_cv, &found4CornersVectorR);
                cv::imshow("Left Detection", imageL_cv);
                cv::moveWindow("Left Detection", 60, 80);
                cv::imshow("Right Detection", imageR_cv);
                cv::moveWindow("Right Detection", 430, 80);
            } else if (DETECT_METHOD == 2){
                cv::Mat templL1 = cv::imread(sourcePath + templName);
                cv::Mat templR1 = cv::imread(sourcePath + templName);
                found4CornersVectorL.resize(1);
                found4CornersVectorR.resize(1);
                // template matching on both images (Detector::templateMatching; calls elided)
            } else {
                std::cerr << "[" << robotName << "][MISSION_MANAGER] UNRECOGNIZED METHOD "
                          << DETECT_METHOD << " for detection phase" << std::endl;
            }
            if (found4CornersVectorR.size() < 1){
                std::cout << "[" << robotName
                          << "][MISSION_MANAGER] ERROR: NO SQUARE FOUND IN RIGHT IMAGE" << std::endl;
            }
            if (found4CornersVectorL.size() < 1){
                std::cout << "[" << robotName
                          << "][MISSION_MANAGER] ERROR: NO SQUARE FOUND IN LEFT IMAGE" << std::endl;
            }
            append2Dto3Dfile(found4CornersVectorL.at(0), found4CornersVectorR.at(0), sourcePath);
        }

        if (TRACK_METHOD == 0){
            monoTrackerL->initTrackingByPoint(&imageL_vp);
            monoTrackerR->initTrackingByPoint(&imageR_vp);
        } else {
            stereoTracker->initTrackingByPoint(mapOfImages);
        }
    }
    vpCameraParameters cam_left, cam_right;
    if (TRACK_METHOD == 0){
        monoTrackerL->getCameraParams(&cam_left);
        monoTrackerR->getCameraParams(&cam_right);
    } else {
        std::map<std::string, vpCameraParameters> mapOfCamParams;
        stereoTracker->getCamerasParams(&mapOfCamParams);
        cam_left = mapOfCamParams.at(cameraNames.at(0));
        cam_right = mapOfCamParams.at(cameraNames.at(1));
    }
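    // The intrinsics are used below only to draw the estimated object frames
    // on the displayed images.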
    std::cout << "[" << robotName << "][MISSION_MANAGER] Starting loop" << std::endl;
    ros::Rate loop_rate(10);
    while (ros::ok()){
        robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));
        robotVisInterface.getLeftImage(&imageL_cv);
        robotVisInterface.getRightImage(&imageR_cv);
        imageL_cv.convertTo(imageL_cv, CV_8U);
        imageR_cv.convertTo(imageR_cv, CV_8U);
        vpImageConvert::convert(imageL_cv, imageL_vp);
        vpImageConvert::convert(imageR_cv, imageR_vp);

        vpDisplay::display(imageL_vp);
        vpDisplay::display(imageR_vp);
        robotVisInterface.getRangeRightImage(&imageRangeR_cv);
        imageRangeR_cv.convertTo(imageRangeR_cv, CV_16UC1);

        vpImage<uint16_t> imageRangeR_vp_raw;
        imageRangeR_vp_raw.resize(imageRangeR_cv.rows, imageRangeR_cv.cols);
        for (int i = 0; i < imageRangeR_cv.rows; i++){
            for (int j = 0; j < imageRangeR_cv.cols; j++){
                imageRangeR_vp_raw[i][j] = imageRangeR_cv.at<unsigned short>(i, j);
            }
        }
        vpImageConvert::createDepthHistogram(imageRangeR_vp_raw, imageRangeR_vp);
        DCAM::makePointCloud(imageRangeR_cv, pointcloud);
        vpDisplay::display(imageRangeR_vp);
        vpHomogeneousMatrix cLThole, cRThole;
        if (TRACK_METHOD == 0){
            double ransacErrorL = 0.0;
            double elapsedTimeL = 0.0;
            double ransacErrorR = 0.0;
            double elapsedTimeR = 0.0;

            monoTrackerL->monoTrack(&imageL_vp, &cLThole, &ransacErrorL, &elapsedTimeL);
            monoTrackerR->monoTrack(&imageR_vp, &cRThole, &ransacErrorR, &elapsedTimeR);

            monoTrackerL->display(&imageL_vp);
            monoTrackerR->display(&imageR_vp);
        } else {
            mapOfImages.at(cameraNames.at(0)) = &imageL_vp;
            mapOfImages.at(cameraNames.at(1)) = &imageR_vp;
            stereoTracker->stereoTrack(mapOfImages, &mapOfcameraToObj);
            //// Range-camera alternative: feed the point cloud to the
            //// depth-dense tracker instead of the right image.
            //std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
            //mapOfPointclouds[cameraNames.at(1)] = pointcloud;
            //stereoTracker->stereoTrack(mapOfImages, mapOfPointclouds, &mapOfcameraToObj);

            cLThole = mapOfcameraToObj.at(cameraNames.at(0));
            cRThole = mapOfcameraToObj.at(cameraNames.at(1));

            stereoTracker->display(mapOfImages);
        }
        vpDisplay::displayFrame(imageL_vp, cLThole, cam_left, 0.25, vpColor::none, 2);
        vpDisplay::displayFrame(imageR_vp, cRThole, cam_right, 0.25, vpColor::none, 2);
        vpDisplay::displayFrame(imageRangeR_vp, cRThole, cam_right, 0.25, vpColor::none, 2);
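        // Estimate the hole pose in the world frame by chaining transforms:
        // wTh = wTv (vehicle odometry) * vTc (camera extrinsics) * cTh (tracker output).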
        Eigen::Matrix4d wTh_estimated_left =
            robVisInfo.robotState.wTv_eigen *
            robVisInfo.robotStruct.vTcameraL *
            CONV::matrix_visp2eigen(cLThole);
        Eigen::Matrix4d wTh_estimated_right =
            robVisInfo.robotState.wTv_eigen *
            robVisInfo.robotStruct.vTcameraR *
            CONV::matrix_visp2eigen(cRThole);

        // The left-camera estimate is the one actually published.
        robVisInfo.transforms.wTh_estimated_eigen = wTh_estimated_left;
        std::cout << "PUBLISHING:\n" << robVisInfo.transforms.wTh_estimated_eigen << "\n";
        visionInterface.publishHoleTransform(robVisInfo.transforms.wTh_estimated_eigen);
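        // The published hole transform feeds the insertion phase of the mission.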
        vpDisplay::displayText(imageL_vp, 30, 10, "A click to exit.", vpColor::red);
        vpDisplay::flush(imageL_vp);
        vpDisplay::flush(imageR_vp);
        vpDisplay::flush(imageRangeR_vp);

        if (vpDisplay::getClick(imageL_vp, false)) {
            break;
        }
        if (pathLog.size() != 0){
            worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);
            CMAT::TransfMatrix wThole_cmat =
                CONV::matrix_eigen2cmat(robVisInfo.transforms.wTh_eigen);

            // Cartesian error between the ground-truth hole pose (goal) and
            // each estimate (argument order assumed from Logger::logCartError).
            if (TRACK_METHOD == 0){
                logger.logCartError(robVisInfo.transforms.wTh_eigen,
                    wTh_estimated_left, "errorMonoL");
                logger.logCartError(robVisInfo.transforms.wTh_eigen,
                    wTh_estimated_right, "errorMonoR");
            } else {
                logger.logCartError(robVisInfo.transforms.wTh_eigen,
                    wTh_estimated_left, "errorStereoL");
                logger.logCartError(robVisInfo.transforms.wTh_eigen,
                    wTh_estimated_right, "errorStereoR");
            }
        }
        ros::spinOnce();     // process ROS callbacks
        loop_rate.sleep();   // keep the 10 Hz rate
    }

    if (TRACK_METHOD == 0){
        delete monoTrackerL;
        delete monoTrackerR;
    } else {
        delete stereoTracker;
    }
    return 0;
}
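/*
 * append2Dto3Dfile: make the hand-clicked .init files usable for automatic
 * initialization. Each overload copies the "click" init file (which holds the
 * 3D model points) and appends the four detected corners as 2D image points,
 * written row-first (ViSP image points are (i, j) = (row, col), hence .y
 * before .x). initTrackingByPoint then reads the resulting *_w2D file.
 */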
void append2Dto3Dfile(std::vector<cv::Point> found4CornersL, std::vector<cv::Point> found4CornersR,
                      std::string sourcePath){

    std::ifstream srcL(sourcePath + "/vision/" + initFileClick + "left.init", std::ios::binary);
    std::ofstream dstL(sourcePath + "/vision/" + initFile_w2D + "left.init", std::ios::binary);
    dstL << srcL.rdbuf();

    std::ifstream srcR(sourcePath + "/vision/" + initFileClick + "right.init", std::ios::binary);
    std::ofstream dstR(sourcePath + "/vision/" + initFile_w2D + "right.init", std::ios::binary);
    dstR << srcR.rdbuf();
    dstL << "# Generated by code: append 2D points" << std::endl
         << 4 << std::endl  // number of 2D points (assumed, per the ViSP init-file format)
         << found4CornersL.at(0).y << " " << found4CornersL.at(0).x << std::endl
         << found4CornersL.at(1).y << " " << found4CornersL.at(1).x << std::endl
         << found4CornersL.at(2).y << " " << found4CornersL.at(2).x << std::endl
         << found4CornersL.at(3).y << " " << found4CornersL.at(3).x << std::endl;

    dstR << "# Generated by code: append 2D points" << std::endl
         << 4 << std::endl  // number of 2D points (assumed)
         << found4CornersR.at(0).y << " " << found4CornersR.at(0).x << std::endl
         << found4CornersR.at(1).y << " " << found4CornersR.at(1).x << std::endl
         << found4CornersR.at(2).y << " " << found4CornersR.at(2).x << std::endl
         << found4CornersR.at(3).y << " " << found4CornersR.at(3).x << std::endl;
}
void append2Dto3Dfile(std::vector<cv::Point> found4CornersL, std::string sourcePath){

    std::ifstream srcL(sourcePath + "/vision/" + initFileClick + "left.init", std::ios::binary);
    std::ofstream dstL(sourcePath + "/vision/" + initFile_w2D + "left.init", std::ios::binary);
    dstL << srcL.rdbuf();

    dstL << "# Generated by code: append 2D points" << std::endl
         << 4 << std::endl  // number of 2D points (assumed)
         << found4CornersL.at(0).y << " " << found4CornersL.at(0).x << std::endl
         << found4CornersL.at(1).y << " " << found4CornersL.at(1).x << std::endl
         << found4CornersL.at(2).y << " " << found4CornersL.at(2).x << std::endl
         << found4CornersL.at(3).y << " " << found4CornersL.at(3).x << std::endl;
}