1 #include "header/vision.h" 9 void thresh_callback(
int,
void* );
16 void MatchingMethod(
int match_method, cv::Mat img, cv::Mat templ,
17 cv::Point *bestMatch,
double *minMaxVal);
27 int main(
int argc,
char** argv){
    std::cout << "[VISION] Please insert robot name for Vision" << std::endl;

    std::string robotName = argv[1];

    if (LOG && (argc > 2)){
        boost::filesystem::path sourcePath(__FILE__);
        sourcePath.remove_filename();
        std::cout << sourcePath << "\n";
        // ...
    }

    ros::init(argc, argv, robotName + "_Vision");
    std::cout << "[" << robotName << "][VISION] Start" << std::endl;
    robotVisInterface.init();

    std::string cameraLName = "bowtechL_C";
    std::string cameraRName = "bowtechR_C";
    std::string holeName = "hole";

    worldInterface.waitReady(holeName);

    robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));

    worldInterface.getT(&robVisInfo.robotStruct.vTcameraL, robotName, cameraLName);
    worldInterface.getT(&robVisInfo.robotStruct.vTcameraR, robotName, cameraRName);
    worldInterface.getT(&robVisInfo.robotStruct.cLTcR, cameraLName, cameraRName);
    worldInterface.getT(&robVisInfo.robotStruct.cRTcL, cameraRName, cameraLName);

    worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);
    if (pathLog.size() > 0){
        logger = Logger(robotName, pathLog);
        logger.createDirectoryForNode();
    }
    vpImage<unsigned char> imageL_vp, imageR_vp;
    cv::Mat imageL_cv, imageR_cv;

    robotVisInterface.getLeftImage(&imageL_cv);
    robotVisInterface.getRightImage(&imageR_cv);

    vpImageConvert::convert(imageL_cv, imageL_vp);
    vpImageConvert::convert(imageR_cv, imageR_vp);
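    // Model-based tracking: one hybrid tracker (moving edges + KLT keypoints)
    // per camera, combined in a single stereo vpMbGenericTracker.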
    std::vector<int> trackerTypes;
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);

    vpMbGenericTracker tracker(trackerTypes);
    initTracker(&tracker, trackerTypes.size(), robVisInfo.robotStruct.cRTcL);
    vpDisplayOpenCV display_left;
    vpDisplayOpenCV display_right;
    display_left.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display_right.setDownScalingFactor(vpDisplay::SCALE_AUTO);
    display_left.init(imageL_vp, 100, 100, "Model-based tracker (Left)");
    display_right.init(imageR_vp, 110 + (int)imageL_vp.getWidth(), 100,
                       "Model-based tracker (Right)");
    bool initByClick = false;

    switch (trackerTypes.size()){
    case 1:
        tracker.initClick(imageL_vp, initFileClickLeft, true);
        break;
    case 2:
        tracker.initClick(imageL_vp, imageR_vp, initFileClickLeft, initFileClickRight, true);
        break;
    default:
        std::cerr << "WRONG NUMBER OF TRACKERS\n";
    }
    cv::Mat img_object = cv::imread("/home/tori/UWsim/Peg/src/peg/src/vision/data/templateSideBorder.jpg",
                                    cv::IMREAD_GRAYSCALE);
    cv::Mat img_scene = imageL_cv;

    cv::Mat imgd_scene = img_scene;
    cv::Mat imgd_obj = img_object;
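    // Feature-based detection of the template in the left image: detect keypoints
    // and descriptors (SURF is used below; SIFT detectors are created for comparison).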
    cv::Ptr<cv::xfeatures2d::SURF> detectorSURF = cv::xfeatures2d::SURF::create(300);
    cv::Ptr<cv::xfeatures2d::SURF> detectorSURF2 = cv::xfeatures2d::SURF::create(/* ... */);
    cv::Ptr<cv::xfeatures2d::SIFT> detectorSIFT = cv::xfeatures2d::SIFT::create(/* ... */);
    cv::Ptr<cv::xfeatures2d::SIFT> detectorSIFT2 = cv::xfeatures2d::SIFT::create(/* ... */);
    std::vector<cv::KeyPoint> keypoints_object, keypoints_scene;
    cv::Mat descriptors_object, descriptors_scene;
    detectorSURF2->detectAndCompute(img_object, cv::noArray(), keypoints_object, descriptors_object);
    detectorSURF2->detectAndCompute(img_scene, cv::noArray(), keypoints_scene, descriptors_scene);
    std::cout << "points in template: " << keypoints_object.size() << "\n\n";
    std::cout << "points in scene: " << keypoints_scene.size() << "\n\n";

    cv::drawKeypoints(img_scene, keypoints_scene, imgd_scene, cv::Scalar(255,0,0));
    cv::drawKeypoints(img_object, keypoints_object, imgd_obj, cv::Scalar(255,0,0));
    cv::imshow("scene keypoints", imgd_scene);
    cv::imshow("template keypoints", imgd_obj);
    cv::Ptr<cv::DescriptorMatcher> matcher =
        cv::DescriptorMatcher::create(cv::DescriptorMatcher::FLANNBASED);
    std::vector< std::vector<cv::DMatch> > knn_matches;
    matcher->knnMatch(descriptors_object, descriptors_scene, knn_matches, 2);
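    // Filter the matches with Lowe's ratio test: keep a match only if its best
    // distance is sufficiently smaller than the second-best one.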
    const float ratio_thresh = 0.95f;
    std::vector<cv::DMatch> good_matches;
    std::cout << "knn matches: " << knn_matches.size() << "\n\n";
    for (size_t i = 0; i < knn_matches.size(); i++){
        if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance){
            good_matches.push_back(knn_matches[i][0]);
        }
    }
    std::cout << "good matches: " << good_matches.size() << "\n\n";
    cv::Mat img_matches;
    cv::drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
                    good_matches, img_matches, cv::Scalar::all(-1),
                    cv::Scalar::all(-1), std::vector<char>(),
                    cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
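    // Collect the pixel coordinates of the good matches in the template (queryIdx)
    // and in the scene (trainIdx); these correspondences feed the homography estimation.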
    std::vector<cv::Point2f> obj;
    std::vector<cv::Point2f> scene;
    for (size_t i = 0; i < good_matches.size(); i++){
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }
    std::cout << "template good matches used for homography: " << obj.size() << "\n\n";
    std::cout << "scene good matches used for homography: " << scene.size() << "\n\n";
    cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC, 3);
    std::cout << "size of the homography matrix: " << H.size() << "\n\n";
    std::vector<cv::Point2f> obj_corners(4);
    obj_corners[0] = cv::Point2f(0, 0);
    obj_corners[1] = cv::Point2f((float)img_object.cols, 0);
    obj_corners[2] = cv::Point2f((float)img_object.cols, (float)img_object.rows);
    obj_corners[3] = cv::Point2f(0, (float)img_object.rows);
    std::vector<cv::Point2f> scene_corners(4);

    cv::perspectiveTransform(obj_corners, scene_corners, H);
    // Draw lines between the projected corners; they are shifted by the template
    // width because img_matches shows the template and the scene side by side.
    cv::line(img_matches, scene_corners[0] + cv::Point2f((float)img_object.cols, 0),
             scene_corners[1] + cv::Point2f((float)img_object.cols, 0), cv::Scalar(0, 255, 0), 4);
    cv::line(img_matches, scene_corners[1] + cv::Point2f((float)img_object.cols, 0),
             scene_corners[2] + cv::Point2f((float)img_object.cols, 0), cv::Scalar(0, 255, 0), 4);
    cv::line(img_matches, scene_corners[2] + cv::Point2f((float)img_object.cols, 0),
             scene_corners[3] + cv::Point2f((float)img_object.cols, 0), cv::Scalar(0, 255, 0), 4);
    cv::line(img_matches, scene_corners[3] + cv::Point2f((float)img_object.cols, 0),
             scene_corners[0] + cv::Point2f((float)img_object.cols, 0), cv::Scalar(0, 255, 0), 4);

    cv::imshow("Good Matches & Object detection", img_matches);
    objDetectionInit(imageL_vp, &tracker);
    vpKeyPoint keypoint_detection;
    keypoint_detection.loadConfigFile(configFileDetector);
    keypoint_detection.loadLearningData(learnData, true);
    ros::Rate loop_rate(10);
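    // Main loop: refresh the ground-truth poses from the simulator, grab and crop
    // the stereo pair, run the stereo model-based tracker, and log the pose error.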
    robotVisInterface.getwTv(&(robVisInfo.robotState.wTv_eigen));
    worldInterface.getwT(&(robVisInfo.transforms.wTh_eigen), holeName);

    robotVisInterface.getLeftImage(&imageL_cv);
    robotVisInterface.getRightImage(&imageR_cv);
    imageL_cv.convertTo(imageL_cv, CV_8U);
    imageR_cv.convertTo(imageR_cv, CV_8U);

    // crop away the top 60 pixel rows of both images
    imageL_cv = imageL_cv(cv::Rect(0, 60, imageL_cv.cols, imageL_cv.rows-60));
    imageR_cv = imageR_cv(cv::Rect(0, 60, imageR_cv.cols, imageR_cv.rows-60));
    vpImageConvert::convert(imageL_cv, imageL_vp);
    vpImageConvert::convert(imageR_cv, imageR_vp);

    vpDisplay::display(imageL_vp);
    vpDisplay::display(imageR_vp);
    CMAT::TransfMatrix wThole_cmat =
        CONV::matrix_eigen2cmat(robVisInfo.transforms.wTh_eigen);
    vpHomogeneousMatrix cLThole_st, cRThole_st;
    stereoTracking(imageL_vp, imageR_vp, &tracker, &cLThole_st, &cRThole_st);

    vpCameraParameters cam_left, cam_right;
    tracker.getCameraParameters(cam_left, cam_right);
    tracker.display(imageL_vp, imageR_vp, cLThole_st, cRThole_st, cam_left, cam_right, vpColor::red, 2);
    vpDisplay::displayFrame(imageL_vp, cLThole_st, cam_left, 0.25, vpColor::none, 2);
    vpDisplay::displayFrame(imageR_vp, cRThole_st, cam_right, 0.25, vpColor::none, 2);
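    // Estimated hole pose in the world frame, chaining world->vehicle,
    // vehicle->camera and camera->hole (the last one comes from the tracker).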
    Eigen::Matrix4d wTh_estimated_stereo_left =
        robVisInfo.robotState.wTv_eigen *
        robVisInfo.robotStruct.vTcameraL *
        CONV::matrix_visp2eigen(cLThole_st);

    Eigen::Matrix4d wTh_estimated_stereo_right =
        robVisInfo.robotState.wTv_eigen *
        robVisInfo.robotStruct.vTcameraR *
        CONV::matrix_visp2eigen(cRThole_st);

    CMAT::TransfMatrix wTholeEstimated_stereoL_cmat =
        CONV::matrix_eigen2cmat(wTh_estimated_stereo_left);
    CMAT::TransfMatrix wTholeEstimated_stereoR_cmat =
        CONV::matrix_eigen2cmat(wTh_estimated_stereo_right);
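    // Cartesian error between the ground-truth and the estimated hole pose;
    // the two 3-element blocks returned by CMAT::CartError are swapped before logging.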
    CMAT::Vect6 swappedError2 =
        CMAT::CartError(wThole_cmat, wTholeEstimated_stereoL_cmat);
    CMAT::Vect6 error2;
    error2.SetFirstVect3(swappedError2.GetSecondVect3());
    error2.SetSecondVect3(swappedError2.GetFirstVect3());
    logger.logNumbers(error2, "errorStereoL");

    CMAT::Vect6 swappedError3 =
        CMAT::CartError(wThole_cmat, wTholeEstimated_stereoR_cmat);
    CMAT::Vect6 error3;
    error3.SetFirstVect3(swappedError3.GetSecondVect3());
    error3.SetSecondVect3(swappedError3.GetFirstVect3());
    logger.logNumbers(error3, "errorStereoR");
    vpDisplay::displayText(imageL_vp, 30, 10, "A click to exit.", vpColor::red);
    vpDisplay::flush(imageL_vp);
    vpDisplay::flush(imageR_vp);

    if (vpDisplay::getClick(imageL_vp, false)) {
        break;  // a click on the left image exits the loop
    }
int stereoTracking(vpImage<unsigned char> I_left, vpImage<unsigned char> I_right,
                   vpMbGenericTracker *tracker,
                   vpHomogeneousMatrix *cLeftTo, vpHomogeneousMatrix *cRightTo){

    try {
        tracker->track(I_left, I_right);
        tracker->getPose(*cLeftTo, *cRightTo);
    } catch (const vpException &e) {
        std::cerr << "Catch a ViSP exception: " << e.what() << std::endl;
        return -1;
    }
    return 0;
}
int initTracker(vpMbGenericTracker *tracker, int nCameras, Eigen::Matrix4d cRTcL){

    switch (nCameras){
    case 1:
        tracker->loadConfigFile(configFileL);
        tracker->loadModel(caoModel);
        break;
    case 2:
    {
        tracker->loadConfigFile(configFileL, configFileR);
        tracker->loadModel(caoModel, caoModel);

        std::map<std::string, vpHomogeneousMatrix> mapCameraTransf;
        mapCameraTransf["Camera1"] = vpHomogeneousMatrix();                // left camera is the reference
        mapCameraTransf["Camera2"] = CONV::transfMatrix_eigen2visp(cRTcL); // right camera w.r.t. left

        tracker->setCameraTransformationMatrix(mapCameraTransf);
        break;
    }
    default:
        std::cerr << "WRONG NUMBER OF TRACKERS\n";
    }

    tracker->setOgreVisibilityTest(false);
    tracker->setDisplayFeatures(true);

    return 0;
}
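// findSquares: square detection in the spirit of the OpenCV "squares" sample.
// Each color plane is scanned at several threshold levels (plus a Canny pass),
// every contour is approximated by a polygon, and only convex quadrilaterals
// whose corner angles are close to 90 degrees are kept.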
void findSquares( const cv::Mat& image, std::vector<std::vector<cv::Point> >& squares,
                  /* ... */ )
{
    Mat pyr, timg, gray0(image.size(), CV_8U), gray;

    // down-scale and up-scale the image to filter out noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;

    // look for squares in every color plane of the image
    for( int c = 0; c < 3; c++ )
    {
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        for( int l = 0; l < N; l++ )
        {
            if( l == 0 )
            {
                // use Canny to catch squares with gradient shading,
                // then dilate to close gaps between edge segments
                Canny(gray0, gray, 0, thresh, 5);
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1)*255/N;
            }

            findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
            vector<Point> approx;

            for( size_t i = 0; i < contours.size(); i++ )
            {
                // approximate the contour with an accuracy proportional to its perimeter
                approxPolyDP(contours[i], approx, arcLength(contours[i], true)*0.02, true);

                // keep only convex quadrilaterals with a reasonably large area
                if( approx.size() == 4 &&
                    fabs(contourArea(approx)) > 1000 &&
                    isContourConvex(approx) )
                {
                    double maxCosine = 0;
                    for( int j = 2; j < 5; j++ )
                    {
                        // maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    // all angles close to 90 degrees -> store the quadrilateral
                    if( maxCosine < 0.3 )
                        squares.push_back(approx);
                }
            }
        }
    }
}
void drawSquares( cv::Mat& image, const std::vector<std::vector<cv::Point> >& squares,
                  /* ... */ )
{
    for( size_t i = 0; i < squares.size(); i++ )
    {
        const Point* p = &squares[i][0];
        int n = (int)squares[i].size();
        std::cout << "number of points: " << n << "\n";
        polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, LINE_AA);
        circle(image, squares[i][0], 5, Scalar(0,0,0), FILLED);
        circle(image, squares[i][1], 5, Scalar(255,255,0), FILLED);
        circle(image, squares[i][2], 5, Scalar(0,255,255), FILLED);
        circle(image, squares[i][3], 5, Scalar(255,255,255), FILLED);
    }

    imshow(wndname, image);
}
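// angle: cosine of the angle between the vectors pt0->pt1 and pt0->pt2
// (the small constant avoids a division by zero for degenerate points).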
double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 )
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
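// orderAngles (multi-quad overload): checks that every detected quadrilateral has
// exactly 4 corners and orders each one with the single-quad overload below.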
int orderAngles(std::vector<std::vector<cv::Point>> angles, std::vector<std::vector<cv::Point>> *orderedAngles){

    orderedAngles->resize(angles.size()); // one output entry per input quadrilateral
    for (int i = 0; i < angles.size(); i++){
        if (angles.at(i).size() != 4){
            std::cerr << "Angles must be 4\n";
            return -1;
        }
        orderAngles(angles.at(i), &(orderedAngles->at(i)));
    }
    return 0;
}
int orderAngles(std::vector<cv::Point> angles, std::vector<cv::Point> *orderedAngles){

    if (angles.size() != 4){
        std::cerr << "Angles must be 4\n";
        return -1;
    }

    cv::Point center = getCenter(angles);

    // order the corners according to their position with respect to the center:
    // 0 top-left, 1 top-right, 2 bottom-right, 3 bottom-left
    orderedAngles->resize(4);
    for (int i = 0; i < 4; i++){
        if (angles.at(i).x < center.x){
            if (angles.at(i).y < center.y){
                orderedAngles->at(0) = angles.at(i);   // top-left
            } else {
                orderedAngles->at(3) = angles.at(i);   // bottom-left
            }
        } else {
            if (angles.at(i).y < center.y){
                orderedAngles->at(1) = angles.at(i);   // top-right
            } else {
                orderedAngles->at(2) = angles.at(i);   // bottom-right
            }
        }
    }
    return 0;
}
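// getCenter: intersection of the two lines through the given corner points,
// used as the reference point for deciding on which side each corner lies.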
cv::Point getCenter(std::vector<cv::Point> points){

    cv::Point A = points.at(0);
    cv::Point B = points.at(1);
    cv::Point C = points.at(2);
    cv::Point D = points.at(3);

    // line through A and B expressed as a1*x + b1*y = c1
    double a1 = B.y - A.y;
    double b1 = A.x - B.x;
    double c1 = a1*(A.x) + b1*(A.y);

    // line through C and D expressed as a2*x + b2*y = c2
    double a2 = D.y - C.y;
    double b2 = C.x - D.x;
    double c2 = a2*(C.x) + b2*(C.y);

    double determinant = a1*b2 - a2*b1;

    if (determinant == 0){
        // the two lines are parallel: no intersection point
        // ...
    }
    double x = (b2*c1 - b1*c2)/determinant;
    double y = (a1*c2 - a2*c1)/determinant;
    return cv::Point(x, y);
}
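// MatchingMethod: template matching over the whole image; minMaxLoc gives the best
// location (the minimum for the SQDIFF-based methods, the maximum otherwise).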
void MatchingMethod(int match_method, cv::Mat img, cv::Mat templ,
                    cv::Point *bestMatch, double *minMaxVal){

    // the result map has one entry per possible template position in the image
    int result_cols = img.cols - templ.cols + 1;
    int result_rows = img.rows - templ.rows + 1;
    cv::Mat result;
    result.create(result_rows, result_cols, CV_32FC1);

    matchTemplate(img, templ, result, match_method);

    double minVal; double maxVal; Point minLoc; Point maxLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
    if (match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED) {
        // for SQDIFF-based methods the best match is the minimum of the result map
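// objDetectionInit: learning phase of the keypoint-based detector. Keypoints are
// detected on the current image, only those lying on the visible faces of the
// tracked CAD model are kept, their 3D coordinates are computed from the current
// pose, and the resulting reference data is saved to file.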
int objDetectionInit(vpImage<unsigned char> I, vpMbGenericTracker *tracker){

    try {
        vpCameraParameters cam;
        vpHomogeneousMatrix cMo;

        vpDisplayOpenCV display;  // display type assumed, matching the displays used above
        display.init(I, 300, 300, "Model-based edge tracker");

        tracker->getCameraParameters(cam);

        // learning phase: detect keypoints on the current image
        vpKeyPoint keypoint_learning;
        keypoint_learning.loadConfigFile(configFileDetector);

        std::vector<cv::KeyPoint> trainKeyPoints;
        double elapsedTime;
        keypoint_learning.detect(I, trainKeyPoints, elapsedTime);

        // show all the detected keypoints
        vpDisplay::display(I);
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
            vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 4, vpColor::red);
        }
        vpDisplay::displayText(I, 10, 10, "All Keypoints...", vpColor::red);
        vpDisplay::displayText(I, 30, 10, "Click to continue with detection...", vpColor::red);
        vpDisplay::flush(I);
        vpDisplay::getClick(I, true);

        // keep only the keypoints lying on the visible faces of the tracked model
        // and compute their 3D coordinates from the current pose
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair
            = tracker->getPolygonFaces(false);
        polygons = pair.first;
        roisPt = pair.second;
        std::vector<cv::Point3f> points3f;
        tracker->getPose(cMo);
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints,
                                                 polygons, roisPt, points3f);
        keypoint_learning.buildReference(I, trainKeyPoints, points3f);
        keypoint_learning.saveLearningData(learnData, true);

        // show the keypoints actually kept as the learning reference
        vpDisplay::display(I);
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
            vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 4, vpColor::red);
        }
        vpDisplay::displayText(I, 10, 10, "Keypoints only on block...", vpColor::red);
        vpDisplay::displayText(I, 30, 10, "Click to continue with detection...", vpColor::red);
        vpDisplay::flush(I);
        vpDisplay::getClick(I, true);

    } catch (vpException &e) {
        std::cout << "Catch an exception: " << e << std::endl;
    }
    return 0;
}
int objDetection(vpImage<unsigned char> I, vpMbGenericTracker *tracker,
                 vpKeyPoint *keypoint_detection, vpHomogeneousMatrix *cMo,
                 double *error, double *elapsedTime){

    try {
        vpCameraParameters cam;
        tracker->getCameraParameters(cam);

        if (keypoint_detection->matchPoint(I, cam, *cMo, *error, *elapsedTime)) {
            tracker->setPose(I, *cMo);
            tracker->display(I, *cMo, cam, vpColor::red, 2);
        }
    } catch (vpException &e) {
        std::cout << "Catch an exception: " << e << std::endl;
    }
    return 0;
}
robotInterface: a ROS node responsible for taking information from the simulator and the robot sensors, and for sending commands back. It is the intermediate layer between the robot and the mission manager ("main"). This "Vision" version is specialized for a robot with no arm and horizontal cameras, used to detect the hole and to help the insertion phase.
The WorldInterface class provides the transform from the world frame to any other object.
Eigen::Matrix4d wTv_eigen: the transform from the world frame to the vehicle frame, stored among the other transforms.
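For reference, a minimal usage sketch of these interfaces, mirroring the calls made in main above (it assumes an already constructed worldInterface, Eigen::Matrix4d outputs, and the frame names used in this file):

    Eigen::Matrix4d wTh;                                  // world -> hole
    worldInterface.waitReady("hole");                     // wait until the object exists in the scene
    worldInterface.getwT(&wTh, "hole");                   // pose of the hole in the world frame
    Eigen::Matrix4d vTcL;                                 // vehicle -> left camera
    worldInterface.getT(&vTcL, robotName, "bowtechL_C");  // relative transform between two named objects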