1 #include "header/detector.h" 29 int threshLevels,
int cannyThresh){
// Detector::findSquare: detects quadrilaterals in `image` and writes the
// four ordered corners of each one into *found4CornersVector.
// NOTE(review): this listing is mangled -- the leading integers on some
// lines are original source line numbers fused into the text, and the head
// of this signature (return type, `image`, `found4CornersVector`) is on
// lines missing from the listing.
31 std::vector<std::vector<cv::Point> > squares;
// Low-level detection pass: thresholding/Canny + contour analysis.
35 Detector::_findSquares(image, squares, threshLevels, cannyThresh);
// One ordered 4-corner entry per detected square.
39 found4CornersVector->resize(squares.size());
40 Detector::orderAngles(squares, found4CornersVector);
// Detector::drawSquares -- draws each detected square as a green outline,
// marks its four corners with differently colored filled dots (so the
// corner order produced by orderAngles can be inspected visually), and
// shows the result in window `wndname`.
// NOTE(review): the signature of this function (original lines 44-60) is
// missing from this listing.
61 for(
size_t i = 0; i < squares.size(); i++ )
// polylines' C-style overload wants a pointer-to-pointer plus a count.
63 const Point* p = &squares[i][0];
64 int n = (int)squares[i].size();
66 polylines(image, &p, &n, 1,
true, Scalar(0,255,0), 2, LINE_AA);
// Corner markers: one distinct BGR color per corner index 0..3.
67 circle(image, squares[i][0], 5, Scalar(0,0,0), FILLED);
68 circle(image, squares[i][1], 5, Scalar(255,255,0), FILLED);
69 circle(image, squares[i][2], 5, Scalar(0,255,255), FILLED);
70 circle(image, squares[i][3], 5, Scalar(255,255,255), FILLED);
73 imshow(wndname, image);
// Detector::_findSquares: core detector. For every color plane and for
// several binarization levels, extracts contours and keeps those that look
// like squares (convex quads with near-90-degree corners).
78 void Detector::_findSquares(
const cv::Mat& image, std::vector<std::vector<cv::Point> >& squares,
// NOTE(review): the remaining parameters (the level count `N`, the Canny
// threshold `thresh`, and the channel-selector array `ch` used below) are
// declared on lines missing from this listing.
// Down/up-sample once to suppress small-scale noise.
86 Mat pyr, timg, gray0(image.size(), CV_8U), gray;
88 pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
89 pyrUp(pyr, timg, image.size());
90 vector<vector<Point> > contours;
// Search every color plane of the image independently.
92 int channels = image.channels();
94 for(
int c = 0; c < channels; c++ )
97 mixChannels(&timg, 1, &gray0, 1, ch, 1);
// Try several threshold levels per plane; level 0 uses Canny instead
// of a fixed threshold so gradient-shaded squares are caught too.
99 for(
int l = 0; l < N; l++ )
107 Canny(gray0, gray, 0, thresh, 5);
// Dilation closes small gaps between Canny edge segments.
110 dilate(gray, gray, Mat(), Point(-1,-1));
// Non-zero levels: plain binary threshold at (l+1)*255/N.
116 gray = gray0 >= (l+1)*255/N;
119 findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
120 vector<Point> approx;
122 for(
size_t i = 0; i < contours.size(); i++ )
// Polygonal approximation with tolerance = 2% of the perimeter.
126 approxPolyDP(contours[i], approx, arcLength(contours[i],
true)*0.02,
true);
// Keep only convex quadrilaterals with a non-trivial area...
133 if( approx.size() == 4 &&
134 fabs(contourArea(approx)) > 1000 &&
135 isContourConvex(approx) )
137 double maxCosine = 0;
// ...whose corner angles are all close to 90 degrees: the largest
// |cosine| over the vertices must stay below 0.3.
138 for(
int j = 2; j < 5; j++ )
141 double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
142 maxCosine = MAX(maxCosine, cosine);
147 if( maxCosine < 0.3 )
148 squares.push_back(approx);
167 double Detector::angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 )
169 double dx1 = pt1.x - pt0.x;
170 double dy1 = pt1.y - pt0.y;
171 double dx2 = pt2.x - pt0.x;
172 double dy2 = pt2.y - pt0.y;
173 return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
// Batch overload: orders the corners of every quad in `angles`, writing
// result i into orderedAngles->at(i) via the single-quad overload below.
// Quads without exactly 4 points are reported on stderr.
// NOTE(review): the error-path statement after the size check (original
// lines 180-181), the loop/function closing braces and this function's
// int return value are on lines missing from this listing -- verify
// whether a bad-sized quad is skipped or processed anyway.
176 int Detector::orderAngles(std::vector<std::vector<cv::Point>> angles, std::vector<std::vector<cv::Point>> *orderedAngles){
177 for (
int i=0; i< angles.size(); i++){
178 if (angles.at(i).size() != 4){
179 std::cerr <<
"[DETECTOR] orderAngles Angles must be 4\n";
// Delegate per-quad corner ordering to the single-quad overload.
182 orderAngles(angles.at(i), &(orderedAngles->at(i)));
// Orders the 4 corners of one quad into *orderedAngles as:
// 0 = top-left, 1 = top-right, 2 = bottom-right, 3 = bottom-left,
// classifying each input point by its position relative to the quad's
// center (diagonal intersection computed by getCenter).
187 int Detector::orderAngles(std::vector<cv::Point> angles, std::vector<cv::Point> *orderedAngles){
189 if (angles.size() != 4){
190 std::cerr <<
"[DETECTOR] orderAngles Angles must be 4\n";
// NOTE(review): the early exit after this error, the else-branch lines of
// the conditionals below, and the final return value are on lines missing
// from this listing.
195 cv::Point center = getCenter(angles);
196 orderedAngles->resize(4);
197 for (
int i=0; i<4; i++){
// Left of center:
199 if (angles.at(i).x < center.x){
// ...and above center -> top-left (index 0).
200 if (angles.at(i).y < center.y){
201 orderedAngles->at(0) = angles.at(i);
// ...otherwise below center -> bottom-left (index 3).
203 orderedAngles->at(3) = angles.at(i);
// Right of center: above -> top-right (1), below -> bottom-right (2).
207 if (angles.at(i).y < center.y){
208 orderedAngles->at(1) = angles.at(i);
211 orderedAngles->at(2) = angles.at(i);
219 cv::Point Detector::getCenter(std::vector<cv::Point> points){
221 cv::Point A = points.at(0);
222 cv::Point B = points.at(2);
223 cv::Point C = points.at(3);
224 cv::Point D = points.at(1);
227 double a1 = A.x - A.y;
228 double b1 = A.x - B.x;
229 double c1 = a1*(A.x) + b1*(A.y);
232 double a2 = D.y - C.y;
233 double b2 = C.x - D.x;
234 double c2 = a2*(C.x)+ b2*(C.y);
236 double determinant = a1*b2 - a2*b1;
238 if (determinant == 0)
243 double x = (b2*c1 - b1*c2)/determinant;
244 double y = (a1*c2 - a2*c1)/determinant;
245 return cv::Point(x, y);
// Multi-template front-end: runs the single-template matcher once per
// template in templVector, collecting per-template corners and scores.
// NOTE(review): the head of this signature (return type, name, the image
// and templVector parameters -- original lines before 270) and the start
// of the per-template call inside the loop (original line 275) are
// missing from this listing.
270 std::vector<std::vector<cv::Point>> *found4CornersVector, std::vector<double> *bestValues,
271 int templ_method, std::vector<double> scaleFactors,
bool showDisplay){
273 for (
int i=0; i< templVector.size(); i++){
276 templ_method, scaleFactors, showDisplay);
// Single-template matcher: slides `templ` over `img` resized at a series
// of scale factors, keeps the best-scoring scale, and returns the matched
// region's four corners (TL, TR, BR, BL) mapped back to the original
// image coordinates. Optionally draws/shows the winning rectangle.
// NOTE(review): the head of this signature (return type, name, img, templ
// and templ_method parameters -- original lines before 316) is missing
// from this listing.
316 std::vector<cv::Point> *found4Corners,
double* bestValue,
318 std::vector<double> scaleFactors,
bool showDisplay){
// Normalize to 3 channels so the display/matching path is uniform.
321 if (img.channels() == 1){
322 cv::cvtColor(img, img, cv::COLOR_GRAY2BGR);
325 const char* image_window;
328 image_window =
"Template Matching result";
329 img_display = img.clone();
330 cv::namedWindow( image_window, cv::WINDOW_AUTOSIZE );
// Default scale pyramid, used when the caller passes no factors.
333 if (scaleFactors.size() == 0){
334 scaleFactors = {0.75, 0.6, 0.65, 0.5,
335 0.48, 0.45, 0.42, 0.4, 0.38, 0.38, 0.32, 0.3,
336 0.28, 0.25, 0.24, 0.23, 0.22, 0.21, 0.20, 0.19, 0.18, 0.15, 0.1};
// Per-scale best score, best location, and the X/Y factors needed to map
// scaled-image coordinates back onto the original image.
340 std::vector<double> minMaxValue(scaleFactors.size()),
341 scaleUpY(scaleFactors.size()),
342 scaleUpX(scaleFactors.size());
344 std::vector<cv::Point> bestMatch(scaleFactors.size());
345 cv::Mat imgScaled = img.clone();
// lastScale marks how many scales actually ran; only that prefix of the
// score vector is searched below.
349 int lastScale=scaleFactors.size();
350 for (
int i=0; i<scaleFactors.size(); i++){
352 cv::resize(img, imgScaled, cv::Size(), scaleFactors[i], scaleFactors[i]);
357 scaleUpY[i] = ((double)img.rows) / ((double)imgScaled.rows);
358 scaleUpX[i] = ((double)img.cols) / ((double)imgScaled.cols);
// Template no longer fits in the scaled image -- stop scaling down.
// NOTE(review): the body of this check (presumably lastScale = i; break)
// is on lines missing from this listing.
361 if (imgScaled.rows < templ.rows || imgScaled.cols < templ.cols){
368 Detector::MatchingMethod(templ_method, imgScaled, templ, &(bestMatch[i]), &(minMaxValue[i]));
// SQDIFF-style methods score best at the MINIMUM of the result values;
// every other method scores best at the maximum.
396 if( templ_method == cv::TM_SQDIFF || templ_method == cv::TM_SQDIFF_NORMED ){
397 indexBest = std::distance(minMaxValue.begin(),
398 std::min_element(minMaxValue.begin(),
399 minMaxValue.begin() + lastScale));
402 indexBest = std::distance(minMaxValue.begin(),
403 std::max_element(minMaxValue.begin(),
404 minMaxValue.begin() + lastScale));
407 std::cout <<
"[DETECTOR][TEMPLATE_MATCHING] BEST ITERATION: scaling factor " << scaleFactors[indexBest]
408 <<
"\n \t with value: " << minMaxValue.at(indexBest) <<
"\n";
// Map the winning match back to original-image coordinates.
410 cv::Point topLeft, bottomRight;
411 topLeft.x = (int)(bestMatch[indexBest].x * scaleUpX[indexBest]);
412 topLeft.y = (int)(bestMatch[indexBest].y * scaleUpY[indexBest]);
413 bottomRight.x = (int)( (bestMatch[indexBest].x + templ.cols) * scaleUpX[indexBest]);
414 bottomRight.y = (int)( (bestMatch[indexBest].y + templ.rows) * scaleUpY[indexBest]);
418 cv::rectangle( img_display, topLeft, bottomRight,
419 cv::Scalar(0,255,0), 1, 8, 0);
420 cv::imshow( image_window, img_display);
// Output corners clockwise from the top-left.
424 found4Corners->resize(4);
425 found4Corners->at(0) = topLeft;
426 found4Corners->at(1) = cv::Point(bottomRight.x, topLeft.y);
427 found4Corners->at(2) = bottomRight;
428 found4Corners->at(3) = cv::Point(topLeft.x, bottomRight.y);
// Score is reported only when the caller asked for it.
430 if (bestValue != NULL){
431 *bestValue = minMaxValue.at(indexBest);
// Runs cv::matchTemplate with `match_method` on img/templ and reports the
// best match position in *bestMatch and its score in *minMaxVal.
439 void Detector::MatchingMethod(
int match_method, cv::Mat img, cv::Mat templ,
440 cv::Point *bestMatch,
double *minMaxVal){
// The result map has one score per valid template placement.
// NOTE(review): the declaration of `result` (a cv::Mat, original lines
// 441-448) is missing from this listing.
446 int result_cols = img.cols - templ.cols + 1;
447 int result_rows = img.rows - templ.rows + 1;
449 result.create( result_rows, result_cols, CV_32FC1 );
451 matchTemplate( img, templ, result, match_method);
453 double minVal;
double maxVal; Point minLoc; Point maxLoc;
454 minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc);
// For SQDIFF methods the best match is the minimum of the result map;
// for all other methods it is the maximum.
// NOTE(review): the branch bodies and the function tail run past the
// lines visible in this listing.
455 if( match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED ) {
static int findSquare(cv::Mat &image, std::vector< std::vector< cv::Point >> *found4CornersVector, int threshLevels=11, int cannyThresh=50)
Detector::findSquare finds all squares in an image, exploiting OpenCV functions. In practice it is a blob...
static void drawSquares(cv::Mat image, const std::vector< std::vector< cv::Point > > squares, const char *wndname="Square Detection Demo")
Detector::drawSquares draws the detected squares onto the image.
static int templateMatching(cv::Mat img, std::vector< cv::Mat > templVector, std::vector< std::vector< cv::Point >> *found4CornersVector, std::vector< double > *bestValues, int templ_method=cv::TM_SQDIFF, std::vector< double > scaleFactors=std::vector< double >(), bool showDisplay=true)
Detector::templateMatching: see the single-template function below; this overload is used when multiple templates are to be matched...