OpenCV 2.4.X slower than OpenCV 2.1.X at square detection with a webcam

I have tried porting square detection to OpenCV 2.4.1-2.4.4, but the results look very slow. I was keen to move to the newer versions of OpenCV because of the new functionality they provide, yet I am getting very slow results.

My OpenCV code for version 2.4.X is as follows:
// The "Square Detector" program.
// It loads several images sequentially and tries to find squares in
// each image
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <string.h>

using namespace cv;
using namespace std;

int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";

// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle(Point pt1, Point pt2, Point pt0)
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares(const Mat& image, vector<vector<Point> >& squares)
{
    squares.clear();

    Mat pyr, timg, gray0(image.size(), CV_8U), gray;

    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());

    vector<vector<Point> > contours;

    // find squares in every color plane of the image
    for(int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        for(int l = 0; l < N; l++)
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if(l == 0)
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, thresh, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                // apply threshold if l!=0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l+1)*255/N;
            }

            // find contours and store them all as a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;

            // test each contour
            for(size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if(approx.size() == 4 &&
                   fabs(contourArea(Mat(approx))) > 1000 &&
                   isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;

                    for(int j = 2; j < 5; j++)
                    {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degree) then write quadrangle
                    // vertices to resultant sequence
                    if(maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }
}

// the function draws all the squares in the image
static void drawSquares(Mat& image, const vector<vector<Point> >& squares)
{
    for(size_t i = 0; i < squares.size(); i++)
    {
        const Point* p = &squares[i][0];
        int n = (int)squares[i].size();
        polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA);
    }

    imshow(wndname, image);
}

int main()
{
    VideoCapture cap;
    cap.open(0);

    Mat frame, image;
    namedWindow("Square Detection Demo", 1);
    vector<vector<Point> > squares;

    for(;;)
    {
        cap >> frame;
        if(frame.empty()){
            break;
        }

        frame.copyTo(image);
        if(image.empty())
        {
            cout << "Couldn't load image" << endl;
            continue;
        }

        findSquares(image, squares);
        drawSquares(image, squares);
        //imshow("Window", image);

        int c = waitKey(1);
        if((char)c == 27)
            break;
    }

    return 0;
}
As you can see, the code is simply a mix of webcam visualization and the squares detection code provided with OpenCV 2.4.X. However, the equivalent code for OpenCV version 2.1, which I will paste now, is much faster:
#include <cv.h>
#include <highgui.h>

int thresh = 50;
IplImage* img = 0;
IplImage* img0 = 0;
CvMemStorage* storage = 0;

// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle(CvPoint* pt1, CvPoint* pt2, CvPoint* pt0)
{
    double dx1 = pt1->x - pt0->x;
    double dy1 = pt1->y - pt0->y;
    double dx2 = pt2->x - pt0->x;
    double dy2 = pt2->y - pt0->y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4(IplImage* img, CvMemStorage* storage)
{
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize(img->width & -2, img->height & -2);
    IplImage* timg = cvCloneImage(img); // make a copy of input image
    IplImage* gray = cvCreateImage(sz, 8, 1);
    IplImage* pyr = cvCreateImage(cvSize(sz.width/2, sz.height/2), 8, 3);
    IplImage* tgray;
    CvSeq* result;
    double s, t;

    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

    // select the maximum ROI in the image
    // with the width and height divisible by 2
    cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));
    //cvSetImageROI(timg, cvRect(0,0,50, 50));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown(timg, pyr, 7);
    cvPyrUp(pyr, timg, 7);
    tgray = cvCreateImage(sz, 8, 1);

    // find squares in every color plane of the image
    for(c = 0; c < 3; c++)
    {
        // extract the c-th color plane
        cvSetImageCOI(timg, c+1);
        cvCopy(timg, tgray, 0);

        // try several threshold levels
        for(l = 0; l < N; l++)
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if(l == 0)
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny(tgray, gray, 0, thresh, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                cvDilate(gray, gray, 0, 1);
            }
            else
            {
                // apply threshold if l!=0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold(tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY);
            }

            // find contours and store them all as a list
            cvFindContours(gray, storage, &contours, sizeof(CvContour),
                           CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

            // test each contour
            while(contours)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly(contours, sizeof(CvContour), storage,
                                      CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0);

                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if(result->total == 4 &&
                   cvContourArea(result,CV_WHOLE_SEQ,0) > 1000 &&
                   cvCheckContourConvexity(result))
                {
                    s = 0;

                    for(i = 0; i < 5; i++)
                    {
                        // find minimum angle between joint
                        // edges (maximum of cosine)
                        if(i >= 2)
                        {
                            t = fabs(angle(
                                (CvPoint*)cvGetSeqElem(result, i),
                                (CvPoint*)cvGetSeqElem(result, i-2),
                                (CvPoint*)cvGetSeqElem(result, i-1)));
                            s = s > t ? s : t;
                        }
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degree) then write quadrangle
                    // vertices to resultant sequence
                    if(s < 0.3)
                        for(i = 0; i < 4; i++)
                            cvSeqPush(squares,
                                      (CvPoint*)cvGetSeqElem(result, i));
                }

                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage(&gray);
    cvReleaseImage(&pyr);
    cvReleaseImage(&tgray);
    cvReleaseImage(&timg);

    return squares;
}

// the function draws all the squares in the image
void drawSquares(IplImage* img, CvSeq* squares)
{
    CvSeqReader reader;
    IplImage* cpy = cvCloneImage(img);
    int i;

    // initialize reader of the sequence
    cvStartReadSeq(squares, &reader, 0);

    // read 4 sequence elements at a time (all vertices of a square)
    for(i = 0; i < squares->total; i += 4)
    {
        CvPoint pt[4], *rect = pt;
        int count = 4;

        // read 4 vertices
        CV_READ_SEQ_ELEM(pt[0], reader);
        CV_READ_SEQ_ELEM(pt[1], reader);
        CV_READ_SEQ_ELEM(pt[2], reader);
        CV_READ_SEQ_ELEM(pt[3], reader);

        // draw the square as a closed polyline
        cvPolyLine(cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0);
    }

    // show the resultant image
    cvShowImage("Squares", cpy);
    cvReleaseImage(&cpy);
}

int main(int argc, char** argv){
    // Create a window called "Original Image" with a default size.
    cvNamedWindow("Original Image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Squares", CV_WINDOW_AUTOSIZE);

    // Open the connection to the webcam.
    CvCapture* capture = cvCreateCameraCapture(0);
    if(!capture){
        throw "Error when reading steam_avi";
    }

    storage = cvCreateMemStorage(0);

    while(true)
    {
        // Put the captured frame into the originalImg image.
        img0 = cvQueryFrame(capture);
        if(!img0){
            break;
        }
        img = cvCloneImage(img0);

        // find and draw the squares
        drawSquares(img, findSquares4(img, storage));
        cvShowImage("Original Image", img0);
        cvReleaseImage(&img);

        // clear memory storage - reset free space position
        cvClearMemStorage(storage);

        // Wait for ESC to be pressed to exit the infinite loop.
        char c = cvWaitKey(10);
        if(c == 27) break;
    }

    //cvReleaseImage(&img);
    cvReleaseImage(&img0);
    // clear memory storage - reset free space position
    cvClearMemStorage(storage);

    // Destroy the "Original Image" window.
    cvDestroyWindow("Original Image");
    cvDestroyWindow("Squares");

    // Release the memory used by the capture variable.
    cvReleaseCapture(&capture);
}
I know I could use a single color channel to speed it up 3x, and tweak other params as well, but what I am wondering is why the same code gives such different execution times. Am I missing something basic, or is OpenCV 2.4.X simply slower at this? I hope this helps ...
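For reference, a single-channel variant of findSquares() would look roughly like this. It is only a sketch of the idea (untested; the name findSquaresGray is mine). It reuses thresh, N, the angle() helper and the includes from the 2.4.X listing above, and simply replaces the three-plane mixChannels loop with one grayscale conversion:

static void findSquaresGray(const Mat& image, vector<vector<Point> >& squares)
{
    squares.clear();

    Mat pyr, timg, gray0, gray;

    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());

    // one grayscale plane instead of looping over the 3 color planes
    cvtColor(timg, gray0, CV_BGR2GRAY);

    vector<vector<Point> > contours;
    vector<Point> approx;

    // try several threshold levels on the single plane
    for(int l = 0; l < N; l++)
    {
        if(l == 0)
        {
            // Canny pass, same parameters as in the original detector
            Canny(gray0, gray, 0, thresh, 5);
            dilate(gray, gray, Mat(), Point(-1,-1));
        }
        else
        {
            // plain threshold pass
            gray = gray0 >= (l+1)*255/N;
        }

        findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

        for(size_t i = 0; i < contours.size(); i++)
        {
            approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

            if(approx.size() == 4 &&
               fabs(contourArea(Mat(approx))) > 1000 &&
               isContourConvex(Mat(approx)))
            {
                double maxCosine = 0;
                for(int j = 2; j < 5; j++)
                    maxCosine = MAX(maxCosine, fabs(angle(approx[j%4], approx[j-2], approx[j-1])));

                if(maxCosine < 0.3)
                    squares.push_back(approx);
            }
        }
    }
}

This only trims the constant factor (one plane instead of three); it does not explain why the same calls got slower between versions.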
I have tried to put up complete working code so that nobody wastes time on a vague question. Has anybody observed the same behaviour?
It looks like you are using OpenCV for both the capture and the processing. Do you know which one is slow? That is, can you comment out the processing bit and see whether the capture half runs at the expected speed? – Chris
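One rough way to check that split (a sketch only, not from the original post) is to replace the for(;;) loop in main() of the 2.4.X listing with an instrumented version that times the two halves with cv::getTickCount(); the accumulator names are mine:

double capSeconds = 0, procSeconds = 0;
int frames = 0;

for(;;)
{
    double t0 = (double)getTickCount();
    cap >> frame;                              // capture half
    if(frame.empty())
        break;
    double t1 = (double)getTickCount();

    frame.copyTo(image);
    findSquares(image, squares);               // processing half
    drawSquares(image, squares);
    double t2 = (double)getTickCount();

    capSeconds  += (t1 - t0) / getTickFrequency();
    procSeconds += (t2 - t1) / getTickFrequency();
    frames++;

    // print the running averages every 30 frames
    if(frames % 30 == 0)
        cout << "capture: "    << capSeconds  / frames * 1000 << " ms/frame, "
             << "processing: " << procSeconds / frames * 1000 << " ms/frame" << endl;

    if((char)waitKey(1) == 27)
        break;
}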
Hi Chris, first of all thanks for your reply. The capture half works fine. When I try the code, the slow part in OpenCV 2.4.4 is Canny(gray0, gray, 0, thresh, 5); if I comment it out, the code runs at a reasonable speed, so if I have to focus my attention anywhere it has to be at this point. I commented it out and made a simple copy instead, so it works like this: //Canny(gray0, gray, 0, thresh, 5); gray0.copyTo(gray); but now, although the frames are shown, the whole frame gets picked up as a possible square. – dlisin
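A minimal stand-alone check along the same lines (my own sketch, assuming the same OpenCV 2.4 headers as the listing above) is to time the single Canny call against the plain copy that was substituted for it, on one frame grabbed from the webcam:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

int main()
{
    cv::VideoCapture cap(0);
    cv::Mat frame, gray0, gray;

    // grab one frame and convert it to a single gray plane
    cap >> frame;
    if(frame.empty())
        return 1;
    cv::cvtColor(frame, gray0, CV_BGR2GRAY);

    // time Canny with the same parameters used in the detector
    double t0 = (double)cv::getTickCount();
    cv::Canny(gray0, gray, 0, 50, 5);
    double tCanny = ((double)cv::getTickCount() - t0) / cv::getTickFrequency();

    // time the simple copy used as the replacement in the comment above
    t0 = (double)cv::getTickCount();
    gray0.copyTo(gray);
    double tCopy = ((double)cv::getTickCount() - t0) / cv::getTickFrequency();

    std::cout << "Canny: "  << tCanny * 1000.0 << " ms, "
              << "copyTo: " << tCopy  * 1000.0 << " ms" << std::endl;
    return 0;
}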