
I am a bit confused about the parameters of getPerspectiveTransform, as I cannot get a proper output image. Here is my code. The original_image variable holds the image containing a square object (among others) that I want to crop out into a new image (something like Android OpenCV Find Largest Square or Rectangle). The variables p1, p2, p3, and p4 are the coordinates of the corners of the largest square/rectangle in the image: p1 is the upper left, p2 the upper right, p3 the lower right, and p4 the lower left (assigned clockwise).

    Mat src = new Mat(4,1,CvType.CV_32FC2);
    src.put((int)p1.y,(int)p1.x, (int)p2.y,(int)p2.x, (int)p4.y,(int)p4.x, (int)p3.y,(int)p3.x);
    Mat dst = new Mat(4,1,CvType.CV_32FC2);
    dst.put(0,0, 0,original_image.width(), original_image.height(),original_image.width(), original_image.height(),0);

    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(src, dst);
    Mat cropped_image = original_image.clone();
    Imgproc.warpPerspective(untouched, cropped_image, perspectiveTransform, new Size(512,512));

When I try to display cropped_image, I get an "I don't know what it is" image. I think my parameters to getPerspectiveTransform() are incorrect (or are they?). Please help. Thanks!

Update: When I debugged my code, I found out that the detected corners of my square/rectangle are incorrect; most are roughly right, except for p4. This is my code for detecting the corners of the square or rectangle in the image. The image is all black except for the contour of the largest square/rectangle, which has a white outline.

    //we will find the edges of the new_image (corners of the square/rectangle)
    Point p1 = new Point(10000, 10000); //upper left; minX && minY
    Point p2 = new Point(0, 10000); //upper right; maxX && minY
    Point p3 = new Point(0, 0); //lower right; maxX && maxY
    Point p4 = new Point(10000, 0); //lower left; minX && maxY
    double[] temp_pixel_color;
    for (int x=0; x<new_image.rows(); x++) {
        for (int y=0; y<new_image.cols(); y++) {
            temp_pixel_color = new_image.get(x, y); //we have a black and white image so we only have one color channel
            if (temp_pixel_color[0] > 200) { //we found a white pixel
                if (x<=p1.x && y<=p1.y) { //for p1, minX && minY
                    p1.x = x;
                    p1.y = y;
                }
                else if (x>=p2.x && y<=p2.y) { //for p2, maxX && minY
                    p2.x = x;
                    p2.y = y;
                }
                else if (x>=p3.x && y>=p3.y) { //for p3, maxX && maxY
                    p3.x = x;
                    p3.y = y;
                }
                else if (x<=(int)p4.x && y>=(int)p4.y) { //for p4, minX && maxY
                    p4.x = x;
                    p4.y = y;
                }
            }
        }
    }

Here is my sample image (ignore the colored circles; they are drawn after the corners are detected):

[sample image]

Update (July 16, 2013): I can now detect the corners using only the approxCurve of the largest 4-point contour. Here is my code:

private Mat findLargestRectangle(Mat original_image) {
    Mat imgSource = original_image;
    //Mat untouched = original_image.clone();

    //convert the image to grayscale
    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);

    //detect edges with Canny (produces an 8-bit edge map)
    Imgproc.Canny(imgSource, imgSource, 50, 50);

    //apply Gaussian blur to smooth the detected edge lines
    Imgproc.GaussianBlur(imgSource, imgSource, new Size(5, 5), 5);

    //find the contours
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

    double maxArea = -1;
    int maxAreaIdx = -1;
    MatOfPoint temp_contour = contours.get(0); //start with the first contour as the initial candidate
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    MatOfPoint2f maxCurve = new MatOfPoint2f();
    List<MatOfPoint> largest_contours = new ArrayList<MatOfPoint>();
    for (int idx = 0; idx < contours.size(); idx++) {
        temp_contour = contours.get(idx);
        double contourarea = Imgproc.contourArea(temp_contour);
        //compare this contour to the previous largest contour found
        if (contourarea > maxArea) {
            //check if this contour is a square
            MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
            int contourSize = (int)temp_contour.total();
            Imgproc.approxPolyDP(new_mat, approxCurve, contourSize*0.05, true);
            if (approxCurve.total() == 4) {
                maxCurve = new MatOfPoint2f(approxCurve.toArray()); //copy the curve so later iterations do not overwrite it
                maxArea = contourarea;
                maxAreaIdx = idx;
                largest_contours.add(temp_contour);
            }
        }
    }

    //create the new image here using the largest detected square
    Mat new_image = Mat.zeros(imgSource.size(), CvType.CV_8U); //a new blank (all-black) single-channel image to draw the largest contour on
    Imgproc.cvtColor(new_image, new_image, Imgproc.COLOR_GRAY2RGB); //convert to 3 channels so the colored circles below show up
    Imgproc.drawContours(new_image, contours, maxAreaIdx, new Scalar(255, 255, 255), 1); //draw the largest square/rectangle

    double temp_double[] = maxCurve.get(0, 0);
    Point p1 = new Point(temp_double[0], temp_double[1]);
    Core.circle(new_image, new Point(p1.x, p1.y), 20, new Scalar(255, 0, 0), 5); //p1 is colored red
    String temp_string = "Point 1: (" + p1.x + ", " + p1.y + ")";

    temp_double = maxCurve.get(1, 0);
    Point p2 = new Point(temp_double[0], temp_double[1]);
    Core.circle(new_image, new Point(p2.x, p2.y), 20, new Scalar(0, 255, 0), 5); //p2 is colored green
    temp_string += "\nPoint 2: (" + p2.x + ", " + p2.y + ")";

    temp_double = maxCurve.get(2, 0);       
    Point p3 = new Point(temp_double[0], temp_double[1]);
    Core.circle(new_image, new Point(p3.x, p3.y), 20, new Scalar(0, 0, 255), 5); //p3 is colored blue
    temp_string += "\nPoint 3: (" + p3.x + ", " + p3.y + ")";

    temp_double = maxCurve.get(3, 0);
    Point p4 = new Point(temp_double[0], temp_double[1]);
    Core.circle(new_image, new Point(p4.x, p4.y), 20, new Scalar(0, 255, 255), 5); //p4
    temp_string += "\nPoint 4: (" + p4.x + ", " + p4.y + ")";

    TextView temp_text = (TextView)findViewById(R.id.temp_text);
    temp_text.setText(temp_string);

    return new_image;
}

Here is the sample result image:

[result image]

I have drawn circles at the corners of the square/rectangle and also added a TextView to display all four points.

OK, I will go over your code when I have time, but until then I'll tell you a simpler approach: if you use findContours with the method CV_CHAIN_APPROX_SIMPLE, you can get the corner points right away. – baci
Also, those are not "edges", those are "corners" :) – baci
Are you sure "src" and "dst" are created correctly? I think it would be better if you used a "Point2f" array instead of a Mat. – baci
Also, do not look for a "white" pixel. Just use the contour: check all the contour points and save the point having the minimum x. – baci
Thanks, Baci... I don't think Java/Android has a Point2f data type, and in Java/Android Imgproc.getPerspectiveTransform() accepts two Mat parameters. Am I missing something? Thanks again. – James Arnold
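
For illustration, here is a minimal sketch of the contour-based idea baci suggests above. It is not code from the thread: it assumes the same OpenCV imports as the earlier snippets, that largest_contour is the MatOfPoint of the largest contour (e.g. contours.get(maxAreaIdx) from the code above), and it uses the common corner heuristic based on the sum and difference of the coordinates rather than only the minimum x:

    //Sketch only: walk the points of the largest contour and keep the extremes,
    //instead of scanning every pixel for a white value.
    //Assumes largest_contour is the MatOfPoint of the largest detected contour.
    Point topLeft = null, topRight = null, bottomRight = null, bottomLeft = null;
    double minSum = Double.MAX_VALUE, maxSum = -Double.MAX_VALUE;
    double minDiff = Double.MAX_VALUE, maxDiff = -Double.MAX_VALUE;
    for (Point p : largest_contour.toArray()) {
        double sum = p.x + p.y;   //smallest at the top-left, largest at the bottom-right
        double diff = p.x - p.y;  //largest at the top-right, smallest at the bottom-left
        if (sum < minSum)   { minSum = sum;   topLeft = p; }
        if (sum > maxSum)   { maxSum = sum;   bottomRight = p; }
        if (diff > maxDiff) { maxDiff = diff; topRight = p; }
        if (diff < minDiff) { minDiff = diff; bottomLeft = p; }
    }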

1 Answer


This worked for me. In src_mat.put() the first two arguments should be 0,0 (the row and column at which to start writing), followed by the float values of the coordinates.

    Mat mat = Highgui.imread("inputImage.jpg");

    //four source points and four destination points, one CV_32FC2 point per row
    Mat src_mat = new Mat(4, 1, CvType.CV_32FC2);
    Mat dst_mat = new Mat(4, 1, CvType.CV_32FC2);

    //put(row, col, data...): start writing at element (0,0), then the x,y pairs
    //ordered top-left, top-right, bottom-left, bottom-right
    src_mat.put(0, 0, 407.0, 74.0, 1606.0, 74.0, 420.0, 2589.0, 1698.0, 2589.0);
    dst_mat.put(0, 0, 0.0, 0.0, 1600.0, 0.0, 0.0, 2500.0, 1600.0, 2500.0);

    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(src_mat, dst_mat);

    Mat dst = mat.clone();
    Imgproc.warpPerspective(mat, dst, perspectiveTransform, new Size(1600, 2500));
    Highgui.imwrite("resultImage.jpg", dst);
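
For completeness, a minimal sketch of the same fix applied to the corners detected in the question. The assumptions are that p1 is the top-left, p2 the top-right, p3 the bottom-right and p4 the bottom-left Point taken from approxCurve, and that the 512x512 output size from the question is kept. The key point is that src and dst must list their four points in the same order, with put(0,0, ...) starting at element (0,0) rather than at the point coordinates:

    //Sketch only: feed the detected corners back into getPerspectiveTransform.
    //Assumes p1 = top-left, p2 = top-right, p3 = bottom-right, p4 = bottom-left.
    Mat src = new Mat(4, 1, CvType.CV_32FC2);
    src.put(0, 0,
            p1.x, p1.y,   //top-left
            p2.x, p2.y,   //top-right
            p4.x, p4.y,   //bottom-left
            p3.x, p3.y);  //bottom-right
    Mat dst = new Mat(4, 1, CvType.CV_32FC2);
    dst.put(0, 0,
            0.0, 0.0,
            512.0, 0.0,
            0.0, 512.0,
            512.0, 512.0);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(src, dst);
    Mat cropped_image = new Mat();
    Imgproc.warpPerspective(original_image, cropped_image, perspectiveTransform, new Size(512, 512));

Mixing x and y, or passing the point coordinates as the row/column indices of put (as in the original snippet), is what produces the garbled output.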