
I'm using two Point Grey Chameleon3 mono cameras set up as master/slave and synchronized so that they work as a stereo camera. I then followed the OpenCV tutorial to calibrate the cameras. I captured 70 samples for the calibration and got the calibration results, so I have access to all camera parameters, both intrinsic and extrinsic.

I took the left and right images, undistorted them using the camera matrices and distortion coefficients, and rectified them into epipolar form using the rotation and translation between the cameras. I then used the calib3d module's StereoSGBM class to create a disparity map. Here is my code:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <string>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d.hpp>

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{

    // Default image paths; can be overridden from the command line
    String imageName_left( "/home/OpenCVCode/left_7m.pgm" );
    String imageName_right( "/home/OpenCVCode/right_7m.pgm" );
    if (argc > 2)
    {
        imageName_left = argv[1];
        imageName_right = argv[2];
    }

    Mat image_left = imread(imageName_left, IMREAD_COLOR);
    if (image_left.empty())
    {
        cout << "Could not open or find the left image" << endl;
        return -1;
    }

    Mat image_right = imread(imageName_right, IMREAD_COLOR);
    if (image_right.empty())
    {
        cout << "Could not open or find the right image" << endl;
        return -1;
    }
    cv::Size imageSize = image_left.size();

    // Outputs of stereoRectify
    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];

    // Extrinsics from the stereo calibration: rotation and translation of
    // the right camera relative to the left (T is in metres)
    Mat R = (Mat1d(3, 3) << 0.9997286422545486, 0.000835337139108146, -0.023279692174526096,
                            0.0008925647184101439, 0.9998880281815514, -0.00244797956076857,
                            0.02327756826002481, 0.0024680939144444804, 0.9997259941245548);
    Mat T = (Mat1d(3, 1) << -0.13808630092854335, -0.0016795993989732654, -0.005964101318274714);

    // Intrinsics from the per-camera calibration
    Mat Camera_Matrix_Left = (Mat1d(3, 3) << 446.441726, 0, 266.768096, 0, 448.805499, 186.652251, 0, 0, 1);
    Mat Distortion_Coefficients_Left = (Mat1d(1, 5) << -0.174502, 0.079787, 0.001010, 0.000899, 0);
    Mat Camera_Matrix_Right = (Mat1d(3, 3) << 440.825465, 0, 277.733688, 0, 442.324307, 198.863384, 0, 0, 1);
    Mat Distortion_Coefficients_Right = (Mat1d(1, 5) << -0.226564, 0.290791, 0.000979, 0.003149, 0);

    stereoRectify(Camera_Matrix_Left, Distortion_Coefficients_Left,
                  Camera_Matrix_Right, Distortion_Coefficients_Right,
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  0, 1, imageSize, &validRoi[0], &validRoi[1]);

    // Save the rectification results for later use
    FileStorage fs1("4m2.yml", FileStorage::WRITE);
    fs1 << "R1" << R1;
    fs1 << "R2" << R2;
    fs1 << "P1" << P1;
    fs1 << "P2" << P2;
    fs1 << "Q" << Q;
    fs1.release();



    // Rectification maps: (map1x, map1y) for the left camera,
    // (map2x, map2y) for the right camera
    Mat map1x, map1y, map2x, map2y;
    cv::initUndistortRectifyMap(Camera_Matrix_Left, Distortion_Coefficients_Left, R1, P1, imageSize, CV_32FC1, map1x, map1y);
    cv::initUndistortRectifyMap(Camera_Matrix_Right, Distortion_Coefficients_Right, R2, P2, imageSize, CV_32FC1, map2x, map2y);

    cv::Mat tmp1, tmp2;
    cv::remap(image_left, tmp1, map1x, map1y, INTER_LINEAR);
    cv::remap(image_right, tmp2, map2x, map2y, INTER_LINEAR);
    namedWindow("Rectified left", WINDOW_AUTOSIZE);
    imshow("Rectified left", tmp1);
    namedWindow("Rectified right", WINDOW_AUTOSIZE);
    imshow("Rectified right", tmp2);

    // Disparity map
    cv::Mat pair;
    pair.create(imageSize.height, imageSize.width * 2, CV_8UC3);
    // StereoSGBM parameters: minDisparity, numDisparities, blockSize,
    // P1, P2, disp12MaxDiff, preFilterCap, uniquenessRatio,
    // speckleWindowSize, speckleRange, mode
    cv::Ptr<cv::StereoSGBM> stereo = cv::StereoSGBM::create(
        -64, 128, 11, 100, 1000, 32, 0, 15, 1000, 16, cv::StereoSGBM::MODE_HH);

    cv::Mat img1 = cv::imread(imageName_left, IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread(imageName_right, IMREAD_GRAYSCALE);
    cv::Mat img1r, img2r, disp, vdisp;
    cv::remap(img1, img1r, map1x, map1y, cv::INTER_LINEAR);
    cv::remap(img2, img2r, map2x, map2y, cv::INTER_LINEAR);
    stereo->compute(img1r, img2r, disp); // disp is CV_16S, fixed-point (value * 16)
    cv::normalize(disp, vdisp, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::imshow("Disparity", vdisp);

    // Side-by-side rectified pair with horizontal epipolar lines drawn across it
    cv::Mat part = pair.colRange(0, imageSize.width);
    cvtColor(img1r, part, cv::COLOR_GRAY2BGR);
    part = pair.colRange(imageSize.width, imageSize.width * 2);
    cvtColor(img2r, part, cv::COLOR_GRAY2BGR);
    for (int j = 0; j < imageSize.height; j += 16)
        cv::line(pair, cv::Point(0, j), cv::Point(imageSize.width * 2, j), cv::Scalar(0, 255, 0));
    cv::imshow("Rectified pair", pair);


    waitKey(); // Wait for a keystroke in the window
    return 0;
}

Here are the left and right raw images: left raw image, right raw image.

Rectified images and disparity map: disparity map

I have the rectified images and the disparity map. Now I want to compute the real distance from the camera to the chessboard, in metres. How can this be done? I know the ground-truth distance from the camera to that object, and I would like to see how accurate my camera is. Any help? Thanks


1 Answer


Since you did all the hard work and got your disparity map, calculating depth from here is the easy part. To calculate the depth of each pixel, you need three things:
1- The disparity value of the pixel
2- The distance between your cameras (the baseline)
3- The focal length (if for some reason your cameras have different focal lengths, you can use the average)

Depth = (focal length * distance between cameras) / (disparity value)

Apply this equation to each pixel of your disparity map. If both your focal length and your disparity value are measured in pixels, those units cancel and the depth comes out in whatever unit you used for the distance between the cameras (cm, mm, ...).
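For instance, plugging in the calibration values from your question: the left focal length is about 446 px and the baseline from T is about 0.138 m, so a chessboard at the stated 7 m should produce a disparity of roughly (446 * 0.138) / 7 ≈ 8.8 px, and conversely a measured disparity of 8.8 px maps back to about 7 m.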

//  Assuming dmap is the raw CV_16S disparity map produced by StereoSGBM
//  (values are fixed-point with 4 fractional bits, hence the division by 16)
//  f: focal length in pixels, b: distance between cameras
cv::Mat depthImage(dmap.size(), CV_32FC1);
for (int row = 0; row < dmap.rows; ++row)
{
    for (int col = 0; col < dmap.cols; ++col)
    {
        float disparityValue = dmap.at<short>(row, col) / 16.0f;
        // Guard against invalid (non-positive) disparities
        depthImage.at<float>(row, col) =
            (disparityValue > 0.0f) ? (f * b) / disparityValue : 0.0f;
    }
}

Or you can just use the OpenCV function made for exactly this, cv::reprojectImageTo3D() with the Q matrix you got from stereoRectify(); that is probably the better way to do it.
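A minimal sketch of that route, reusing the disp and Q variables from your code; rowOnBoard and colOnBoard are hypothetical pixel coordinates that you would pick on the chessboard:

// Reproject the raw CV_16S SGBM disparity to metric 3D using the Q
// matrix from stereoRectify(). Divide by 16 first to undo SGBM's
// fixed-point scaling.
cv::Mat dispFloat, xyz;
disp.convertTo(dispFloat, CV_32F, 1.0 / 16.0);
cv::reprojectImageTo3D(dispFloat, xyz, Q, true); // true: mark invalid disparities

// xyz is CV_32FC3; the Z channel is the depth of each pixel, in the
// same units as T (metres here, since T was given in metres).
cv::Vec3f p = xyz.at<cv::Vec3f>(rowOnBoard, colOnBoard); // hypothetical coordinates
std::cout << "Distance to chessboard: " << p[2] << " m" << std::endl;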