I tried computing the disparity map using the code given in the link: link
However, the disparity map appears to be wrong in some areas: some objects far from the camera appear brighter than closer objects. I then tried to calculate the actual depth by reprojecting the disparity values with the calibration matrix Q. The calculated depths are way off from the real-world measured values (off by 20-25 cm). I am confident that the matrix Q is correct, since my rectified images look good. My square size for calibration was also accurate (0.05 meters). My disparity images are attached:
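As a rough sanity check on Q, I use the usual Z = f * B / d relation, reading f and the baseline out of Q (a minimal sketch; expectedDepthFromQ is just a helper name, and it assumes the standard Q layout produced by stereoRectify and a raw 16-bit fixed-point disparity value):

#include <opencv2/core.hpp>
#include <cmath>

// Sketch: with the standard Q from stereoRectify, Q(2,3) is the rectified
// focal length f (in pixels) and Q(3,2) is -1/Tx, so the baseline is
// B = 1/|Q(3,2)| and Z ≈ f * B / d, where d is the true disparity in pixels
// (raw 16-bit matcher output divided by StereoMatcher::DISP_SCALE = 16).
double expectedDepthFromQ(const cv::Mat& Q, double rawDisparity)
{
    double f = Q.at<double>(2, 3);               // focal length in pixels
    double invB = std::abs(Q.at<double>(3, 2));  // 1 / baseline
    double d = rawDisparity / 16.0;              // fixed-point -> pixels
    return (f / invB) / d;                       // Z = f * B / d
}

I only use this for spot-checking single pixels; the snippet below does the full reprojection.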
This is the added code for calculating the actual depth from the filtered disparity image stored in filtered_disp_vis.
fs1["Q"] >> Q;
Mat Image;
Mat V = Mat(4, 1, CV_64FC1);
Mat pos = Mat(4, 1, CV_64FC1);
vector< Point3d > points;
//float fMaxDistance = static_cast<float>((1. / Q.at<double>(3, 2)) * Q.at<double>(2, 3));
//filtered_disp_vis.convertTo(filtered_disp_vis, CV_64FC1, 1.0 / 16.0, 0.0);
//imshow("filtered disparity", filtered_disp_vis);
// outputDisparityValue is single 16-bit value from disparityMap
// DISP_SCALE = 16
//float fDisparity = outputDisparityValue / (float)StereoMatcher::DISP_SCALE;
//float fDistance = fMaxDistance / fDisparity;
// reproject the whole disparity image through Q into a 3-channel image of 3D points
reprojectImageTo3D(filtered_disp_vis, Image, Q, false, CV_32F);
//cout << Image;
for (int i = 0; i < filtered_disp_vis.cols; i++)
{
for (int j = 0; j < filtered_disp_vis.rows; j++)
{
int d = filtered_disp_vis.at<uchar>(j, i);
//filtered_disp_vis.convertTo(filtered_disp_vis, CV_32F, 1.0 / 16.0, 0.0);
//int l = img_left.at<uchar>(j, i);
//cout << "(" << j << "," << i << ")" << "=" << d;
//out << endl;
// if low disparity, then ignore
/*if (d < 2) {
continue;
}*/
// V is the vector to be multiplied by Q to get
// the 3D homogeneous coordinates of the image point
V.at<double>(0, 0) = (double)(i);
V.at<double>(1, 0) = (double)(j);
V.at<double>(2, 0) = (double)d;
V.at<double>(3, 0) = 1.;
pos = Q * V; // 3D homogeneous coordinate
double X = pos.at<double>(0, 0) / pos.at<double>(3, 0);
double Y = pos.at<double>(1, 0) / pos.at<double>(3, 0);
double Z = pos.at<double>(2, 0) / pos.at<double>(3, 0);
if (i == 446 && j == 362)
{
cout << "(" << j << "," << i << ")" << " = ";
cout << X << " " << Y << " " << Z << " " << d;
cout << endl;
}
Mat point3d_cam = Mat(3, 1, CV_64FC1);
point3d_cam.at<double>(0, 0) = X;
point3d_cam.at<double>(1, 0) = Y;
point3d_cam.at<double>(2, 0) = Z;
// transform 3D point from camera frame to robot frame
//Mat point3d_robot = XR * point3d_cam + XT;
points.push_back(Point3d(point3d_cam));
}
}
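For comparison, the same test pixel can also be read straight out of the reprojected image Image (a sketch; this assumes the CV_32F reprojectImageTo3D call above, which yields a CV_32FC3 image):

// Cross-check: channel 2 of the Vec3f at (row, col) is the reprojected Z
Vec3f p = Image.at<Vec3f>(362, 446);
cout << "reprojectImageTo3D Z at (362,446) = " << p[2] << endl;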
Where am I going wrong? Any modifications to my snippet, or different recommendations for getting proper disparity maps with accurate depth values, would be appreciated.