To improve the results of your disparity map, you can implement post-filtering; here is a tutorial (https://docs.opencv.org/master/d3/d14/tutorial_ximgproc_disparity_filtering.html). I also used an extra speckle filter and the option to fill in missing disparities. The Python implementation is as follows:
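Before the main block, here is a minimal setup sketch so the snippet is self-contained. It assumes a side-by-side stereo frame I (left and right views in one image, as the code below splits it down the middle); the parameter names match the snippet, but the concrete values are only illustrative assumptions and should be tuned for your own cameras and scene:

import math
import cv2
import numpy as np

I = cv2.imread('stereo_pair.png')         # assumption: one image holding left and right views side by side
max_disparity = 128                        # must be divisible by 16
window_size = 5                            # SGBM block size (odd, typically 3..11)
p1 = 8 * 1 * window_size * window_size     # smoothness penalties for a 1-channel (grayscale) input
p2 = 32 * 1 * window_size * window_size
disp12Maxdiff = 1
uniquenessRatio = 15
speckle_window = 200
speckle_range = 2
prefiltercap = 63
wls_filter = True                          # enable the WLS post-filtering branch below
wls_lambda = 8000.0
wls_sigma = 1.5
fill_missing_disparity = True

With those in place, the disparity computation itself: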
stereoProcessor = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=max_disparity,    # must be divisible by 16, e.g. 128, 192, 256
    blockSize=window_size,
    P1=p1,                           # 8 * number_of_image_channels * blockSize * blockSize
    P2=p2,                           # 32 * number_of_image_channels * blockSize * blockSize
    disp12MaxDiff=disp12Maxdiff,
    uniquenessRatio=uniquenessRatio,
    speckleWindowSize=speckle_window,
    speckleRange=speckle_range,
    preFilterCap=prefiltercap,
    # mode=cv2.STEREO_SGBM_MODE_HH  # optional: full-scale two-pass mode (slower, more accurate)
)
#stereoProcessor = cv2.StereoBM_create(numDisparities=16, blockSize=15)
# set up left->right and right->left matching plus
# weighted least squares (WLS) filtering (only used if the wls_filter flag is set)
left_matcher = stereoProcessor
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
# image information - split the side-by-side frame into its left and right halves
height, width, channels = I.shape
frameL = I[:, 0:int(width/2), :]
frameR = I[:, int(width/2):width, :]
# remember to convert to grayscale (as the disparity matching works on grayscale)
grayL = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
grayR = cv2.cvtColor(frameR, cv2.COLOR_BGR2GRAY)
# perform preprocessing - raise to the power, as this subjectively appears
# to improve subsequent disparity calculation
grayL = np.power(grayL, 0.75).astype('uint8')
grayR = np.power(grayR, 0.75).astype('uint8')
# compute disparity image from undistorted and rectified versions
# (which for reasons best known to the OpenCV developers is returned scaled by 16)
if (wls_filter):
    # note: the boolean flag is replaced here by the actual WLS filter object
    wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
    wls_filter.setLambda(wls_lambda)
    wls_filter.setSigmaColor(wls_sigma)
    displ = left_matcher.compute(cv2.UMat(grayL), cv2.UMat(grayR))  # .astype(np.float32)/16
    dispr = right_matcher.compute(cv2.UMat(grayR), cv2.UMat(grayL))  # .astype(np.float32)/16
    displ = np.int16(cv2.UMat.get(displ))
    dispr = np.int16(cv2.UMat.get(dispr))
    disparity = wls_filter.filter(displ, grayL, None, dispr)
else:
    disparity_UMat = stereoProcessor.compute(cv2.UMat(grayL), cv2.UMat(grayR))
    disparity = cv2.UMat.get(disparity_UMat)
# extra speckle filtering: remove small blobs of outlier disparities
speckleSize = math.floor((width * height) * 0.0005)
maxSpeckleDiff = (8 * 16)  # 128, in raw (x16) disparity units
cv2.filterSpeckles(disparity, 0, speckleSize, maxSpeckleDiff)
# scale the disparity to 8-bit for viewing:
# divide by 16 and convert to an 8-bit image; the range of values should
# then be 0 -> max_disparity, but in fact it is -1 -> max_disparity - 1,
# so we also fix this with an initial threshold between 0 and max_disparity,
# as disparity = -1 means no disparity is available
_, disparity = cv2.threshold(disparity,0, max_disparity * 16, cv2.THRESH_TOZERO)
disparity_scaled = (disparity / 16.).astype(np.uint8)
# fill disparity if requested
if (fill_missing_disparity):
    _, mask = cv2.threshold(disparity_scaled, 0, 1, cv2.THRESH_BINARY_INV)
    mask[:, 0:120] = 0  # ignore the left image band, where no valid disparity can be computed anyway
    disparity_scaled = cv2.inpaint(disparity_scaled, mask, 2, cv2.INPAINT_NS)
# display disparity - which ** for display purposes only ** we re-scale to 0 ->255
disparity_to_display = (disparity_scaled * (256. / self.value_NumDisp)).astype(np.uint8)
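(self.value_NumDisp in the last line appears to be a class attribute holding the number of disparities used for normalisation.) If you just want to eyeball the result, something along these lines should work as a follow-up (assumes a GUI-enabled OpenCV build; the colormap is optional):

disparity_colour = cv2.applyColorMap(disparity_to_display, cv2.COLORMAP_JET)
cv2.imshow("left", frameL)
cv2.imshow("disparity", disparity_colour)
cv2.waitKey(0)
cv2.destroyAllWindows()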