0 votes

I am trying to develop an OCR system. I am using MSER to extract characters from an image and then passing those characters to a CNN for recognition. Here is my code for character extraction:

import cv2
import numpy as np

# create MSER object
mser = cv2.MSER_create()

# read the image
img = cv2.imread('textArea01.png')

# convert to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# store copy of the image
vis = img.copy()

# detect regions in the image
regions,_ = mser.detectRegions(gray)

# find convex hulls of the regions and draw them onto the original image
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]

cv2.polylines(vis, hulls, 1, (0, 255, 0))

# create mask for the detected region
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))

for contour in hulls:

    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)

    #this is used to find only text regions, remaining are ignored
    text_only = cv2.bitwise_and(img, img, mask=mask)


cv2.imshow('img', vis)
cv2.waitKey(0)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.imshow('text', text_only)
cv2.waitKey(0)

This works fine for most images, but it fails for images like the one below:

(input image: text enclosed by an outer border)

The outer border is also detected as a region, and its contour is drawn into the mask, so the whole area inside the border is treated as a text region and the contours inside it have no effect. How do I prevent this so that only the text is detected?

(images: the detected hulls, and the resulting mask filled in by the border contour)


2 Answers

3 votes

My result using this code:

import cv2
import numpy as np

img = cv2.imread("img.png")

# grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)

# binary
# ret, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 35, 180)
cv2.imshow('threshold', thresh)

# dilation
kernel = np.ones((1, 1), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
cv2.imshow('dilated', img_dilation)

# find contours
# the cv2.findContours() signature changed between OpenCV 3 and OpenCV 4: it now returns two values instead of three
cv2MajorVersion = cv2.__version__.split(".")[0]
# check for contours on thresh
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# sort contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])

for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)

    # Getting ROI
    roi = img[y:y + h, x:x + w]

    # show ROI
    # cv2.imshow('segment no:'+str(i),roi)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)

    # if you want to save the letters without the green bounding box, comment out the line above
    if w > 5:
        cv2.imwrite('C:\\Users\\PC\\Desktop\\output\\{}.png'.format(i), roi)

cv2.imshow('marked areas', img)

cv2.waitKey(0)

(result image: green bounding boxes drawn around the detected characters)
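
If the outer border from the question also shows up here as one huge bounding box, a size check on the box (in the same spirit as the if w > 5 filter above) can drop it before saving. A minimal sketch; the helper name keep_character_boxes and the 0.9 / 5 cutoffs are illustrative choices, not part of the answer above:

import cv2

def keep_character_boxes(contours, img_shape, max_frac=0.9, min_w=5):
    # drop tiny boxes (noise) and boxes that span almost the whole
    # image (e.g. the outer border); everything else is kept
    img_h, img_w = img_shape[:2]
    boxes = []
    for ctr in contours:
        x, y, w, h = cv2.boundingRect(ctr)
        if w <= min_w or (w > max_frac * img_w and h > max_frac * img_h):
            continue
        boxes.append((x, y, w, h))
    return boxes

It could then be called as keep_character_boxes(sorted_ctrs, img.shape) and the loop above run over the returned boxes instead of the raw contours.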

3 votes

You can put a threshold on the contour area so that any shape covering more than a certain area of the image is ignored:

for contour in hulls:
    # skip any hull that covers more than ThresholdArea pixels,
    # e.g. the outer border around the whole image
    if cv2.contourArea(contour) > ThresholdArea:
        continue

    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
    # this is used to find only text regions, remaining are ignored
    text_only = cv2.bitwise_and(img, img, mask=mask)
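
ThresholdArea is a placeholder you have to choose yourself. A minimal sketch of one way to pick it, assuming a single character never covers more than about 10% of the image (the 0.10 fraction is an arbitrary guess and may need tuning):

import cv2

img = cv2.imread('textArea01.png')

# reject any hull larger than ~10% of the image area, which
# comfortably catches a border that encloses the whole text block
ThresholdArea = 0.10 * img.shape[0] * img.shape[1]

Since text_only depends only on the final mask, it can also be computed once after the loop rather than on every iteration.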