import numpy as np
import sys
import cv2
import easyocr
from PIL import ImageFont, ImageDraw, Image
from matplotlib import pyplot as plt
import pprint

class TextExtractor():
    """Wraps a shared EasyOCR reader with helpers to draw OCR results.

    NOTE(review): the reader is created at class-definition time, so the
    (slow) model load happens when this module is imported, and all
    instances share one reader.
    """
    ocr_reader = easyocr.Reader(['ch_sim', 'en'])

    def draw_bounding_box(self, img, weight, x_left, y_lower, x_right, y_upper):
        """Draw an axis-aligned rectangle on img in-place and return it.

        weight is the line thickness in pixels; the color is fixed.
        """
        cv2.rectangle(img, (x_left, y_lower), (x_right, y_upper),
                      (200, 200, 0), weight)
        return img

    def draw_contour(self, img, weight, pts):
        """Draw a closed polygon through pts (sequence of [x, y]) on img.

        weight is the polyline thickness in pixels; returns the image.
        """
        # cv2.polylines expects int32 points shaped (-1, 1, 2).
        pts = np.array(pts, np.int32)
        pts = pts.reshape((-1, 1, 2))

        # isClosed=True connects the last vertex back to the first.
        isClosed = True
        color = (200, 0, 0)

        img = cv2.polylines(img, [pts], isClosed, color, weight)
        return img

    def draw_Chinese_text(self, image, text, weight, x_left, y_lower):
        """Render text (including CJK characters) just above (x_left, y_lower).

        cv2.putText cannot render Chinese glyphs, so we round-trip through
        PIL with a TrueType font. weight doubles as the font size in points.
        Returns a new numpy image; the input array is not modified.
        """
        fontpath = "fonts/simsun.ttc"  # SimSun font collection (宋体字集)
        font = ImageFont.truetype(fontpath, weight)
        img_pil = Image.fromarray(image)
        draw = ImageDraw.Draw(img_pil)

        r, g, b, a = 250, 0, 0, 0
        # Anchor the text one font-height above the given point so it sits
        # on top of the detected region rather than inside it.
        draw.text((x_left, y_lower - weight), text, font=font, fill=(r, g, b, a))
        texted_image = np.array(img_pil)

        return texted_image

    def display_image(self, image, title):
        """Show image with matplotlib and block until a key is pressed."""
        plt.imshow(image)
        plt.title(title)
        plt.show(block=False)

        # waitforbuttonpress() returns True for a key press and False for a
        # mouse click; keep waiting until an actual key press arrives.
        keyboardClick = False
        while keyboardClick != True:
            keyboardClick = plt.waitforbuttonpress()
        plt.close()

    def recognize_text(self, img) -> list:
        """Run OCR on img and return EasyOCR result tuples (bbox, text, conf).

        Returns an empty list when readtext() raises.
        """
        # BUGFIX: initialize results; previously the name was unbound at the
        # return statement whenever readtext() raised, causing an
        # UnboundLocalError instead of the intended graceful fallback.
        results = []
        try:
            results = self.ocr_reader.readtext(img, detail=1, paragraph=False)

            print(" >> The results of EasyOCR reading text: ")
            pprint.pprint(results)
        except Exception as err:
            print(f" Exception while readtext(): {str(err)}")

        return results

    def display_results(self, img, rsts: list):
        """Overlay each recognized region's contour and its text onto img.

        rsts is the list returned by recognize_text(); each entry is
        (contour, text, ...). Returns the annotated image.
        """
        assemble_img = img
        for rst in rsts:
            contour, text = rst[0], rst[1]
            assemble_img = self.draw_contour(assemble_img, 5, contour)

            # Place the label at the contour's first (top-left) vertex.
            assemble_img = self.draw_Chinese_text(
                assemble_img, text, 40, contour[0][0], contour[0][1])

        return assemble_img



def testrun_easyocr_detect_recognize():
    """Demo: run EasyOCR's detection stage only and draw the found regions."""
    SOURCE = "images/"
    RESULT = "results/"
    img_file1 = "佛山乒乓比赛20221106.jpeg"
    img_file2 = "钱大妈.jpeg"
    img_file3 = "百果园_发票.JPG"
    img_file4 = "加州车牌.jpg"
    img_file5 = "七婆串串香.JPG"
    img_file6 = "化验单.jpeg"
    img_file7 = "磨刀石发票.png"
    img_file8 = "瑞幸咖啡.JPG"

    img_filename = img_file2
    image = cv2.imread(SOURCE + img_filename)

    # BUGFIX: check for a failed read BEFORE touching the image; the
    # original called image.copy() first and raised AttributeError on None.
    if image is None:
        print(f" !! Cannot read the image {img_filename} ")
        exit()
    assemble_img = image.copy()

    text_extractor = TextExtractor()
    try:
        horizontal_list, free_list = text_extractor.ocr_reader.detect(image)
    except Exception as err:
        # BUGFIX: report the failing call (detect, not readtext) and bail
        # out; the original fell through and hit an UnboundLocalError on
        # horizontal_list below.
        print(f" Exception while detect(): {str(err)}")
        return

    print(" >> EasyOCR detection()->horizontal_list results: ")
    pprint.pprint(horizontal_list)
    for region in horizontal_list:
        for area in region:
            # detect() boxes are [x_min, x_max, y_min, y_max]; map them to
            # draw_bounding_box(img, weight, x_left, y_lower, x_right, y_upper).
            assemble_img = text_extractor.draw_bounding_box(
                assemble_img, 5, area[0], area[2], area[1], area[3])

    print(" >> EasyOCR detection()->free_list results: ")
    pprint.pprint(free_list)
    for region in free_list:
        for area in region:
            # free_list entries are full 4-point polygons.
            assemble_img = text_extractor.draw_contour(assemble_img, 5, area)



def testrun_easyocr_readtext():
    """Demo: full OCR pipeline — read, recognize, overlay, display, save."""
    SOURCE = "images/"
    RESULT = "results/"
    img_file1 = "佛山乒乓比赛.jpeg"
    img_file2 = "钱大妈.jpeg"
    img_file3 = "核酸业务通知.JPG"

    img_filename = img_file1
    image = cv2.imread(SOURCE + img_filename)
    if image is None:
        print(f" !! Cannot read the image {img_filename} ")
        exit()

    # BUGFIX: cv2.imread returns BGR, but both plt.imshow (in
    # display_image) and the final COLOR_RGB2BGR conversion before imwrite
    # assume RGB, so display and saved file had swapped red/blue channels.
    # Convert once here so the rest of the pipeline works in RGB.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    text_extractor = TextExtractor()
    results = text_extractor.recognize_text(image)
    result_imgs = text_extractor.display_results(image, results)

    text_extractor.display_image(result_imgs, "Image with text recognition")
    # imwrite expects BGR; convert back from our RGB working copy.
    cv2.imwrite(RESULT + img_filename, cv2.cvtColor(result_imgs, cv2.COLOR_RGB2BGR))



if __name__ == "__main__":
    # Run both demos in order: full read-text pipeline, then detection-only.
    for demo in (testrun_easyocr_readtext, testrun_easyocr_detect_recognize):
        demo()