#coding:utf-8
import argparse
import difflib
import os
import pickle
import re
import subprocess
import time
from multiprocessing import Process

import cv2
import numpy
from PIL import Image, ImageDraw, ImageFont

from paddleocr import PaddleOCR
from paddlespeech.cli.tts import TTSExecutor

def draw_rectangle(frame, pos):
    """Draw a green quadrilateral on *frame* joining the four corners of
    an OCR detection box.

    Args:
        frame: OpenCV BGR image (numpy array); modified in place.
        pos: one OCR result line; pos[0] holds four (x, y) corner points
            (possibly floats) in drawing order.
    """
    corners = [(int(p[0]), int(p[1])) for p in pos[0][:4]]
    # Connect each corner to the next, wrapping back to the first,
    # replacing four copy-pasted cv2.line calls with one loop.
    for start, end in zip(corners, corners[1:] + corners[:1]):
        cv2.line(frame, start, end, (0, 255, 0), 1, 4)

def cv2ImgAddText(img, text, left, top, textColor, textSize):
    """Render *text* at (left, top) on an image using a TrueType font.

    Accepts either an OpenCV BGR ndarray or a PIL image and returns an
    OpenCV-compatible BGR ndarray with the text drawn on it.
    """
    # OpenCV stores pixels as BGR; convert to RGB before handing to PIL.
    if isinstance(img, numpy.ndarray):
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    fontStyle = ImageFont.truetype("font/Songti.ttc", textSize, encoding="utf-8")
    ImageDraw.Draw(img).text((left, top), text, textColor, font=fontStyle)
    # Convert the PIL result back to OpenCV's BGR layout.
    return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)

def tts_ocr(story_text):
    """Synthesize *story_text* to result/output.wav and play it aloud.

    Does nothing for empty/None input. Playback uses the macOS `afplay`
    command-line player.

    Args:
        story_text: text to read aloud (Chinese, per lang='zh').
    """
    if not story_text:
        # BUG FIX: playback previously ran unconditionally, replaying a
        # stale (or missing) wav file when nothing new was synthesized.
        return
    tts = TTSExecutor()
    tts(text=story_text, output="result/output.wav", lang='zh')
    # subprocess.run with an argument list avoids shell string building.
    subprocess.run(["afplay", "result/output.wav"], check=False)

def ocr_camera():
    """Continuously OCR frames from the default camera, overlay recognized
    text, and read newly-seen text aloud in a background process.

    Per frame: run PaddleOCR; compare the recognized text with the previous
    frame (stability check) and with the last text read aloud (novelty
    check). When the text is stable across two frames and sufficiently
    different from what was already read, draw the detection boxes and
    text onto the frame, save the text to result/story.txt, and spawn a
    tts_ocr process to speak it. Press 'q' in the preview window to quit.
    """
    # The model directory must contain model and params files.
    ocr = PaddleOCR(use_angle_cls=True,
                    use_gpu=False)  # det_model_dir='{your_det_model_dir}', rec_model_dir='{your_rec_model_dir}', rec_char_dict_path='{your_rec_char_dict_path}', cls_model_dir='{your_cls_model_dir}', use_angle_cls=True

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FPS, 30)
    
    # Text recognized in the previous frame; "leo" is a sentinel so the
    # very first frame never passes the stability check.
    preframe_ocr_txt : str = "leo"
    # Text most recently read aloud (same sentinel).
    reading_txt : str = "leo" 

    start_time = time.time()
    index = 1
    while True:
        try:
            ret, frame = cap.read()
            if not ret:
                break
        
            print('detect frame:%d' % (index))

            result = ocr.ocr(frame, cls=True)

            index += 1
            if not result:
                print ('result is Empty')
                #time.sleep(0.01)
                # Nothing detected: just show the raw frame and continue.
                cv2.imshow("storyteller", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                continue

            # Join all recognized lines into one text blob for comparison.
            # NOTE(review): assumes ocr.ocr() returns a flat list of
            # [box, (text, score)] lines; newer PaddleOCR versions nest the
            # result one level deeper per image — confirm against the
            # installed version.
            text = [line[1][0] for line in result]
            story_text = "\n".join(text)
           
            # Similarity to the previous frame's recognition result.
            diff_score = difflib.SequenceMatcher(None, preframe_ocr_txt, story_text).quick_ratio()
            print ('!!!! diff_score:{} \n'.format(diff_score))

            # Similarity to the text that was already read aloud.
            diff_prereaded = difflib.SequenceMatcher(None, reading_txt, story_text).quick_ratio()
            
            # Two consecutive recognitions are essentially identical AND
            # differ from what was already read: display and read it aloud.
            if 0.99 < diff_score and 0.6 > diff_prereaded:

                print('\n\n\n now start reading\n {}......\n\n\n'.format(story_text))
                
                # Overlay each detection box and its recognized text.
                for line in result:
                    draw_rectangle(frame,line)
                    frame = cv2ImgAddText(frame, line[1][0], int(line[0][0][0]+2), int(line[0][0][1]+2), (255, 0 , 0), 20)

                cv2.imshow("storyteller", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
                
                # # Filter so only Chinese characters/punctuation remain
                # paragraph = "".join(text)
                # pattern = re.compile(r'[^(\u4e00-\u9fa5)+，。？、]')
                # story_text = re.sub(pattern, '', paragraph)

                # Remember what has been read aloud.
                reading_txt = story_text

                with open('result/story.txt', 'w') as sf:
                    sf.write(reading_txt)

                # if len(story_text) > 0:
                #     tts_ocr(story_text)
                # macOS can also use its built-in TTS instead:
                #     os.system('say "{}"'.format(story_text))

                #     # start reading aloud
                # Run TTS in a separate process so capture keeps going.
                p = Process(target=tts_ocr,args=(reading_txt,))
                #p.daemon = True
        
                # Start the TTS process.
                p.start()

            else:
                # Keep recognizing; remember this frame's text for the
                # next stability comparison.
                cv2.imshow("storyteller", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                preframe_ocr_txt = story_text

        # NOTE(review): broad Exception catch ends the loop on any error;
        # consider narrowing or logging a traceback.
        except Exception as ex:
            print ('exception:' + str(ex))
            break
   
    #cv2.waitKey(0)
    cv2.destroyAllWindows()

    end_time = time.time()
    # NOTE(review): index starts at 1 and is incremented after each OCR
    # call, so this divides by frames+1 — slight underestimate.
    print("Average ocr time per frame:", (end_time - start_time) / index)


# def parse_args():
#     parser = argparse.ArgumentParser('camera detection and reader.')
#     parser.add_argument(
#         '--models_dir', type=str, default='', help='path of models.')
#     parser.add_argument(
#         '--img_paths', type=str, default='', help='path of images')
#     parser.add_argument(
#         '--video_path', type=str, default='', help='path of video.')
#     parser.add_argument(
#         '--camera_index',
#         type=int,
#         default=0,
#         help='switch camera in multi, default:0.')
#     parser.add_argument(
#         '--open_imshow',
#         type=bool,
#         default=False,
#         help='visualize video detection results in real time.')
#     parser.add_argument(
#         '--use_gpu',
#         type=bool,
#         default=False,
#         help='switch cpu/gpu, default:cpu.')
#     args = parser.parse_args()
#     return args

if __name__ == "__main__":
    # Script entry point: start the camera OCR / read-aloud loop.
    ocr_camera()
