#coding:utf-8
import re
import time
import pickle
import cv2 
from PIL import Image, ImageDraw, ImageFont
import numpy
import argparse
import os
import difflib
from multiprocessing import Process

from paddleocr import PaddleOCR
from paddlespeech.cli.tts import TTSExecutor

# Record the OCR result of the previous frame (used for frame-to-frame change detection)
preframe_ocr_txt : str = "leo"
reading_txt : str = "leo" 

def draw_rectangle(frame, pos):
    """Outline an OCR text box on *frame* in green.

    pos[0] holds the four corner points of the box; each consecutive
    pair of corners (wrapping back to the first) is joined with a
    1-pixel green line of line type 4.
    """
    corners = [(int(pt[0]), int(pt[1])) for pt in pos[0][:4]]
    for start, end in zip(corners, corners[1:] + corners[:1]):
        cv2.line(frame, start, end, (0, 255, 0), 1, 4)

def cv2ImgAddText(img, text, left, top, textColor, textSize):
    """Render *text* onto an image with a TrueType font (CJK-capable).

    Accepts either an OpenCV BGR ndarray or a PIL Image, draws the text
    at (left, top) in the given color/size using font/Songti.ttc, and
    returns the result as an OpenCV BGR ndarray.
    """
    if isinstance(img, numpy.ndarray):
        # OpenCV arrays are BGR; PIL works in RGB, so convert first
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    font = ImageFont.truetype("font/Songti.ttc", textSize, encoding="utf-8")
    ImageDraw.Draw(img).text((left, top), text, textColor, font=font)
    # Convert back to OpenCV's BGR layout for the caller
    return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)

def tts_ocr(story_text):
    """Synthesize *story_text* to result/output.wav and play it (macOS afplay).

    Bug fixes vs. the original:
    - Playback was outside the ``if story_text:`` guard, so a falsy input
      still replayed the stale output.wav from an earlier run; now an
      empty/None text is a no-op.
    - ``os.system('afplay ' + path)`` built a shell command by string
      concatenation; ``subprocess.run`` with an argument list avoids the
      shell entirely.
    """
    if not story_text:
        return
    tts = TTSExecutor()
    tts(text=story_text, output="result/output.wav", lang='zh')
    import subprocess
    subprocess.run(["afplay", "result/output.wav"], check=False)

#def take_ocr(cap, ocr, display_vedio, tk_info):
def take_ocr(obj):    
    """Grab one frame from the camera, OCR it, and start TTS playback when a
    stable, not-yet-read block of text is recognized.

    Expects ``obj`` to carry: ``cap`` (cv2.VideoCapture), ``ocr`` (PaddleOCR),
    ``display_vedio`` (callable(frame, status_text)), ``tk_info`` (dict-like
    UI label, written via its "text" key), ``cnf.enable_mp`` flag, and — when
    MediaPipe is enabled — ``hands``, ``mp_drawing`` and ``mp_hands``.

    Returns False on read failure, empty OCR result, or exception; falls off
    the end (returns None) otherwise.  NOTE(review): callers comparing the
    result to True/False should confirm the implicit None on success is
    intended.

    Uses the module-level globals ``preframe_ocr_txt`` (last frame's text,
    for stability detection) and ``reading_txt`` (text already read aloud,
    to avoid re-reading the same page).
    """
    cap = obj.cap
    ocr = obj.ocr
    display_vedio = obj.display_vedio
    tk_info = obj.tk_info

    # # Record the previous frame's OCR result (superseded by module globals)
    # preframe_ocr_txt : str = "leo"
    # reading_txt : str = "leo" 
    global preframe_ocr_txt 
    global reading_txt 

    # Frame counter — local, so it restarts at 1 on every call
    index = 1
    try:
        ret, frame = cap.read()
        #frame= cv2.flip(frame,1)
        if not ret:
            return False
    
        tk_info["text"] = ('detect frame:%d' % (index))

        result = None
        if obj.cnf.enable_mp:
            # MediaPipe hand tracking: OCR only the region above the pointing finger
            mp_results = obj.hands.process(frame)
            # if results.multi_handedness:
            #     for hand_label in results.multi_handedness:
            #         print(hand_label)
            if mp_results.multi_hand_landmarks:
                for hand_landmarks in mp_results.multi_hand_landmarks:
                    print('hand_landmarks:' + str(hand_landmarks))
                    # Visualize the hand keypoints on the frame
                    obj.mp_drawing.draw_landmarks(
                        frame, hand_landmarks, obj.mp_hands.HAND_CONNECTIONS)
                    # Draw a rectangle above the index fingertip (landmark 8)
                    h, w, c = frame.shape
                    # Convert the landmark's normalized coordinates to pixel coordinates
                    cx, cy = int(hand_landmarks.landmark[8].x * w), int(hand_landmarks.landmark[8].y * h)
                    # 1000x400 px region up-and-right of the fingertip — TODO confirm
                    # this fits the camera resolution; slicing below clamps silently
                    frame = cv2.rectangle(frame,  (cx, cy), (cx + 1000, cy - 400), (0, 255, 0), 2)
                    
                    print('frame:' + str(frame))
                    cute_frame = frame[cy-400:cy, cx:cx+1000]
                    print ('cute_frame:' + str(cute_frame))
                    # Skip OCR when the cropped region is empty (fingertip near an edge)
                    if numpy.array(cute_frame).size != 0:
                        result = ocr.ocr(cute_frame, cls=True)
        else:
            result = ocr.ocr(frame, cls=True)

        index += 1
        if not result:
            tk_info["text"] =  '识别中...'
            display_vedio(frame, '识别中...')
            return False

        # Compare similarity with the previous frame's recognition
        # NOTE(review): assumes each OCR line is [box, (text, confidence)] —
        # confirm against the installed paddleocr version's result format
        text = [line[1][0] for line in result]
        story_text = "\n".join(text)
    
        diff_score = difflib.SequenceMatcher(None, preframe_ocr_txt, story_text).quick_ratio()
        tk_info["text"] =  ('!!!! diff_score:{} \n'.format(diff_score))

        diff_prereaded = difflib.SequenceMatcher(None, reading_txt, story_text).quick_ratio()
        
        # Two consecutive recognitions are nearly identical (>0.95) and differ
        # from what was already read aloud (<0.6): show the result and read it
        if 0.95 < diff_score and 0.6 > diff_prereaded:

            tk_info["text"] = ('\n\n\n now start reading\n {}......\n\n\n'.format(story_text))
            
            # Overlay each recognized box and its text on the frame
            for line in result:
                draw_rectangle(frame,line)
                frame = cv2ImgAddText(frame, line[1][0], int(line[0][0][0]+2), int(line[0][0][1]+2), (255, 0 , 0), 20)

            display_vedio(frame, "识别成功，开始朗读！！")
            
            # # Keep only Chinese characters and punctuation in the result
            # paragraph = "".join(text)
            # pattern = re.compile(r'[^(\u4e00-\u9fa5)+，。？、]')
            # story_text = re.sub(pattern, '', paragraph)

            # Record the content that has been read aloud
            reading_txt = story_text
            with open('result/story.txt', 'w') as sf:
                sf.write(reading_txt)

            #     # Start reading
            # Run TTS in a separate process so this capture loop is not blocked
            p = Process(target=tts_ocr,args=(reading_txt,))
            #p.daemon = True
            # Launch the TTS/playback process
            p.start()

        else:
            # Keep recognizing; remember this frame's text for the next
            # stability comparison (only updated on this path)
            display_vedio(frame, "识别中...")

            preframe_ocr_txt = story_text

    except Exception as ex:
        # Broad catch: surface the error in the UI label and skip this frame
        tk_info["text"] =  ('exception:' + str(ex))
        return False