from hand_writing import *
from recognize import baiduOCR
import sys
import LogIn
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import recognize_character
import preFunction
import mediapipe as mp
import numpy as np
import autopy

class MyWindow(QMainWindow,LogIn.Ui_LogIn):
    """Main window for air-handwriting input.

    Tracks a single hand with MediaPipe from the webcam feed and maps
    hand poses to actions:

    * pinch (index+thumb "click") — draw strokes onto a black canvas;
    * index finger up            — move the OS mouse cursor (autopy);
    * index+middle together      — left mouse click;
    * pinky up                   — eraser (paints black over the canvas);
    * thumbs up                  — save the canvas and run OCR in a worker
                                   thread, inserting the recognized
                                   character into the line edit.
    """

    def __init__(self):
        super(MyWindow, self).__init__()
        self.setupUi(self)
        self.initail_condition()  # NOTE(review): name has a typo ("initail"); kept for compatibility with external callers

        # OCR worker thread; results come back to the GUI thread via signals.
        self.my_thread = MyThread(self)  # worker instance
        self.my_thread.my_signal.connect(self.insert_word)       # recognized text -> insert into line edit
        self.my_thread.my_signal2.connect(self.thread_warning)   # OCR/API failure -> warning dialog

        self.timer_camera.timeout.connect(self.show_camera)  # drive frame processing from the timer

        # "OK" / "Delete" buttons under the password box.
        self.pushButton_ok.clicked.connect(self.password_handin)
        self.pushButton_delete.clicked.connect(self.password_clear)

    def initail_condition(self):
        """Initialize gesture-recognition state: screen/camera geometry,
        MediaPipe hand detector, camera capture, frame timer and the black
        drawing canvas."""
        self.wScr, self.hScr = autopy.screen.size()  # screen width/height, for mouse mapping
        self.wCam, self.hCam = 640, 480              # assumed camera resolution — TODO confirm against actual capture

        self.smoothening = 7           # smoothing factor for cursor movement (higher = smoother/slower)
        self.plocX, self.plocY = 0, 0  # previous cursor location
        self.clocX, self.clocY = 0, 0  # current (smoothed) cursor location

        self.mp_hands = mp.solutions.hands  # MediaPipe hands module
        self.hands = self.mp_hands.Hands(static_image_mode=False,
                               max_num_hands=1,                # track at most one hand
                               min_detection_confidence=0.7,   # confidence to accept a hand detection
                               min_tracking_confidence=0.5)    # confidence to keep tracking the same hand
        self.mpDraw = mp.solutions.drawing_utils

        self.cap = cv2.VideoCapture()
        self.timer_camera = QTimer()
        self.timer_camera.start(30)  # refresh the frame every 30 ms
        self.cap.open(0)
        self.gesture_lines = []  # stroke points of the character being written
        # Black canvas the strokes are drawn on; later composited onto each frame.
        self.img_black = np.zeros((self.hCam, self.wCam, 3), dtype='uint8')

    def insert_word(self, word):
        """Slot: receive the OCR result; insert it or warn if empty."""
        if word:
            # Recognition succeeded: insert the character into the input box.
            self.lineEdit.insert(word)
        else:
            QMessageBox.warning(self, '警告', '写的太丑了请重新输入')

        # NOTE(review): terminate() on a QThread is generally unsafe; run()
        # has already returned by the time this slot fires, so this is
        # effectively a no-op — consider wait() instead. Kept as-is.
        self.my_thread.terminate()

    def thread_warning(self):
        """Slot: OCR API call failed."""
        QMessageBox.warning(self, '警告', '百度文字识别api识别错误')

    def show_camera(self):
        """Timer slot: grab a frame, run hand tracking, dispatch gesture
        actions, composite the drawing canvas onto the frame and display it."""
        flag, self.image = self.cap.read()  # read one frame from the stream
        if not flag or self.image is None:
            return  # frame grab failed (camera busy/unplugged); skip this tick
        h, w, c = self.image.shape

        # Mirror the frame so movement feels natural.
        frame = cv2.flip(self.image, 1)
        # MediaPipe expects RGB input.
        img_RGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        results = self.hands.process(img_RGB)  # run hand detection/tracking
        if results.multi_hand_landmarks:
            # Draw landmarks of the first (only) detected hand.
            self.mpDraw.draw_landmarks(frame, results.multi_hand_landmarks[0], self.mp_hands.HAND_CONNECTIONS)

            handpoint_list = hand_point(results, h, w)  # pixel coordinates of each hand landmark
            hand_pose = judge_handpose(handpoint_list)  # classify the pose from landmark positions

            # Thumbs up: finish the character — save the canvas and run OCR.
            if hand_pose == 'Thumb_up' and len(self.gesture_lines) > 10:
                cv2.imwrite('character.jpg', self.img_black)
                self.my_thread.start()  # recognize the saved image in the worker thread
                self.img_black = np.zeros((h, w, c), dtype='uint8')  # reset the canvas
                self.gesture_lines = []  # reset the stroke points

            # Index finger up: move the mouse cursor.
            elif hand_pose == 'Index_up':
                index_x, index_y = handpoint_list[8]
                # Map camera coordinates onto screen coordinates.
                screen_x = np.interp(index_x, (0, self.wCam), (0, self.wScr))
                screen_y = np.interp(index_y, (0, self.hCam), (0, self.hScr))
                # Low-pass filter the movement to reduce jitter.
                self.clocX = self.plocX + (screen_x - self.plocX) / self.smoothening
                self.clocY = self.plocY + (screen_y - self.plocY) / self.smoothening
                autopy.mouse.move(self.clocX, self.clocY)
                cv2.circle(frame, (index_x, index_y), 10, (255, 0, 255), cv2.FILLED)
                self.plocX, self.plocY = self.clocX, self.clocY

            # Index + middle fingers close together: left mouse click.
            elif hand_pose == 'Index_middle_up':
                if p_to_p_distance(handpoint_list[8], handpoint_list[12]) < 50:
                    index_x, index_y = handpoint_list[8]
                    middle_x, middle_y = handpoint_list[12]
                    click_x, click_y = int((index_x + middle_x) / 2), int((index_y + middle_y) / 2)
                    cv2.circle(frame, (click_x, click_y), 10, (0, 255, 0), cv2.FILLED)
                    autopy.mouse.click()

            # Pinky up: eraser — paint black over the canvas.
            elif hand_pose == 'Pinky_up':
                pinky_x, pinky_y = handpoint_list[20]
                cv2.circle(frame, (pinky_x, pinky_y), 15, (0, 255, 0), cv2.FILLED)
                cv2.circle(self.img_black, (pinky_x, pinky_y), 15, (0, 0, 0), cv2.FILLED)

            # Pinch "click": record stroke points and draw them on the canvas.
            frame, hand_list = index_thumb_pt(frame, results)
            if hand_list['click']:
                draw_character(self.gesture_lines, self.img_black)  # connect recorded points into strokes
                if len(self.gesture_lines) > 1 and p_to_p_distance(self.gesture_lines[-1], hand_list["pt"]) < 30:
                    # Smooth the pen movement by halving the step toward the new point.
                    hand_list["pt"] = list(hand_list["pt"])
                    hand_list["pt"][0] = int(self.gesture_lines[-1][0] + (hand_list["pt"][0] - self.gesture_lines[-1][0]) / 2)
                    hand_list["pt"][1] = int(self.gesture_lines[-1][1] + (hand_list["pt"][1] - self.gesture_lines[-1][1]) / 2)
                    hand_list["pt"] = tuple(hand_list["pt"])

                self.gesture_lines.append(hand_list["pt"])

        # Composite the white-on-black canvas onto the camera frame:
        # AND with the inverted mask punches holes where strokes are,
        # OR fills those holes with the stroke pixels.
        img_gray = cv2.cvtColor(self.img_black, cv2.COLOR_BGR2GRAY)
        _, imgInv = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY_INV)
        imgInv = cv2.cvtColor(imgInv, cv2.COLOR_GRAY2BGR)
        img = cv2.bitwise_and(frame, imgInv)
        img = cv2.bitwise_or(img, self.img_black)

        # Scale to the display label while preserving aspect ratio.
        new_width, new_height = preFunction.resize_picture(img, width=self.label_cap.width(),
                                                           height=self.label_cap.height())

        qt_img_detect = preFunction.cvimg_to_qtimg(img)  # convert to a Qt image
        new_img = qt_img_detect.scaled(new_width, new_height, Qt.KeepAspectRatio)
        self.label_cap.setPixmap(QPixmap.fromImage(new_img))
        self.label_cap.setAlignment(Qt.AlignCenter)  # center the image in the label

    def password_handin(self):
        """Check the entered text against the hard-coded pass phrase."""
        if self.lineEdit.text() == '加油':
            QMessageBox.information(self,'祝福语','美赛加油')
        else:
            QMessageBox.information(self, '提示', '输入密码错误，请重新输入')

    def password_clear(self):
        """Delete one character from the input box (backspace)."""
        self.lineEdit.backspace()

    def center(self):
        """Move the window to (roughly) the center of the screen."""
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        newLeft = int((screen.width()-size.width())/2)
        newTop = int((screen.height()-size.height())/2-40)  # nudged 40 px up
        self.move(newLeft,newTop)

    def add_shadow(self):
        """Apply a gray drop shadow to the main widget."""
        self.effect_shadow = QGraphicsDropShadowEffect(self)
        self.effect_shadow.setOffset(0, 0)   # no offset — even glow
        self.effect_shadow.setBlurRadius(10) # shadow radius
        self.effect_shadow.setColor(Qt.gray) # shadow color
        self.widget.setGraphicsEffect(self.effect_shadow)

class MyThread(QThread):
    """Worker thread that sends the saved character image to Baidu OCR.

    Emits ``my_signal(str)`` with the recognized text on success, or
    ``my_signal2()`` when the OCR call raises, so the GUI thread can show
    a warning dialog.
    """
    my_signal = pyqtSignal(str)  # carries the recognized text back to the GUI thread
    my_signal2 = pyqtSignal()    # fired when the OCR API call fails

    def __init__(self, parent):
        super().__init__()
        self.parent = parent

    def run(self):
        """Thread body: run OCR on the saved canvas image and report back."""
        try:
            word = baiduOCR('character.jpg')  # send the image to Baidu OCR
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; any API/network failure is reported to the GUI.
            self.my_signal2.emit()
        else:
            self.my_signal.emit(word)



if __name__ == "__main__":
    # Application entry point: build the Qt app and the main window,
    # position and decorate it, then hand control to the event loop.
    app = QApplication(sys.argv)

    main_window = MyWindow()
    main_window.center()      # place the window mid-screen
    main_window.add_shadow()  # decorate with a drop shadow
    main_window.show()

    sys.exit(app.exec_())
