import cv2
import mediapipe as mp
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
import math
import numpy as np




class HandControlVolume:
    """Control the Windows master volume with the thumb-index fingertip distance.

    Uses MediaPipe Hands to track one hand on the webcam feed and pycaw to set
    the system volume: the further apart thumb tip (landmark 4) and index tip
    (landmark 8) are, the louder the volume.
    """

    def __init__(self):
        # Initialize mediapipe drawing helpers and the hands solution.
        self.mp_drawing = mp.solutions.drawing_utils
        self.mp_drawing_styles = mp.solutions.drawing_styles
        self.mp_hands = mp.solutions.hands

        # Acquire the default speaker endpoint via pycaw and unmute it.
        devices = AudioUtilities.GetSpeakers()
        interface = devices.Activate(
            IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
        self.volume = cast(interface, POINTER(IAudioEndpointVolume))
        self.volume.SetMute(0, None)
        # (min_dB, max_dB, step) — typically (-96.0, 0.0, ...) on Windows.
        self.volume_range = self.volume.GetVolumeRange()

    def recognize(self):
        """Run the webcam loop until ESC is pressed, mapping pinch distance to volume.

        Side effects: opens camera 0, shows a 'VolumeControl' window, and sets
        the system master volume level on every frame where a hand is detected.
        """
        # Open the default camera and request a 1280x720 capture.
        cap = cv2.VideoCapture(0)
        cap.set(3, 1280)  # CAP_PROP_FRAME_WIDTH
        cap.set(4, 720)   # CAP_PROP_FRAME_HEIGHT

        # Working resolution used for processing and display.
        resize_w = 800
        resize_h = 500

        # Volume-bar display state; persists across frames with no hand.
        rect_height = 0
        rect_percent_text = 0

        try:
            with self.mp_hands.Hands(min_detection_confidence=0.5,
                                     min_tracking_confidence=0.5,
                                     max_num_hands=1) as hands:
                while True:
                    # Read one frame; a failed read would otherwise crash resize().
                    success, image = cap.read()
                    if not success:
                        continue
                    image = cv2.resize(image, (resize_w, resize_h))

                    # Mark read-only to let mediapipe avoid a copy (performance).
                    image.flags.writeable = False
                    # BGR -> RGB (mediapipe expects RGB input).
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    # Mirror so on-screen motion matches the user's hand.
                    image = cv2.flip(image, 1)
                    # Run the mediapipe hand-landmark model.
                    results = hands.process(image)

                    image.flags.writeable = True
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

                    # Detection guide box corners: (x1, y1, x2, y2).
                    dot = [120, 120, 700, 450]

                    # Draw the guide box as a single rectangle call.
                    cv2.rectangle(image, (dot[0], dot[1]), (dot[2], dot[3]),
                                  (0, 255, 0), 2)

                    # Any hand detected this frame?
                    if results.multi_hand_landmarks:
                        for hand_landmarks in results.multi_hand_landmarks:
                            # Overlay the landmark skeleton on the frame.
                            self.mp_drawing.draw_landmarks(
                                image, hand_landmarks,
                                self.mp_hands.HAND_CONNECTIONS,
                                self.mp_drawing_styles.get_default_hand_landmarks_style(),
                                self.mp_drawing_styles.get_default_hand_connections_style())

                            # Collect normalized landmark coords as [id, x, y, z].
                            lm_list = [[i, lm.x, lm.y, lm.z]
                                       for i, lm in enumerate(hand_landmarks.landmark)]

                            if lm_list:
                                # Thumb tip (landmark 4) in pixel coordinates.
                                thumb_tip = lm_list[4]
                                thumb_x = math.ceil(thumb_tip[1] * resize_w)
                                thumb_y = math.ceil(thumb_tip[2] * resize_h)
                                # Index finger tip (landmark 8) in pixel coordinates.
                                index_tip = lm_list[8]
                                index_x = math.ceil(index_tip[1] * resize_w)
                                index_y = math.ceil(index_tip[2] * resize_h)
                                # Midpoint between the two fingertips.
                                middle_point = ((thumb_x + index_x) // 2,
                                                (thumb_y + index_y) // 2)
                                thumb_point = (thumb_x, thumb_y)
                                index_point = (index_x, index_y)

                                # Draw fingertip markers (filled circles).
                                image = cv2.circle(image, index_point, 10,
                                                   (255, 0, 255), -1)
                                image = cv2.circle(image, middle_point, 10,
                                                   (255, 0, 255), -1)
                                # Draw the thumb-to-index line.
                                cv2.line(image, thumb_point, index_point,
                                         (255, 0, 255), 5)

                                # Euclidean fingertip distance in pixels.
                                line_len = math.hypot(index_x - thumb_x,
                                                      index_y - thumb_y)

                                # System volume range in dB.
                                min_volume = self.volume_range[0]
                                max_volume = self.volume_range[1]
                                # Map distance [10, 270] px onto [min_dB, max_dB].
                                vol = np.interp(line_len, [10, 270],
                                                [min_volume, max_volume])
                                # Map distance onto the on-screen bar and percent text.
                                rect_height = np.interp(line_len, [10, 270], [0, 200])
                                rect_percent_text = np.interp(line_len, [10, 270],
                                                              [0, 100])

                                # Apply the new master volume level.
                                self.volume.SetMasterVolumeLevel(vol, None)

                    # Percentage readout next to the volume bar.
                    cv2.putText(image, str(math.ceil(rect_percent_text)) + "%",
                                (10, 350), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
                    # Volume bar outline.
                    image = cv2.rectangle(image, (30, 100), (70, 300), (255, 0, 0), 3)
                    # Filled portion; height tracks the fingertip distance.
                    image = cv2.rectangle(image,
                                          (30, math.ceil(300 - rect_height)),
                                          (70, 300), (255, 0, 0), -1)

                    # Show the frame; ESC (27) exits.
                    cv2.imshow('VolumeControl', image)
                    if cv2.waitKey(1) & 0xFF == 27:
                        break
        finally:
            # Always release the camera and close the window, even on error.
            cap.release()
            cv2.destroyAllWindows()


# Program entry point — uncomment to run (ideally under an
# `if __name__ == "__main__":` guard so imports stay side-effect free):
#control = HandControlVolume()
#control.recognize()
