import cv2
import mediapipe as mp
import numpy as np
import time
from flask import Response, jsonify, request, session
from flask_login import current_user
from models import db, User, GestureRecord
import logging
from utils.cache import cache_gesture_data, get_cached_gesture_data, cache_gesture_contents, get_cached_gesture_contents
from datetime import datetime

# Module-level logger
logger = logging.getLogger(__name__)

# Module-level state shared between the video stream and the HTTP handlers.
recognized_gesture: str = ""  # gesture currently recognized from the video stream
is_successful_recognition: bool = False  # whether the last gesture was successfully recorded
input_contents: dict = {
    "1": "",  # content bound to gesture 1
    "2": "",  # content bound to gesture 2
    "3": "",  # content bound to gesture 3
    "4": "",  # content bound to gesture 4
    "5": "",  # content bound to gesture 5
}

# On import, seed the gesture contents from the cache when available.
cached_contents = get_cached_gesture_contents()
if cached_contents:
    input_contents.update(cached_contents)


def get_angle(v1, v2):
    """
    Compute the angle between two vectors, in degrees.

    Args:
        v1: First vector (numpy array).
        v2: Second vector (numpy array).

    Returns:
        float: Angle between the two vectors, in degrees ([0, 180]).
    """
    # Cosine of the angle from the dot-product formula.
    cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    # Clip guards against floating-point drift pushing the ratio slightly
    # outside [-1, 1], which would make arccos return NaN.
    cos_angle = np.clip(cos_angle, -1.0, 1.0)
    # np.degrees uses the exact value of pi; the previous `/ 3.14 * 180`
    # approximation biased every result by about 0.05%.
    return float(np.degrees(np.arccos(cos_angle)))


def get_str_guester(up_fingers, list_lms):
    """
    Map the set of raised fingers to a gesture label.

    Args:
        up_fingers: Sorted list of raised fingertip landmark indices
            (subset of [4, 8, 12, 16, 20]: thumb, index, middle, ring, pinky).
        list_lms: Array of all 21 hand landmark coordinates. Kept for
            interface compatibility; not needed for classification.

    Returns:
        str: Gesture label ("1"-"5", "6", "8"), or " " when no predefined
        gesture matches.
    """
    # NOTE: the original single-finger case computed the index-finger joint
    # angle, but both branches of the `angle < 160` test returned "1" — the
    # angle computation was dead code and has been removed.

    # Any five raised fingers is an open hand.
    if len(up_fingers) == 5:
        return "5"

    # Remaining gestures are identified by the exact fingertip pattern.
    gesture_by_pattern = {
        (8,): "1",              # index finger only
        (8, 12): "2",           # index + middle (scissors)
        (8, 12, 16): "3",       # index + middle + ring
        (8, 12, 16, 20): "4",   # all fingers except thumb
        (4, 20): "6",           # thumb + pinky ("six")
        (4, 8): "8",            # thumb + index ("eight")
    }
    return gesture_by_pattern.get(tuple(up_fingers), " ")


def generate_frames():
    """
    Video frame generator: streams annotated webcam frames as an MJPEG
    multipart stream while recognizing hand gestures.

    Workflow:
    1. Open the default camera and initialize the MediaPipe hand detector.
    2. Read frames in a loop and detect hand landmarks.
    3. At most once every 2 seconds, classify the gesture and update the
       module-level ``recognized_gesture``.
    4. Encode each processed frame as JPEG and yield it in MJPEG format.
    """
    global recognized_gesture
    cap = cv2.VideoCapture(0)  # open the default camera
    mpHands = mp.solutions.hands
    hands = mpHands.Hands()  # hand-detection model
    mpDraw = mp.solutions.drawing_utils  # draws landmarks and connections

    last_time = time.time()
    interval = 2  # seconds between recognition attempts, to avoid thrashing

    try:
        while True:
            success, img = cap.read()
            if not success:
                continue

            image_height, image_width, _ = np.shape(img)
            imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
            results = hands.process(imgRGB)

            current_time = time.time()
            if results.multi_hand_landmarks and current_time - last_time >= interval:
                # A hand is visible and the recognition interval has elapsed.
                last_time = current_time

                hand = results.multi_hand_landmarks[0]  # first detected hand
                mpDraw.draw_landmarks(img, hand, mpHands.HAND_CONNECTIONS)

                # Convert the 21 normalized landmarks to pixel coordinates.
                list_lms = np.array(
                    [[int(hand.landmark[i].x * image_width),
                      int(hand.landmark[i].y * image_height)]
                     for i in range(21)],
                    dtype=np.int32,
                )

                # Palm convex hull, used to decide whether a fingertip is raised.
                # NOTE(review): landmark index 10 appears twice in hull_index —
                # possibly a typo for another landmark; kept as-is to preserve
                # behavior (convexHull tolerates duplicates).
                hull_index = [0, 1, 2, 3, 6, 10, 14, 19, 18, 17, 10]
                hull = cv2.convexHull(list_lms[hull_index, :])
                cv2.polylines(img, [hull], True, (0, 255, 0), 2)  # draw the hull

                # A fingertip outside the hull (negative signed distance)
                # counts as a raised finger.
                up_fingers = []
                for i in [4, 8, 12, 16, 20]:  # thumb/index/middle/ring/pinky tips
                    pt = (int(list_lms[i][0]), int(list_lms[i][1]))
                    dist = cv2.pointPolygonTest(hull, pt, True)
                    if dist < 0:
                        up_fingers.append(i)

                # Classify and publish the gesture to module state.
                recognized_gesture = get_str_guester(up_fingers, list_lms)
            elif not results.multi_hand_landmarks:
                recognized_gesture = ""  # no hand in frame: reset the result

            # Encode the frame as JPEG and emit it as an MJPEG part.
            ret, buffer = cv2.imencode('.jpg', img)
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    finally:
        # Release the camera even when the generator is closed (GeneratorExit)
        # or an exception escapes; the original release after `while True`
        # was unreachable, leaking the capture device.
        cap.release()


def get_gesture():
    """
    Return the currently recognized gesture and record it to the database.

    Returns:
        JSON response containing the gesture and a success flag.

    Logic:
    1. Only record when a valid gesture ("1"-"5") is recognized and the
       user is authenticated.
    2. Prefer cached gesture data over a database query.
    3. When the cache misses or is stale, write a new record and refresh
       the cache.
    4. Successful lookups (cached or not) are appended to the history table.
    """
    global recognized_gesture, is_successful_recognition
    try:
        logger.info(f"当前识别到的手势: {recognized_gesture}")
        logger.info(f"当前用户: {current_user.username if current_user.is_authenticated else '未登录'}")

        # Record only valid gestures ("1"-"5") from authenticated users.
        if recognized_gesture in ["1", "2", "3", "4", "5"] and current_user.is_authenticated:
            current_time = datetime.now()

            # Try the cache first to avoid a database round trip.
            cached_data = get_cached_gesture_data(f"{current_user.id}:{recognized_gesture}")
            if cached_data:
                logger.info("从缓存获取手势数据")
                # Mirror the cached success flag into module state.
                is_successful_recognition = cached_data.get('success', False)
                # NOTE(review): a False 'success' flag is treated as an expired
                # cache entry and falls through to a fresh DB write — confirm
                # this matches the cache writer's semantics.
                if not is_successful_recognition:
                    logger.info("缓存数据已过期，重新获取")
                    cached_data = None
                else:
                    # Cache hits still append to the history table before
                    # returning the cached payload.
                    record = GestureRecord(
                        user_id=current_user.id,
                        gesture=recognized_gesture,
                        content=cached_data.get('content', input_contents.get(recognized_gesture, "")),
                        timestamp=current_time
                    )
                    try:
                        db.session.add(record)
                        db.session.commit()
                        logger.info(f"从缓存获取数据并记录历史: 手势={recognized_gesture}")
                    except Exception as e:
                        db.session.rollback()
                        logger.error(f"记录历史失败: {str(e)}")

                    return jsonify(cached_data)

            # Cache miss (or stale entry): persist the gesture to the database.
            record = GestureRecord(
                user_id=current_user.id,
                gesture=recognized_gesture,
                content=input_contents.get(recognized_gesture, ""),
                timestamp=current_time
            )
            db.session.add(record)
            try:
                db.session.commit()
                is_successful_recognition = True
                logger.info(
                    f"成功保存手势记录: 手势={recognized_gesture}, 内容={input_contents.get(recognized_gesture, '')}")

                # Cache the result to reduce future database queries.
                gesture_data = {
                    'gesture': recognized_gesture,
                    'success': True,
                    'content': input_contents.get(recognized_gesture, ""),
                    'timestamp': current_time.isoformat()  # ISO-formatted time string
                }
                cache_gesture_data(f"{current_user.id}:{recognized_gesture}", gesture_data)

            except Exception as e:
                db.session.rollback()
                is_successful_recognition = False
                logger.error(f"保存手势记录失败: {str(e)}")
        else:
            # Invalid gesture or unauthenticated user: nothing recorded.
            is_successful_recognition = False
            if not current_user.is_authenticated:
                logger.warning("用户未登录")
            if not recognized_gesture or recognized_gesture not in ["1", "2", "3", "4", "5"]:
                logger.info("未识别到有效手势")
    except Exception as e:
        is_successful_recognition = False
        logger.error(f"处理手势时出错: {str(e)}")

    # Always respond, even when nothing was recorded.
    return jsonify({
        'gesture': recognized_gesture,
        'success': is_successful_recognition
    })


def get_contents():
    """
    Return the content bound to every gesture.

    Returns:
        JSON response mapping each gesture ("1"-"5") to its content.
    """
    # Read-only access to the module-level dict; no `global` needed.
    return jsonify(input_contents)


def receive_content():
    """
    Receive and store the content associated with a gesture.

    Expected JSON payload:
    {
        "gesture": "1",     # gesture identifier ("1"-"5")
        "content": "text"   # content to bind to that gesture
    }

    Returns:
        JSON response with the update status and a message.
    """
    global input_contents
    try:
        payload = request.get_json()
        gesture = payload.get('gesture')
        content = payload.get('content')

        # Reject unknown gestures and missing content up front.
        if gesture not in input_contents or content is None:
            logger.warning(f"无效的手势或内容: 手势={gesture}, 内容={content}")
            return jsonify({'success': False, 'error': '无效的手势或内容'}), 400

        input_contents[gesture] = content
        cache_gesture_contents(input_contents)  # persist the mapping to cache
        logger.info(f"接收到手势 {gesture} 的内容: {content}")
        return jsonify({'success': True, 'message': f'手势{gesture}的内容已更新'})
    except Exception as e:
        logger.error(f"处理请求时出错: {str(e)}")
        return jsonify({'success': False, 'error': str(e)}), 500