import cv2
import numpy as np
from flask import Flask, render_template, Response, jsonify
import mediapipe as mp
import time
import os
import traceback
import json
import tensorflow as tf
from utils import preprocess_trajectory

app = Flask(__name__, template_folder='templates')

# Initialize MediaPipe Hands: track at most one hand, with detection/tracking
# confidence thresholds of 0.4 / 0.3.
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
    max_num_hands=1,
    min_detection_confidence=0.4,
    min_tracking_confidence=0.3
)

# Trajectory recording parameters
TRAJECTORY = []               # list of (x, y) fingertip pixel positions
LAST_DETECTION_TIME = 0       # time.time() of the last hand detection
DETECTION_TIMEOUT = 1.5  # detection timeout (seconds)
MIN_POINTS = 25  # minimum number of trajectory points
MAX_POINTS = 150  # maximum number of trajectory points
MIN_MOVE_DISTANCE = 30  # minimum movement distance (pixels)

# State variables published via the /get_results endpoint
CURRENT_GESTURE = "nohand"
CURRENT_MOTION = "nomotion"
LAST_RESULTS = {
    "gesture": "nohand",
    "motion": "nomotion",
    "timestamp": time.time()
}

# Whether tracking is enabled (toggled by /start_tracking and /stop_tracking)
TRACKING_ENABLED = False

# Trajectory recognizer — loads models in SavedModel format
class TrajectoryRecognizer:
    """Classify a recorded fingertip trajectory with a Keras model.

    The model is expected in TensorFlow SavedModel format. If loading fails
    for any reason, a small untrained fallback model is built instead so the
    rest of the application keeps running (its predictions are meaningless).
    """

    def __init__(self):
        self.model = None            # Keras model, populated by load()
        self.label_names = None      # mapping: class index (int) -> label name
        self.default_model_path = "models/default_trajectory_model"
        self.default_label_map = {
            0: "circle", 1: "triangle", 2: "rectangle"
        }

    def load(self, model_path="models/model_3/ensemble_model", label_map_path="preprocessed_data/label_map.json"):
        """Load the model and the index->name label map.

        Never raises: any failure falls back to the built-in default
        model/labels so callers need no error handling.
        """
        print("正在加载识别模型...")
        try:
            # Missing model directory: fall back immediately.
            if not os.path.exists(model_path):
                print(f"警告: 模型目录 {model_path} 不存在, 使用默认模型")
                self._create_default_model()
                self.label_names = {idx: name for idx, name in enumerate(self.default_label_map.values())}
                return

            # Load the SavedModel-format model.
            self.model = tf.keras.models.load_model(model_path)
            print(f"模型从 {model_path} 加载成功 (SavedModel格式)")

            # Load the label map.
            if os.path.exists(label_map_path):
                # Explicit UTF-8: label names may be non-ASCII and the
                # platform default encoding is not guaranteed to match.
                with open(label_map_path, 'r', encoding='utf-8') as f:
                    label_map_data = json.load(f)

                # JSON object keys are strings; convert back to int indices.
                # Assumes the file maps index -> name — TODO confirm format.
                self.label_names = {int(idx): name for idx, name in label_map_data.items()}

                print(f"标签映射从 {label_map_path} 加载成功")
                print(f"标签映射内容: {self.label_names}")
            else:
                print(f"警告: 未找到标签映射文件 {label_map_path}")
                # No label file: try to infer the class count from the model.
                if self.model is not None:
                    output_shape = self.model.layers[-1].output_shape
                    if output_shape and len(output_shape) > 1:
                        num_classes = output_shape[1]
                        self.label_names = {i: f"gesture_{i}" for i in range(num_classes)}
                        print(f"使用推断标签: {self.label_names}")
                    else:
                        print("无法推断标签: 模型输出形状未知")
                        self.label_names = self.default_label_map
                else:
                    print("模型未加载，使用默认标签")
                    self.label_names = self.default_label_map
        except Exception as e:
            print(f"模型加载错误: {str(e)}")
            print("创建并使用默认模型")
            self._create_default_model()
            self.label_names = {idx: name for idx, name in enumerate(self.default_label_map.values())}
            traceback.print_exc()

    def _create_default_model(self):
        """Build a tiny untrained 3-class fallback classifier."""
        print("创建默认手势识别模型...")
        # NOTE(review): input shape (64, 6) must match the output of
        # preprocess_trajectory — confirm against utils.preprocess_trajectory.
        self.model = tf.keras.Sequential([
            tf.keras.layers.InputLayer(input_shape=(64, 6)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(3, activation='softmax')
        ])
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
        print("默认模型创建完成")

    def predict(self, trajectory):
        """Return (label_name, confidence) for a trajectory of (x, y) points.

        Returns sentinel labels ("model_not_loaded", "label_map_not_loaded",
        "error") with confidence 0.0 instead of raising.
        """
        if self.model is None:
            print("错误: 模型未加载")
            return "model_not_loaded", 0.0

        if not self.label_names:
            print("错误: 标签映射未加载或为空")
            return "label_map_not_loaded", 0.0

        try:
            # Preprocess exactly as during training.
            preprocessed = preprocess_trajectory(trajectory)

            # Add the batch dimension expected by Keras.
            preprocessed = np.expand_dims(preprocessed, axis=0)

            # Run inference.
            predictions = self.model.predict(preprocessed, verbose=0)
            # Cast to plain Python types: np.argmax yields a numpy integer,
            # and the label map is keyed by plain ints.
            class_idx = int(np.argmax(predictions[0]))
            confidence = float(np.max(predictions[0]))

            # Map the class index to its human-readable label.
            label_name = self.label_names.get(class_idx, f"unknown_{class_idx}")
            print(f"预测结果: 类别={label_name}, 置信度={confidence:.2f}")

            return label_name, confidence
        except Exception as e:
            print(f"预测错误: {str(e)}")
            traceback.print_exc()
            return "error", 0.0


# Module-level recognizer instance; the model is loaded once at import time.
recognizer = TrajectoryRecognizer()
recognizer.load()


def is_valid_trajectory(trajectory):
    """Return True when the trajectory moved far enough to be meaningful.

    A trajectory is valid if it contains at least two points and the
    straight-line distance between its first and last point exceeds
    MIN_MOVE_DISTANCE pixels.
    """
    if len(trajectory) < 2:
        return False

    (x0, y0), (x1, y1) = trajectory[0], trajectory[-1]
    return np.hypot(x1 - x0, y1 - y0) > MIN_MOVE_DISTANCE


def process_frame(frame):
    """Run hand tracking on one BGR frame and overlay trajectory/status info.

    Side effects: updates the module-level TRAJECTORY, LAST_DETECTION_TIME,
    CURRENT_GESTURE, CURRENT_MOTION and LAST_RESULTS, and triggers gesture
    recognition once no new detection has occurred for DETECTION_TIMEOUT
    seconds.

    Returns the (mirrored, annotated) frame; the input frame is returned
    unchanged when tracking is disabled.
    """
    global TRAJECTORY, LAST_DETECTION_TIME, CURRENT_GESTURE, CURRENT_MOTION, LAST_RESULTS

    if not TRACKING_ENABLED:
        return frame

    # Mirror the image so movements look natural to the user.
    frame = cv2.flip(frame, 1)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Run MediaPipe hand detection on the RGB frame.
    results = hands.process(rgb_frame)
    gesture = "nohand"
    motion = "nomotion"
    confidence = 0.0

    if results.multi_hand_landmarks:
        landmarks = results.multi_hand_landmarks[0]

        # Index-finger tip position in pixel coordinates.
        h, w, _ = frame.shape
        index_tip = landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP]
        x, y = int(index_tip.x * w), int(index_tip.y * h)

        # Record the point only if it differs from the last one recorded.
        if not TRAJECTORY or (x, y) != TRAJECTORY[-1]:
            TRAJECTORY.append((x, y))

            # Cap the trajectory length, keeping the newest points.
            if len(TRAJECTORY) > MAX_POINTS:
                TRAJECTORY = TRAJECTORY[-MAX_POINTS:]

        # Draw the trajectory with a color gradient from oldest to newest.
        for i in range(1, len(TRAJECTORY)):
            color_intensity = int(255 * i / len(TRAJECTORY))
            cv2.line(frame, TRAJECTORY[i - 1], TRAJECTORY[i],
                     (0, color_intensity, 255 - color_intensity), 3)

        # Mark the current fingertip position.
        cv2.circle(frame, (x, y), 10, (0, 255, 0), -1)

        motion = "moving"
        LAST_DETECTION_TIME = time.time()

        # Draw the hand skeleton landmarks.
        mp.solutions.drawing_utils.draw_landmarks(
            frame, landmarks, mp_hands.HAND_CONNECTIONS)
    else:
        # No hand detected this frame.
        if TRAJECTORY:
            # A trajectory exists but the hand left — gesture may be complete.
            motion = "hand_lost"
        else:
            motion = "nohand"

    # After DETECTION_TIMEOUT seconds without a detection, classify the trajectory.
    current_time = time.time()
    if TRAJECTORY and current_time - LAST_DETECTION_TIME > DETECTION_TIMEOUT:
        # Only classify trajectories with enough points and enough movement.
        if len(TRAJECTORY) >= MIN_POINTS and is_valid_trajectory(TRAJECTORY):
            print(f"开始识别手势，轨迹点数: {len(TRAJECTORY)}")

            # Copy then clear the trajectory before the (slow) prediction,
            # so new points start accumulating cleanly.
            trajectory_to_process = TRAJECTORY.copy()
            TRAJECTORY = []

            # Classify the completed trajectory.
            try:
                gesture, confidence = recognizer.predict(trajectory_to_process)
                print(f"识别结果: {gesture} (置信度: {confidence:.2f})")
            except Exception as e:
                print(f"识别错误: {str(e)}")
                traceback.print_exc()
                gesture = "error"
                confidence = 0.0

            motion = "completed"
        else:
            print(f"无效轨迹: 点数不足({len(TRAJECTORY)}/{MIN_POINTS})或移动距离太小")
            TRAJECTORY = []
            gesture = "invalid"
            motion = "discarded"

    # Publish the latest state for the /get_results endpoint.
    CURRENT_GESTURE = gesture
    CURRENT_MOTION = motion
    LAST_RESULTS = {
        "gesture": CURRENT_GESTURE,
        "motion": CURRENT_MOTION,
        "confidence": confidence,
        "timestamp": time.time()
    }

    # Overlay gesture/motion status text on the frame.
    status_text = f"Gesture: {CURRENT_GESTURE}" + (f" ({confidence * 100:.1f}%)" if confidence > 0 else "")
    cv2.putText(frame, status_text, (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    cv2.putText(frame, f"Motion: {CURRENT_MOTION}", (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

    # Show the current trajectory point count vs. the minimum required.
    cv2.putText(frame, f"Points: {len(TRAJECTORY)}/{MIN_POINTS}", (10, 110),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (200, 200, 0), 2)

    return frame


def generate_frames():
    """Yield MJPEG multipart chunks from the default webcam.

    Each yielded chunk is one multipart segment (--frame boundary + JPEG
    payload) for a multipart/x-mixed-replace response. If the camera cannot
    be opened, a static "Camera Error" placeholder is streamed instead.

    The capture device is released in a finally block so it is not leaked
    when the client disconnects and the generator is closed mid-loop.
    """
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("错误: 无法访问摄像头")
        # Camera unavailable: encode the placeholder once and stream it forever.
        black_frame = np.zeros((480, 640, 3), dtype=np.uint8)
        cv2.putText(black_frame, "Camera Error", (200, 240),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        ret, buffer = cv2.imencode('.jpg', black_frame)
        frame_bytes = buffer.tobytes()
        while True:
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')

    try:
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                print("错误: 无法读取摄像头帧")
                break

            # Run tracking/recognition and draw overlays.
            processed_frame = process_frame(frame)

            # Encode the processed frame as JPEG.
            ret, buffer = cv2.imencode('.jpg', processed_frame)
            if not ret:
                print("错误: 无法编码图像")
                continue

            frame_bytes = buffer.tobytes()

            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
    finally:
        # Runs on normal exit AND on GeneratorExit when the client disconnects.
        cap.release()


@app.route('/')
def index():
    """Serve the main page of the dynamic-gesture demo."""
    template_name = 'dynamic_index.html'
    return render_template(template_name)


@app.route('/video_feed')
def video_feed():
    """Stream processed webcam frames as an MJPEG multipart response."""
    stream = generate_frames()
    mjpeg_mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return Response(stream, mimetype=mjpeg_mimetype)


@app.route('/get_results')
def get_results():
    """Return the most recent recognition state as JSON."""
    latest = LAST_RESULTS
    return jsonify(latest)


@app.route('/start_tracking')
def start_tracking():
    """Enable trajectory tracking and clear any stale trajectory points.

    Bug fix: TRAJECTORY must be declared global here — without it the
    assignment created a dead local variable and the module-level trajectory
    was never actually reset.
    """
    global TRACKING_ENABLED, TRAJECTORY
    TRACKING_ENABLED = True
    TRAJECTORY = []  # reset trajectory
    return jsonify(status="success", message="Tracking started")


@app.route('/stop_tracking')
def stop_tracking():
    """Disable trajectory tracking; frames then pass through unprocessed."""
    global TRACKING_ENABLED
    TRACKING_ENABLED = False
    return jsonify({"status": "success", "message": "Tracking stopped"})


if __name__ == '__main__':
    # Ensure the model directory exists
    os.makedirs("models", exist_ok=True)

    # Print the TensorFlow version for troubleshooting
    print(f"TensorFlow version: {tf.__version__}")

    # Start the app. NOTE(review): debug=True enables the Werkzeug reloader
    # and interactive debugger — disable for any non-development deployment.
    app.run(host='0.0.0.0', port=5000, debug=True)