# video_inference.py
import torch
import cv2
import numpy as np
import argparse
from pathlib import Path
from torch.cuda.amp import GradScaler
from PIL import Image, ImageDraw
import torchvision.transforms as T

# Import project-local modules
from model.mobilevit_model import mobile_model, mobile_backbone, mobile_neck, mobile_head
from utils.common_utils import device_setup, load_config_file
from utils.detect_utils import PostProcess_yolo_head, detect_view
from utils.checkpoint_utils import load_checkpoint

# Class-id -> label mapping for the detector's output classes
# (adjust to match your dataset).
_CLASS_NAMES = ['holothurian', 'echinus', 'scallop', 'starfish', 'fish',
                'corals', 'diver', 'cuttlefish', 'turtle', 'jellyfish']
categories = dict(enumerate(_CLASS_NAMES))

def load_model(opts, checkpoint_path):
    """Build the MobileViT detection model and load trained weights.

    Args:
        opts: parsed options/config namespace; ``dev_device`` selects the
            torch device (defaults to CPU when absent).
        checkpoint_path: path to a checkpoint file; when missing, loading is
            skipped with a warning and the model keeps its initial weights.

    Returns:
        Tuple ``(model, device)`` with the model in eval mode on ``device``.
    """
    device = getattr(opts, "dev_device", torch.device('cpu'))

    # Assemble backbone -> neck -> head and move to the target device.
    backbone = mobile_backbone(opts)
    neck = mobile_neck(opts)
    head = mobile_head(opts)
    model = mobile_model(backbone, neck, head).to(device=device)

    # Restore weights from the checkpoint when it exists.
    # NOTE(review): load_checkpoint is given `opts` but not `checkpoint_path`;
    # presumably it reads the path from opts — confirm against its signature.
    if checkpoint_path and Path(checkpoint_path).exists():
        model, _, _, _, _, _, _ = load_checkpoint(
            opts=opts,
            model=model,
            optimizer=None,
            gradient_scalar=None,
            model_ema=None
        )
    else:
        # Previously this case was silent; make it visible so users don't
        # unknowingly run inference with random weights.
        print(f"Warning: checkpoint '{checkpoint_path}' not found; "
              "using randomly initialized weights")

    model.eval()
    return model, device

def preprocess_frame(frame, input_size=(640, 640)):
    """Convert a BGR OpenCV frame into a batched model input tensor.

    Args:
        frame: HxWx3 uint8 BGR image (as returned by ``cv2.VideoCapture``).
        input_size: ``(width, height)`` the frame is resized to.

    Returns:
        ``(tensor, (orig_h, orig_w))`` — a 1x3xHxW float tensor scaled to
        ``[0, 1]`` plus the original frame size for mapping boxes back later.
    """
    # OpenCV delivers BGR; PIL/torchvision expect RGB.
    pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    # Remember the original size before resizing (PIL reports (w, h)).
    orig_w, orig_h = pil_img.size

    # NOTE: a plain resize distorts the aspect ratio; no letterboxing is done.
    pil_img = pil_img.resize(input_size)

    # A single transform needs no Compose wrapper; ToTensor scales uint8
    # [0, 255] to float [0, 1] and moves channels first.
    img_tensor = T.ToTensor()(pil_img).unsqueeze(0)  # add batch dimension

    return img_tensor, (orig_h, orig_w)

def postprocess_results(results, orig_size, input_size=(640, 640)):
    """Placeholder post-processing hook.

    Currently a pure pass-through: ``PostProcess_yolo_head`` already emits
    final detections, so ``results`` is returned unchanged. ``orig_size`` and
    ``input_size`` are accepted for a future model-specific rescaling step.
    """
    return results

def draw_detections(frame, detections, orig_size, input_size=(640, 640)):
    """Draw detection boxes and class labels onto a copy of ``frame``.

    Args:
        frame: original BGR frame (not modified in place).
        detections: iterable of dicts with ``'bbox'`` (x1, y1, x2, y2 in
            model-input coordinates), ``'score'`` and ``'class_id'``.
        orig_size: ``(height, width)`` of the original frame.
        input_size: ``(width, height)`` the model input was resized to
            (same convention as ``preprocess_frame``, which passes it to
            ``PIL.Image.resize``).

    Returns:
        Annotated copy of the frame.
    """
    frame_copy = frame.copy()
    # Bug fix: input_size is (width, height), so the height ratio must use
    # input_size[1] and the width ratio input_size[0]. The previous indices
    # were swapped, which only worked for square input sizes.
    h_ratio = orig_size[0] / input_size[1]
    w_ratio = orig_size[1] / input_size[0]

    for detection in detections:
        x1, y1, x2, y2 = detection['bbox']
        score = detection['score']
        class_id = detection['class_id']

        # Map box corners from model-input space back to the original frame.
        x1 = int(x1 * w_ratio)
        y1 = int(y1 * h_ratio)
        x2 = int(x2 * w_ratio)
        y2 = int(y2 * h_ratio)

        cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 255, 0), 2)

        # Label text: class name (or 'Unknown') plus confidence.
        label = f"{categories.get(class_id, 'Unknown')}: {score:.2f}"
        cv2.putText(frame_copy, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    return frame_copy

def run_video_inference(opts, video_path, checkpoint_path, output_path=None):
    """Run frame-by-frame object detection on a video file or camera stream.

    Args:
        opts: parsed options/config namespace.
        video_path: path to a video file, or the string ``'0'`` for the webcam.
        checkpoint_path: path to the trained model checkpoint.
        output_path: optional path; when given, the annotated video is saved
            as mp4.

    Side effects: opens an OpenCV preview window ('q' quits early) and
    optionally writes an output video file.
    """
    model, device = load_model(opts, checkpoint_path)

    # '0' selects the default camera; anything else is treated as a file path.
    cap = cv2.VideoCapture(0 if video_path == '0' else video_path)

    if not cap.isOpened():
        print(f"Error: Cannot open video {video_path}")
        return

    fps = int(cap.get(cv2.CAP_PROP_FPS))
    if fps <= 0:
        # Cameras frequently report 0 FPS; fall back so VideoWriter is valid.
        fps = 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Set up the output writer only when a save path was requested.
    video_writer = None
    if output_path:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    print(f"Processing video: {width}x{height} @ {fps} FPS")

    frame_count = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            frame_count += 1
            print(f"Processing frame {frame_count}")

            # Preprocess the frame and move it to the model's device.
            input_tensor, orig_size = preprocess_frame(frame)
            input_tensor = input_tensor.to(device)

            # Inference only — no gradients needed.
            with torch.no_grad():
                backbone_features = model.backbone(input_tensor)
                neck_output = model.neck(backbone_features)

                # Fall back to the raw frame unless detections get drawn.
                annotated_frame = frame

                if 'detect_x_small' in neck_output:
                    detect_features = [
                        neck_output['detect_x_small'],
                        neck_output['detect_small'],
                        neck_output['detect_medium'],
                        neck_output['detect_large']
                    ]

                    # Run the detection head on the multi-scale features.
                    detection_output, _ = model.head.Detect_head(detect_features)

                    # Map boxes from the 640x640 model input back to the frame.
                    target_sizes = torch.tensor([[orig_size[0], orig_size[1]]])
                    temp_sizes = torch.tensor([[640, 640]])  # model input size

                    results = PostProcess_yolo_head(
                        detection_output,
                        max_det=100,
                        temp_sizes=temp_sizes,
                        target_sizes=target_sizes,
                        is_plotting=True,
                        apply_nms=True
                    )

                    if results and len(results) > 0:
                        result = results[0]  # single-image batch
                        boxes = result['boxes'].cpu().numpy()
                        scores = result['scores'].cpu().numpy()
                        labels = result['labels'].cpu().numpy()

                        annotated_frame = frame.copy()
                        for box, score, label in zip(boxes, scores, labels):
                            if score > 0.5:  # confidence threshold
                                x1, y1, x2, y2 = box.astype(int)
                                # Clamp coordinates to the frame bounds.
                                x1 = max(0, min(x1, width - 1))
                                y1 = max(0, min(y1, height - 1))
                                x2 = max(0, min(x2, width - 1))
                                y2 = max(0, min(y2, height - 1))

                                cv2.rectangle(annotated_frame, (x1, y1),
                                              (x2, y2), (0, 255, 0), 2)

                                class_name = categories.get(
                                    int(label), f"Class {int(label)}")
                                label_text = f"{class_name}: {score:.2f}"
                                cv2.putText(annotated_frame, label_text,
                                            (x1, y1 - 10),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                            (0, 255, 0), 2)

            # Show the (possibly annotated) frame.
            cv2.imshow('Video Inference', annotated_frame)

            # Save the frame when an output writer was configured.
            if video_writer:
                video_writer.write(annotated_frame)

            # Press 'q' to quit early.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release capture/writer and close windows, even on error.
        cap.release()
        if video_writer:
            video_writer.release()
        cv2.destroyAllWindows()

    print(f"Processed {frame_count} frames")

def main():
    """Parse command-line arguments and kick off video inference."""
    # (flag, add_argument kwargs) pairs — kept in one table for readability.
    arg_specs = [
        ('--common_config_file',
         dict(type=str, default="./mobilevitv3_small_multiserver.yaml",
              help="Configuration file")),
        ('--video_path',
         dict(type=str, required=True,
              help="Path to input video file or '0' for camera")),
        ('--checkpoint',
         dict(type=str, default="./runs/train/exp1/weights/checkpoint648.pt",
              help="Path to model checkpoint")),
        ('--output',
         dict(type=str, default=None,
              help="Path to output video file (optional)")),
    ]

    parser = argparse.ArgumentParser(description='Video inference with MobileViT model')
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)

    opts = parser.parse_args()
    opts = load_config_file(opts)  # merge settings from the YAML config
    opts = device_setup(opts)      # select the compute device

    run_video_inference(opts, opts.video_path, opts.checkpoint, opts.output)

# Script entry point: run inference only when executed directly, not on import.
if __name__ == '__main__':
    main()