

import os
import sys
import glob
from collections import deque




import cv2
import numpy as np
import torch

sys.path.append('./')

from models.model_utils import create_model, load_pretrained_model
from config.config import parse_configs
from utils.post_processing import post_processing
from utils.misc import time_synchronized


def demo_images(configs):
    """Process a sequence of images and output segmentation masks with ball positions.

    Reads every image in ``configs.image_dir`` (sorted by filename), feeds a
    sliding window of ``configs.num_frames_sequence`` frames to the model, and
    writes annotated frames plus colored segmentation masks under
    ``configs.save_demo_dir``.

    Args:
        configs: parsed configuration namespace; must provide at least
            ``pretrained_path``, ``input_size``, ``num_frames_sequence``,
            ``gpu_idx`` and the thresholds consumed by ``post_processing``.
    """
    # Default to GPU 0 when no device index was given; fall back to CPU.
    if configs.gpu_idx is None:
        configs.gpu_idx = 0

    if torch.cuda.is_available():
        configs.device = torch.device('cuda:{}'.format(configs.gpu_idx))
    else:
        configs.device = torch.device('cpu')
        print("CUDA not available, using CPU")

    # Create the output directories; exist_ok avoids the race between an
    # exists() check and makedirs().
    os.makedirs(configs.save_demo_dir, exist_ok=True)
    configs.frame_dir = os.path.join(configs.save_demo_dir, 'processed_frames')
    configs.mask_dir = os.path.join(configs.save_demo_dir, 'masks')
    os.makedirs(configs.frame_dir, exist_ok=True)
    os.makedirs(configs.mask_dir, exist_ok=True)

    # Build the model and load the pre-trained weights.
    model = create_model(configs)
    model.to(configs.device)

    assert configs.pretrained_path is not None, "Need to load the pre-trained model"
    model = load_pretrained_model(model, configs.pretrained_path, configs.gpu_idx, configs.overwrite_global_2_local)
    model.eval()

    # Collect the input images (both lower- and upper-case extensions).
    image_extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp']
    image_paths = []
    for ext in image_extensions:
        image_paths.extend(glob.glob(os.path.join(configs.image_dir, ext)))
        image_paths.extend(glob.glob(os.path.join(configs.image_dir, ext.upper())))

    image_paths.sort()
    print(f"Found {len(image_paths)} images to process")

    # Geometry: ratios to map model-space coordinates back to the original
    # resolution.
    queue_frames = deque(maxlen=configs.num_frames_sequence)
    w_original, h_original = configs.original_size if hasattr(configs, 'original_size') else (1920, 1080)
    w_resize, h_resize = configs.input_size
    w_ratio = w_original / w_resize
    h_ratio = h_original / h_resize

    processed_count = 0

    with torch.no_grad():
        for img_path in image_paths:
            # Load and normalize the frame to the expected working size.
            original_img = cv2.imread(img_path)
            if original_img is None:
                print(f"Warning: Could not read image {img_path}")
                continue

            original_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
            original_img = cv2.resize(original_img, (w_original, h_original))

            # Down-scale for the model and switch to channel-first layout.
            resized_img = cv2.resize(original_img, (w_resize, h_resize))
            resized_img = resized_img.transpose(2, 0, 1)  # HWC to CHW

            queue_frames.append(resized_img)

            # Run inference once a full sliding window is available.
            if len(queue_frames) == configs.num_frames_sequence:
                # Stack the window along the channel axis, as model.run_demo
                # expects a single (1, C*T, H, W) tensor.
                input_sequence = np.concatenate(list(queue_frames), axis=0)
                input_tensor = torch.from_numpy(input_sequence).to(configs.device, non_blocking=True).float().unsqueeze(0)

                t1 = time_synchronized()
                pred_ball_global, pred_ball_local, pred_events, pred_seg = model.run_demo(input_tensor)
                t2 = time_synchronized()

                prediction_global, prediction_local, prediction_seg, prediction_events = post_processing(
                    pred_ball_global, pred_ball_local, pred_events, pred_seg, configs.input_size[0],
                    configs.thresh_ball_pos_mask, configs.seg_thresh, configs.event_thresh)

                # Combine the coarse (global) and refined (local) predictions
                # into a ball position in original-image pixel coordinates.
                prediction_ball_final = [
                    int(prediction_global[0] * w_ratio + prediction_local[0] - w_resize / 2),
                    int(prediction_global[1] * h_ratio + prediction_local[1] - h_resize / 2)
                ]

                # Up-scale the segmentation mask back to the original size.
                seg_mask = prediction_seg.astype(np.uint8)
                seg_mask_resized = cv2.resize(seg_mask, (w_original, h_original))

                # create_colored_mask() produces RGB; keep the RGB copy for
                # blending with the RGB frame and a BGR copy for OpenCV I/O.
                colored_mask = create_colored_mask(seg_mask_resized)
                colored_mask_bgr = cv2.cvtColor(colored_mask, cv2.COLOR_RGB2BGR)

                result_img = plot_detection_with_mask(original_img, prediction_ball_final, colored_mask, prediction_events)

                output_filename = f"processed_{processed_count:06d}.jpg"
                mask_filename = f"mask_{processed_count:06d}.jpg"

                # The annotated frame is RGB; convert to BGR before writing.
                result_img_bgr = cv2.cvtColor(result_img, cv2.COLOR_RGB2BGR)
                cv2.imwrite(os.path.join(configs.frame_dir, output_filename), result_img_bgr)

                # BUGFIX: imwrite expects BGR, so write the converted mask —
                # previously the RGB mask was saved directly, swapping the
                # red/blue classes in the output files.
                cv2.imwrite(os.path.join(configs.mask_dir, mask_filename), colored_mask_bgr)

                # Optional live preview (imshow also expects BGR).
                if hasattr(configs, 'show_image') and configs.show_image:
                    cv2.imshow('Processed Image', result_img_bgr)
                    cv2.imshow('Segmentation Mask', colored_mask_bgr)
                    cv2.waitKey(1)

                processed_count += 1
                print(f'Processed frame {processed_count} - time {t2 - t1:.3f}s - Ball position: {prediction_ball_final}')

    # Close any preview windows opened above.
    if hasattr(configs, 'show_image') and configs.show_image:
        cv2.destroyAllWindows()

    print(f"Processing completed. {processed_count} images processed.")
    print(f"Results saved to: {configs.frame_dir}")
    print(f"Masks saved to: {configs.mask_dir}")


def create_colored_mask(mask):
    """Convert a 2-D label mask into an RGB visualization image.

    Args:
        mask: 2-D integer array of per-pixel class labels
            (0 = background, 1 = table, 2 = net, 3 = other — assumed from
            the original comments; confirm against the model's class map).

    Returns:
        (H, W, 3) uint8 array. Labels 0-3 are painted with the palette
        below; any other label value stays black.
    """
    # Label -> RGB color. Channel order is RGB, matching the RGB frames
    # this script works with (callers convert to BGR for OpenCV I/O).
    palette = [
        [0, 0, 0],       # 0: background - black
        [0, 255, 0],     # 1: table - green
        [255, 0, 0],     # 2: net - red
        [0, 0, 255],     # 3: other - blue
    ]

    colored_mask = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
    for label, color in enumerate(palette):
        # Assign all three channels in one vectorized store; the previous
        # per-channel loop with an np.any() guard was redundant.
        colored_mask[mask == label] = color

    return colored_mask


def plot_detection_with_mask(img, ball_pos, mask, events):
    """Overlay the segmentation mask, ball marker and event text on a frame.

    Args:
        img: (H, W, 3) uint8 frame (RGB channel order in this script).
        ball_pos: [x, y] pixel coordinates of the ball in *img*.
        mask: (H, W, 3) uint8 colored segmentation mask, same size as *img*.
        events: sequence where events[0] is the bounce score and events[1]
            the net-hit score — assumed from the labels below; confirm
            against post_processing's output order.

    Returns:
        A new (H, W, 3) uint8 image; *img* itself is left unmodified.
    """
    # cv2.addWeighted allocates a fresh output array, so the defensive
    # img.copy() the original made was dead work and has been removed.
    mask_overlay = cv2.addWeighted(img, 0.7, mask, 0.3, 0)

    # Draw the ball only when the position looks valid (strictly positive).
    if ball_pos[0] > 0 and ball_pos[1] > 0:
        cv2.circle(mask_overlay, tuple(ball_pos), 8, (255, 255, 0), -1)  # filled yellow dot
        cv2.circle(mask_overlay, tuple(ball_pos), 12, (0, 0, 0), 2)      # black outline

    # Event scores in the top-left corner.
    event_text = f'Bounce: {events[0]:.2f}, Net: {events[1]:.2f}'
    cv2.putText(mask_overlay, event_text, (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2, cv2.LINE_AA)

    # Ball position readout just below the event text.
    pos_text = f'Ball: ({ball_pos[0]}, {ball_pos[1]})'
    cv2.putText(mask_overlay, pos_text, (30, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2, cv2.LINE_AA)

    return mask_overlay


def plot_detection(img, ball_pos, seg_img, events):
    """Show the predicted information in the image.

    Blends the (scaled) segmentation image onto the frame, marks the ball
    with a filled magenta dot, and prints the bounce/net scores.
    Returns the annotated image.
    """
    overlay = cv2.addWeighted(img, 1., seg_img * 255, 0.3, 0)
    # cv2.circle / cv2.putText draw in place and return the same array,
    # so no reassignment is needed.
    cv2.circle(overlay, tuple(ball_pos), 5, (255, 0, 255), -1)
    caption = 'is bounce: {:.2f}, is net: {:.2f}'.format(events[0], events[1])
    cv2.putText(overlay, caption, (100, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)

    return overlay


if __name__ == '__main__':
    configs = parse_configs()

    # Fill in demo-specific settings only when parse_configs() did not
    # already define them.
    fallback_settings = {
        'image_dir': './input_images',        # input image directory
        'original_size': (1920, 1080),        # original image size (w, h)
        'save_demo_dir': './output_demo',     # output directory
        'show_image': True,                   # whether to display images
    }
    for name, value in fallback_settings.items():
        if not hasattr(configs, name):
            setattr(configs, name, value)

    demo_images(configs=configs)