#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
H.265视频抽帧并生成YOLO格式数据集 + 自动标注
完整流水线：视频抽帧 -> 自动标注 -> 生成数据集配置
"""

import sys
import os
from pathlib import Path
import subprocess
import random
import shutil
import time
import datetime
import tkinter as tk
from tkinter import filedialog, messagebox


# Remove current directory from Python path to avoid local module conflicts
# (a local file shadowing an installed package name, e.g. ultralytics.py,
# would otherwise be imported instead of the real package).
# NOTE: list.remove() drops only the FIRST occurrence of each entry.
current_dir = str(Path(__file__).parent.absolute())
if current_dir in sys.path:
    sys.path.remove(current_dir)
if '' in sys.path:
    sys.path.remove('')

# Now import ultralytics from the installed package
import cv2
from ultralytics import YOLO

def select_video_file():
    """Show a file-open dialog and return the chosen video as a Path.

    Returns:
        pathlib.Path of the selected file, or None if the dialog was
        cancelled.
    """
    # Spin up a hidden Tk root so only the dialog itself is visible.
    root = tk.Tk()
    root.withdraw()

    chosen = filedialog.askopenfilename(
        title="选择视频文件",
        filetypes=[
            ("视频文件", "*.mp4 *.avi *.mov *.mkv *.flv *.wmv *.m4v"),
            ("MP4文件", "*.mp4"),
            ("AVI文件", "*.avi"),
            ("所有文件", "*.*")
        ],
        initialdir="D:/"  # start browsing on the D: drive
    )

    # Tear down the hidden root window before returning.
    root.destroy()

    return Path(chosen) if chosen else None

def process_h265_to_yolo_hardcoded(input_folder=None, output_root=None):
    """Extract frames from a video with FFmpeg into a YOLO dataset layout.

    All frames are written to <output_root>/images/train under unique
    millisecond-timestamp names; an empty labels/train folder is created
    for the later auto-labeling stage.

    Args:
        input_folder: Path to the input video. When None, a file-picker
            dialog (select_video_file) asks the user to choose one.
        output_root: Dataset root directory (default: D:/yolo_dataset).
            Existing sub-folders inside it are deleted first.

    Returns:
        bool: True on success, False on any failure (no file selected,
        FFmpeg missing or failing, zero frames extracted, ...).
    """

    if input_folder is None:
        # Let the user pick the video interactively.
        print("请选择要处理的视频文件...")
        INPUT_VIDEO = select_video_file()
    else:
        INPUT_VIDEO = Path(input_folder)

    if INPUT_VIDEO is None:
        print("❌ 未选择视频文件，程序退出")
        return False

    print(f"✓ 已选择视频文件: {INPUT_VIDEO}")

    OUTPUT_ROOT = Path(output_root) if output_root else Path('D:/yolo_dataset')  # YOLO dataset root

    # Clean out previous sub-folders, or create the root if it is missing.
    # Files directly under the root (e.g. dataset.yaml) are left alone.
    if OUTPUT_ROOT.exists():
        print(f"清理输出目录: {OUTPUT_ROOT}")
        for item in OUTPUT_ROOT.iterdir():
            if item.is_dir():
                try:
                    shutil.rmtree(item)
                    print(f"  删除文件夹: {item.name}")
                except OSError as e:  # narrowed: only filesystem errors expected here
                    print(f"  删除文件夹失败 {item.name}: {e}")
        print("✓ 输出目录清理完成")
    else:
        OUTPUT_ROOT.mkdir(parents=True, exist_ok=True)
        print(f"创建输出目录: {OUTPUT_ROOT}")

    # Frame-extraction parameters
    FRAME_RATE = 2              # sampling rate (frames per second)
    QUALITY = 2                 # JPEG quality (1-31, lower = better quality)
    MAX_FRAMES = 1000           # hard cap on the number of frames kept

    # Image-processing parameters
    RESIZE_WIDTH = 1280         # output width, aspect kept (0 = original size)
    START_TIME = 0              # start offset in seconds
    DURATION = 0                # duration in seconds (0 = whole video)

    # ========================

    print("=" * 60)
    print("H.265视频抽帧工具（硬编码参数版）")
    print("=" * 60)
    print(f"输入视频: {INPUT_VIDEO}")
    print(f"输出目录: {OUTPUT_ROOT}")
    print(f"抽帧参数: {FRAME_RATE} FPS, 质量等级 {QUALITY}")
    print(f"数据集输出: 全部保存到 train 文件夹")
    print("=" * 60)

    # Validate the input file.
    if not INPUT_VIDEO.exists():
        print(f"错误: 找不到视频文件 {INPUT_VIDEO}")
        return False

    # Create the YOLO directory layout (train split only).
    print("创建YOLO目录结构...")
    img_dir = OUTPUT_ROOT / 'images' / 'train'
    label_dir = OUTPUT_ROOT / 'labels' / 'train'
    img_dir.mkdir(parents=True, exist_ok=True)
    label_dir.mkdir(parents=True, exist_ok=True)
    print(f"  创建: {img_dir}")
    print(f"  创建: {label_dir}")

    # Verify FFmpeg is available on PATH.
    try:
        subprocess.run(['ffmpeg', '-version'], capture_output=True, check=True)
        print("✓ FFmpeg 可用")
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("✗ 错误: 找不到FFmpeg")
        print("请先安装FFmpeg: conda install ffmpeg")
        return False

    print("\n开始使用FFmpeg抽帧...")

    # Build the FFmpeg command (argument list, shell=False).
    cmd = ['ffmpeg', '-y']  # -y: overwrite existing output files

    # Seek/duration options placed before -i for fast input seeking.
    if START_TIME > 0:
        cmd.extend(['-ss', str(START_TIME)])
    if DURATION > 0:
        cmd.extend(['-t', str(DURATION)])

    cmd.extend(['-i', str(INPUT_VIDEO)])  # input file

    # BUG FIX: the filter chain used to be passed via two separate -vf
    # options (fps, then fps+scale); FFmpeg applies only the LAST -vf on a
    # stream, so the first option was dead weight. Build the chain once.
    video_filter = f'fps={FRAME_RATE}'
    if RESIZE_WIDTH > 0:
        video_filter += f',scale={RESIZE_WIDTH}:-1'  # -1 keeps aspect ratio
    cmd.extend(['-vf', video_filter])
    cmd.extend(['-q:v', str(QUALITY)])  # JPEG quality

    # Frames land in a temporary folder first, then get renamed/moved.
    temp_output = OUTPUT_ROOT / 'images' / 'temp_frames'
    temp_output.mkdir(parents=True, exist_ok=True)
    cmd.append(str(temp_output / 'frame_%06d.jpg'))

    print(f"执行命令: {' '.join(cmd)}")

    try:
        result = subprocess.run(cmd, capture_output=True, text=True)

        if result.returncode != 0:
            print(f"FFmpeg错误: {result.stderr}")
            return False

        print("✓ FFmpeg抽帧完成!")

    except Exception as e:
        print(f"FFmpeg执行失败: {e}")
        return False

    # Collect extracted frames in temporal order (BUG FIX: glob() returns
    # entries in arbitrary order, which previously made the MAX_FRAMES
    # truncation keep a nondeterministic subset).
    temp_frames = sorted(temp_output.glob('frame_*.jpg'))
    actual_frames = min(len(temp_frames), MAX_FRAMES)

    print(f"共抽到 {len(temp_frames)} 帧，实际使用 {actual_frames} 帧")

    if actual_frames == 0:
        print("没有抽到任何帧")
        return False

    # Enforce the frame cap.
    if len(temp_frames) > MAX_FRAMES:
        temp_frames = temp_frames[:MAX_FRAMES]
        print(f"限制帧数至 {MAX_FRAMES} 帧")

    print(f"\n数据集分配:")
    print(f"  - 训练集: {actual_frames} 帧（全部）")

    def generate_filename():
        """Return a '<ms-timestamp>_<3-digit-random>' base name."""
        timestamp = int(time.time() * 1000)  # millisecond precision
        random_num = random.randint(100, 999)
        return f"{timestamp}_{random_num}"

    # Set used to guarantee file-name uniqueness within this run.
    used_filenames = set()

    for i, frame_path in enumerate(temp_frames):
        if i >= actual_frames:
            break

        # Generate a unique base name (retry on the rare collision).
        while True:
            new_name = generate_filename()
            if new_name not in used_filenames:
                used_filenames.add(new_name)
                break
            time.sleep(0.001)  # let the millisecond timestamp advance

        # Move the frame into images/train under its new name.
        target_img = OUTPUT_ROOT / 'images' / 'train' / f"{new_name}.jpg"
        shutil.move(str(frame_path), str(target_img))

        # Progress report every 50 frames and on the last one.
        if i % 50 == 0 or i == actual_frames - 1:
            print(f"处理进度: {i+1}/{actual_frames} ({(i+1)/actual_frames*100:.1f}%) - 当前文件: {new_name}")

    # BUG FIX: the old `temp_output.rmdir()` under a bare `except:` silently
    # failed whenever frames beyond MAX_FRAMES were left behind, leaking
    # stale JPEGs under images/temp_frames. Remove the folder and any
    # leftover content in one pass.
    shutil.rmtree(temp_output, ignore_errors=True)

    print(f"\n🎉 视频抽帧完成!")
    print(f"📁 YOLO数据集位置: {OUTPUT_ROOT}")
    print(f"📊 总帧数: {actual_frames}")
    print(f"📝 文件命名格式: [时间戳毫秒]_[三位随机数].jpg/.txt")

    # Final statistics.
    img_count = len(list((OUTPUT_ROOT / 'images' / 'train').glob("*.jpg")))
    print(f"   train: {img_count} 图片")

    # Show a few sample file names.
    train_imgs = list((OUTPUT_ROOT / 'images' / 'train').glob("*.jpg"))
    if train_imgs:
        print(f"\n📋 文件名示例:")
        for img_path in train_imgs[:3]:
            print(f"   {img_path.name}")

    return True

# ========== Auto-labeling configuration ==========
MODEL_PATH = Path('D:/pad230.pt')           # YOLO weights for auto-labeling (NOTE(review): log messages elsewhere say "pad152" — confirm which checkpoint is intended)
IMAGE_FOLDER = Path('D:/yolo_dataset/images/train')    # Input image folder
LABEL_OUTPUT_DIR = Path('D:/yolo_dataset/labels/train')

# Detection parameters
CONFIDENCE_THRESHOLD = 0.3             # Confidence threshold; detections below it are discarded
MIN_AREA_RATIO = 0.001                 # Minimum box-area / image-area ratio (0.1%)
MAX_AREA_RATIO = 0.95                  # Maximum box-area / image-area ratio (95%)

# Class mapping - model class id -> class name for the loaded weights
CLASS_MAPPING = {
    0: "durian",    # Durian
    1: "person"     # Person
}

# Output class IDs written into the YOLO label files
OUTPUT_CLASS_IDS = {
    "durian": 0,
    "person": 1
}
# ==========================

def load_model(model_path):
    """Load a YOLO model from *model_path*.

    Args:
        model_path: Path to the .pt weights file.

    Returns:
        The loaded ultralytics YOLO model, or None when loading fails.
    """
    try:
        model = YOLO(model_path)
        print(f"✓ Successfully loaded pad152.pt model: {model_path}")

        # model.info() prints architecture details as a side effect; the
        # return value was previously stored in an unused local, so just
        # call it for the output.
        model.info()
        print(f"✓ Model classes count: {len(model.names)}")
        print(f"✓ Model classes: {list(model.names.values())}")

        return model
    except Exception as e:
        # Broad catch is deliberate: any failure means "no usable model",
        # and the caller treats None as the error signal.
        print(f"✗ Model loading failed: {e}")
        return None

def is_valid_detection(x1, y1, x2, y2, img_width, img_height, min_area_ratio, max_area_ratio):
    """Return True when a detection box passes all sanity filters.

    Filters applied, in order:
      1. box area as a fraction of the image must lie in
         [min_area_ratio, max_area_ratio];
      2. both sides must be at least 10 pixels;
      3. the width/height aspect ratio must lie in [0.1, 10].
    """
    box_w = x2 - x1
    box_h = y2 - y1

    # 1. Share of the image covered by the box.
    area_fraction = (box_w * box_h) / (img_width * img_height)
    if not (min_area_ratio <= area_fraction <= max_area_ratio):
        return False

    # 2. Reject tiny boxes (either side under 10 px).
    if box_w < 10 or box_h < 10:
        return False

    # 3. Reject extreme aspect ratios.
    return 0.1 <= box_w / box_h <= 10

def process_images(model, image_folder, label_output_dir, confidence_threshold, min_area_ratio, max_area_ratio):
    """Batch process images and generate YOLO annotation files.

    Runs the model over every supported image in *image_folder* and writes
    one .txt label file per image into *label_output_dir* (YOLO format:
    "class x_center y_center width height", coordinates normalized to
    [0, 1]). Images with no detections get an empty .txt file.

    Args:
        model: Loaded ultralytics YOLO model, called as model(path).
        image_folder: Directory holding the images to label.
        label_output_dir: Pre-existing directory for the label files
            (this function does NOT create it; it bails out if missing).
        confidence_threshold: Minimum detection confidence to keep.
        min_area_ratio: Minimum box/image area ratio to keep.
        max_area_ratio: Maximum box/image area ratio to keep.
    """
    image_folder = Path(image_folder)
    label_output_dir = Path(label_output_dir)

    if not image_folder.exists():
        print(f"✗ Image directory does not exist: {image_folder}")
        return

    if not label_output_dir.exists():
        print(f"✗ Label output directory does not exist: {label_output_dir}")
        return

    # Supported image extensions (compared case-insensitively)
    image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif')
    image_files = [f for f in image_folder.iterdir() if f.suffix.lower() in image_extensions]

    if not image_files:
        print("✗ No supported image files found")
        return

    print(f"✓ Starting to process {len(image_files)} images...")
    print(f"✓ Detection parameters:")
    print(f"   - Confidence threshold: {confidence_threshold}")
    print(f"   - Minimum area ratio: {min_area_ratio}")
    print(f"   - Maximum area ratio: {max_area_ratio}")
    print(f"   - Supported classes: {list(CLASS_MAPPING.values())}")

    total_detections = 0   # valid boxes written across all images
    processed_count = 0    # images that produced a label file
    skipped_count = 0      # unreadable images or per-image failures

    for i, img_path in enumerate(image_files):
        try:
            # Load image only to obtain its dimensions for normalization.
            img = cv2.imread(str(img_path))
            if img is None:
                print(f"✗ Cannot load image: {img_path}")
                skipped_count += 1
                continue

            img_height, img_width = img.shape[:2]

            # YOLO inference
            results = model(str(img_path), verbose=False)

            if results[0].boxes is None:
                # No objects detected, create empty annotation file
                txt_name = img_path.stem + ".txt"
                txt_path = label_output_dir / txt_name
                with open(txt_path, 'w') as f:
                    pass  # Create empty file
                processed_count += 1
                continue

            # Prepare YOLO annotation content
            yolo_lines = []
            valid_detections = 0

            boxes = results[0].boxes
            for j in range(len(boxes)):
                cls = int(boxes.cls[j])
                
                # Skip classes outside the supported mapping.
                if cls not in CLASS_MAPPING:
                    continue

                # Skip low-confidence detections.
                conf = float(boxes.conf[j])
                if conf < confidence_threshold:
                    continue

                # Corner coordinates in pixels, truncated to int.
                x1, y1, x2, y2 = map(float, boxes.xyxy[j].tolist())
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

                # Ensure coordinates are within image bounds
                x1 = max(0, min(x1, img_width))
                y1 = max(0, min(y1, img_height))
                x2 = max(0, min(x2, img_width))
                y2 = max(0, min(y2, img_height))

                # Drop boxes failing area/size/aspect-ratio sanity checks.
                if not is_valid_detection(x1, y1, x2, y2, img_width, img_height, min_area_ratio, max_area_ratio):
                    continue

                # Convert to YOLO format (normalized center + size).
                x_center = ((x1 + x2) / 2) / img_width
                y_center = ((y1 + y2) / 2) / img_height
                width = (x2 - x1) / img_width
                height = (y2 - y1) / img_height

                # Map model class id -> class name -> output class id.
                class_name = CLASS_MAPPING[cls]
                output_class_id = OUTPUT_CLASS_IDS[class_name]

                yolo_lines.append(f"{output_class_id} {x_center:.6f} {y_center:.6f} {width:.6f} {height:.6f}\n")
                valid_detections += 1

            # Write annotation file (one per image, even when empty).
            txt_name = img_path.stem + ".txt"
            txt_path = label_output_dir / txt_name

            with open(txt_path, 'w') as f:
                f.writelines(yolo_lines)

            total_detections += valid_detections
            processed_count += 1

            # Show progress every 50 images and on the last one.
            if i % 50 == 0 or i == len(image_files) - 1:
                print(f"✓ Processed: {i+1}/{len(image_files)} - Current image detected {valid_detections} objects")

        except Exception as e:
            # Per-image failures are logged and counted, not fatal.
            print(f"✗ Processing failed {img_path}: {e}")
            skipped_count += 1

    print(f"\n✓ Auto-labeling completed!")
    print(f"✓ Statistics:")
    print(f"   - Successfully processed: {processed_count} images")
    print(f"   - Skipped: {skipped_count} images")
    print(f"   - Total detections: {total_detections} objects")
    print(f"   - Average per image: {total_detections/max(processed_count, 1):.2f} objects")

def create_dataset_yaml(label_output_dir):
    """Write dataset.yaml (YOLO training config) at the dataset root.

    The root is resolved as two levels above *label_output_dir*
    (i.e. <root>/labels/train -> <root>), and train/val both point at
    images/train.
    """
    dataset_root = Path(label_output_dir).parent.parent
    yaml_content = f"""# Pad152 Auto-Generated Dataset Configuration
# Generated automatically by auto_labeling_pad152.py

path: {dataset_root}  # dataset root dir
train: images/train  # train images (relative to 'path')
val: images/train    # val images (relative to 'path') 
test:  # test images (optional)

# Classes
nc: 2  # number of classes
names: ['durian', 'person']  # class names
"""

    yaml_path = dataset_root / "dataset.yaml"
    yaml_path.write_text(yaml_content, encoding='utf-8')

    print(f"✓ Dataset configuration file created: {yaml_path}")

def auto_labeling():
    """Run the auto-labeling stage end to end.

    Loads the configured model, labels every image in IMAGE_FOLDER into
    LABEL_OUTPUT_DIR, then writes dataset.yaml.

    Returns:
        bool: True on success, False when the model could not be loaded.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("开始自动标注流程")
    print(banner)
    print(f"✓ Image directory: {IMAGE_FOLDER}")
    print(f"✓ Label output directory: {LABEL_OUTPUT_DIR}")
    print(f"✓ Model path: {MODEL_PATH}")
    print(banner)

    # Model is the only hard prerequisite; bail out early without it.
    model = load_model(MODEL_PATH)
    if model is None:
        return False

    # Label every image, then emit the training config.
    process_images(
        model,
        IMAGE_FOLDER,
        LABEL_OUTPUT_DIR,
        CONFIDENCE_THRESHOLD,
        MIN_AREA_RATIO,
        MAX_AREA_RATIO,
    )

    create_dataset_yaml(LABEL_OUTPUT_DIR)

    print(f"\n✓ Auto-labeling completed!")
    return True

def main(input_folder=None, output_root=None, output_root3=None):
    """Run the full pipeline: frame extraction, then auto-labeling.

    Args:
        input_folder: Video file to process; a file dialog opens when None.
        output_root: Dataset root for the freshly extracted data
            (defaults to D:/yolo_dataset).
        output_root3: Optional root of an existing dataset; when given,
            the new images/labels are merged into it.
    """
    print("🚀 开始执行完整流水线：视频抽帧 -> 自动标注")
    print("=" * 80)

    # Step 1: frame extraction.
    print("\n📹 第一步：视频抽帧")
    print("-" * 40)
    step1_success = process_h265_to_yolo_hardcoded(input_folder=input_folder, output_root=output_root)

    if not step1_success:
        print("❌ 第一步失败，停止执行")
        return

    # Step 2: auto-labeling.
    print("\n🏷️ 第二步：自动标注")
    print("-" * 40)
    step2_success = auto_labeling()

    if not step2_success:
        print("❌ 第二步失败")
        return

    print("\n" + "=" * 80)
    print("🎉 完整流水线执行成功！")
    print("=" * 80)

    # BUG FIX: the final statistics and merge were previously hard-coded to
    # D:/yolo_dataset even when the caller supplied a different output_root;
    # keep them consistent with step 1.
    # NOTE(review): auto_labeling() still reads the module-level
    # IMAGE_FOLDER/LABEL_OUTPUT_DIR constants — confirm they match
    # output_root when a custom root is used.
    OUTPUT_ROOT = Path(output_root) if output_root else Path('D:/yolo_dataset')
    img_count = len(list((OUTPUT_ROOT / 'images' / 'train').glob("*.jpg")))
    label_count = len(list((OUTPUT_ROOT / 'labels' / 'train').glob("*.txt")))

    if output_root3:
        output_root3_path = Path(output_root3)
        if not output_root3_path.exists():
            print(f"📁 创建 output_root3 文件夹: {output_root3_path}")
            output_root3_path.mkdir(parents=True, exist_ok=True)

        # Make sure the destination train folders exist before merging
        # (the walrus-bound names from the old code were never used).
        (output_root3_path / 'images' / 'train').mkdir(parents=True, exist_ok=True)
        (output_root3_path / 'labels' / 'train').mkdir(parents=True, exist_ok=True)

        merge_datasets(output_root3_path, OUTPUT_ROOT)

    print(f"📊 最终统计:")
    print(f"   - 图片数量: {img_count}")
    print(f"   - 标签文件: {label_count}")
    print(f"   - 数据集位置: {OUTPUT_ROOT}")
    print(f"   - 配置文件: {OUTPUT_ROOT}/dataset.yaml")

    print(f"\n✅ 使用说明:")
    print(f"1. YOLO数据集已生成在 {OUTPUT_ROOT}")
    print(f"2. 可在 labels/train/ 目录检查自动生成的标注")
    print(f"3. 使用 labelImg 等工具进行标注校正")
    print(f"4. 使用 dataset.yaml 进行模型训练")



def merge_datasets(old_data_path, new_data_path):
    """Merge the new dataset's train split into the old dataset.

    Copies every *.jpg from <new_data_path>/images/train into
    <old_data_path>/images/train, and the matching .txt label (when one
    exists) into <old_data_path>/labels/train. Existing files with the
    same name are overwritten by shutil.copy2.

    Args:
        old_data_path: Root of the existing (destination) dataset.
        new_data_path: Root of the freshly generated (source) dataset.
    """
    old_img_dir = Path(old_data_path) / 'images' / 'train'
    old_label_dir = Path(old_data_path) / 'labels' / 'train'

    # Robustness fix: ensure destinations exist so copy2 cannot fail on a
    # missing folder (the old version relied on the caller creating them).
    old_img_dir.mkdir(parents=True, exist_ok=True)
    old_label_dir.mkdir(parents=True, exist_ok=True)

    new_img_dir = Path(new_data_path) / 'images' / 'train'
    new_label_dir = Path(new_data_path) / 'labels' / 'train'

    # Copy the new data; a missing source folder means nothing to merge.
    if new_img_dir.exists():
        for img_file in new_img_dir.glob('*.jpg'):
            shutil.copy2(img_file, old_img_dir)
            label_file = new_label_dir / f"{img_file.stem}.txt"
            if label_file.exists():
                shutil.copy2(label_file, old_label_dir)

    print(f"数据集合并完成，共 {len(list(old_img_dir.glob('*.jpg')))} 张图片")




# Script entry point: run the full pipeline with interactive file selection
# and the default D:/yolo_dataset output locations.
if __name__ == "__main__":
    main()