#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
H.265视频抽帧并生成YOLO格式数据集 + 自动标注
完整流水线：视频抽帧 -> 自动标注 -> 生成数据集配置
"""

import sys
from pathlib import Path
import subprocess
import random
import shutil
import time
import tkinter as tk
from tkinter import filedialog, messagebox
import os # Added for os.chdir


# Remove current directory from Python path to avoid local module conflicts
# (a local file shadowing an installed package would otherwise be imported).
current_dir = str(Path(__file__).parent.absolute())
if current_dir in sys.path:
	sys.path.remove(current_dir)
if '' in sys.path:
	sys.path.remove('')

# Now import ultralytics from the installed package
import cv2

# Save all original OpenCV functions before ultralytics gets a chance to patch them.
original_cv2_functions = {}
for func_name in ['imread', 'imdecode', 'resize', 'cvtColor', 'imwrite', 'imshow']:
    if hasattr(cv2, func_name):
        original_cv2_functions[func_name] = getattr(cv2, func_name)
# 恢复原始的 OpenCV 函数，避免 ultralytics 补丁的影响
import numpy as np
def safe_imread(filename, flags=cv2.IMREAD_COLOR):
	"""Read an image from disk, returning None instead of raising.

	Reads the raw bytes with numpy and decodes them with the saved,
	un-patched cv2.imdecode — presumably so paths with non-ASCII
	characters load correctly on Windows (TODO confirm).
	"""
	try:
		raw_bytes = np.fromfile(filename, np.uint8)
		return original_cv2_functions['imdecode'](raw_bytes, flags)
	except Exception:
		return None

def safe_imdecode(buf, flags):
	"""Decode an in-memory encoded image buffer.

	Delegates to the original (pre-patch) cv2.imdecode; any exception
	is swallowed and None is returned instead.
	"""
	try:
		decoded = original_cv2_functions['imdecode'](buf, flags)
	except Exception:
		decoded = None
	return decoded

def safe_resize(src, dsize, dst=None, fx=0, fy=0, interpolation=cv2.INTER_LINEAR):
	"""Resize an image with the original (pre-patch) cv2.resize.

	Mirrors cv2.resize's positional signature; returns the resized
	image, or None if resizing fails for any reason.
	"""
	try:
		result = original_cv2_functions['resize'](src, dsize, dst, fx, fy, interpolation)
	except Exception:
		result = None
	return result

def safe_cvtColor(src, code, dst=None, dstCn=0):
	"""Convert an image between color spaces via the original cv2.cvtColor.

	Returns the converted image, or None when conversion fails.
	"""
	try:
		converted = original_cv2_functions['cvtColor'](src, code, dst, dstCn)
	except Exception:
		converted = None
	return converted

# Now import ultralytics (it may monkey-patch cv2 functions at import time).
from ultralytics import YOLO

# Immediately after the import, restore every saved original cv2 function.
for func_name, original_func in original_cv2_functions.items():
    setattr(cv2, func_name, original_func)

# Re-apply our safe wrappers on top, in case any of these were patched.
cv2.imread = safe_imread
cv2.imdecode = safe_imdecode
cv2.resize = safe_resize
cv2.cvtColor = safe_cvtColor

# ========== Auto-labeling configuration ==========
MODEL_PATH = Path('D:/pad230.pt')           # pad152.pt model path
IMAGE_FOLDER = Path('D:/yolo_dataset/images/train')    # Input image folder
LABEL_OUTPUT_DIR = Path('D:/yolo_dataset/labels/train')

# Detection parameters
CONFIDENCE_THRESHOLD = 0.3             # Confidence threshold
MIN_AREA_RATIO = 0.001                 # Minimum area ratio (0.1%)
MAX_AREA_RATIO = 0.95                  # Maximum area ratio (95%)

# Class mapping - pad152.pt model classes
CLASS_MAPPING = {
	0: "durian",    # Durian
	1: "person"     # Person
}

# Output class IDs (YOLO format)
OUTPUT_CLASS_IDS = {
	"durian": 0,
	"person": 1
}
# ==========================

def select_folder():
	"""Show a folder-picker dialog and return the chosen directory.

	Returns:
		Path of the selected folder, or None when the user cancels.
	"""
	# A hidden root window is required for the dialog to run standalone.
	dialog_root = tk.Tk()
	dialog_root.withdraw()

	chosen = filedialog.askdirectory(
		title="选择文件夹",
		initialdir="D:/"  # open the D: drive by default
	)

	dialog_root.destroy()

	return Path(chosen) if chosen else None

def is_valid_detection(x1, y1, x2, y2, img_width, img_height, min_area_ratio, max_area_ratio):
	"""Return True when a detection box passes size/shape sanity checks.

	A box is rejected when its area relative to the image falls outside
	[min_area_ratio, max_area_ratio], when either side is shorter than
	10 pixels, or when its aspect ratio is more extreme than 10:1 in
	either direction.
	"""
	box_w = x2 - x1
	box_h = y2 - y1

	# Relative area must stay inside the configured window.
	area_ratio = (box_w * box_h) / (img_width * img_height)
	if not (min_area_ratio <= area_ratio <= max_area_ratio):
		return False

	# Reject degenerate boxes (less than 10 px on a side).
	if box_w < 10 or box_h < 10:
		return False

	# Reject extreme elongation (more than 10:1 either way).
	return 0.1 <= box_w / box_h <= 10

def is_valid_image_file(file_path):
	"""Return True if the file opens as a structurally sound image.

	Uses PIL instead of OpenCV so the check is unaffected by the cv2
	monkey-patching performed at import time. Any failure — missing
	file, unreadable data, or dimensions outside (0, 10000] on either
	axis — yields False.
	"""
	try:
		from PIL import Image

		with Image.open(file_path) as im:
			w, h = im.size
			# Treat absurd dimensions as corrupt/bogus files.
			if not (0 < w <= 10000 and 0 < h <= 10000):
				return False
			# Walk the file so truncated data is detected.
			im.verify()
			return True
	except Exception:
		return False

def load_model(model_path):
	"""Load the YOLO model from disk.

	Prints a short summary (class count/names) for operator sanity
	checks. Returns the model instance, or None when loading or
	inspection fails.
	"""
	try:
		model = YOLO(model_path)
		print(f"✓ Successfully loaded pad152.pt model: {model_path}")

		# Emit the ultralytics summary plus our own class listing.
		model.info()
		print(f"✓ Model classes count: {len(model.names)}")
		print(f"✓ Model classes: {list(model.names.values())}")

		return model
	except Exception as e:
		print(f"✗ Model loading failed: {e}")
		return None

def process_images_to_yolo_hardcoded(input_folder=None, output_root=None):
	"""Copy jpg images from a source folder into a YOLO dataset layout.

	Creates <output_root>/images/train and <output_root>/labels/train
	(clearing any existing sub-folders of the output root first) and
	copies every .jpg/.jpeg image under a unique
	"<millisecond-timestamp>_<3-digit-random>.jpg" name.

	Args:
		input_folder: source folder path; when None a folder-picker dialog opens.
		output_root: dataset root directory; defaults to D:/yolo_dataset.

	Returns:
		True on success, False when no folder was chosen or no images found.
	"""
	# Let the user pick a folder when none was supplied.
	if input_folder is None:
		print("请选择要处理的图片文件夹...")
		INPUT_FOLDER = select_folder()
		if INPUT_FOLDER is None:
			print("❌ 未选择文件夹，程序退出")
			return False
	else:
		INPUT_FOLDER = Path(input_folder)

	print(f"✓ 已选择文件夹: {INPUT_FOLDER}")

	# Output root (overridable by the caller).
	OUTPUT_ROOT = Path(output_root) if output_root else Path('D:/yolo_dataset')

	# Clear existing sub-folders of the output root, or create it fresh.
	if OUTPUT_ROOT.exists():
		print(f"清理输出目录: {OUTPUT_ROOT}")
		for item in OUTPUT_ROOT.iterdir():
			if item.is_dir():
				try:
					shutil.rmtree(item)
					print(f"  删除文件夹: {item.name}")
				except Exception as e:
					print(f"  删除文件夹失败 {item.name}: {e}")
		print("✓ 输出目录清理完成")
	else:
		OUTPUT_ROOT.mkdir(parents=True, exist_ok=True)
		print(f"创建输出目录: {OUTPUT_ROOT}")

	print("=" * 60)
	print("图片处理工具（硬编码参数版）")
	print("=" * 60)
	print(f"输入文件夹: {INPUT_FOLDER}")
	print(f"输出目录: {OUTPUT_ROOT}")
	print(f"处理内容: 复制 jpg 图片到 train 文件夹")
	print("=" * 60)

	# Validate the input folder.
	if not INPUT_FOLDER.exists():
		print(f"错误: 找不到文件夹 {INPUT_FOLDER}")
		return False

	# Create the YOLO directory structure (train split only).
	print("创建YOLO目录结构...")
	img_dir = OUTPUT_ROOT / 'images' / 'train'
	label_dir = OUTPUT_ROOT / 'labels' / 'train'
	img_dir.mkdir(parents=True, exist_ok=True)
	label_dir.mkdir(parents=True, exist_ok=True)
	print(f"  创建: {img_dir}")
	print(f"  创建: {label_dir}")

	print("\n开始处理图片...")

	# Collect jpg/jpeg files. Filtering on the lower-cased suffix (instead
	# of one glob per case variant) avoids duplicate matches on
	# case-insensitive filesystems such as NTFS, where '*.jpg' and '*.JPG'
	# would both match the same file.
	image_files = sorted(
		p for p in INPUT_FOLDER.iterdir()
		if p.is_file() and p.suffix.lower() in ('.jpg', '.jpeg')
	)

	if not image_files:
		print(f"❌ 在文件夹 {INPUT_FOLDER} 中未找到 jpg 图片")
		return False

	print(f"✓ 找到 {len(image_files)} 张图片")

	# Copy images into img_dir under collision-free generated names.
	copied_count = 0
	for i, img_path in enumerate(image_files):
		try:
			# Generate a unique name. Retry when the same millisecond +
			# random number was already used: previously such a collision
			# silently overwrote an earlier copy via shutil.copy2.
			while True:
				timestamp = int(time.time() * 1000)
				random_num = random.randint(100, 999)
				new_name = f"{timestamp}_{random_num}.jpg"
				target_path = img_dir / new_name
				if not target_path.exists():
					break

			shutil.copy2(img_path, target_path)
			copied_count += 1

			# Progress output every 50 files and for the last file.
			if i % 50 == 0 or i == len(image_files) - 1:
				print(f"处理进度: {i+1}/{len(image_files)} ({(i+1)/len(image_files)*100:.1f}%) - 当前文件: {new_name}")

		except Exception as e:
			print(f"❌ 复制图片失败 {img_path.name}: {e}")

	print(f"\n🎉 图片处理完成!")
	print(f"📁 YOLO数据集位置: {OUTPUT_ROOT}")
	print(f"📊 总图片数: {copied_count}")
	print(f"📝 文件命名格式: [时间戳毫秒]_[三位随机数].jpg")

	# Summary statistics.
	img_count = len(list(img_dir.glob("*.jpg")))
	print(f"   train: {img_count} 图片")

	# Show a few example filenames.
	train_imgs = list(img_dir.glob("*.jpg"))
	if train_imgs:
		print(f"\n📋 文件名示例:")
		for img_path in train_imgs[:3]:
			print(f"   {img_path.name}")

	return True

def run_inference_with_torch(model_path, image_path):
	"""Run inference directly through PyTorch, bypassing ultralytics' cv2 patching.

	Loads the checkpoint from disk on every call (NOTE(review): this is
	expensive — consider loading once and reusing across images), reads
	the image, resizes it to a fixed 640x640 input, matches the input
	dtype to the model weights, and runs a forward pass.

	Returns:
		(outputs, (height, width)) on success — `outputs` is the raw model
		output, whose exact format is model-specific — or (None, None)
		on any failure.
	"""
	try:
		import torch
		import torch.nn.functional as F  # NOTE(review): F is currently unused

		# Load the checkpoint; the model object is stored under the 'model' key.
		checkpoint = torch.load(model_path, map_location='cpu')
		model = checkpoint['model']
		model.eval()

		# Inspect the weight dtype so the input tensor can be cast to match.
		model_dtype = next(model.parameters()).dtype
		print(f"Model dtype: {model_dtype}")

		# Basic structural diagnostics.
		print(f"Model type: {type(model)}")
		print(f"Model parameters count: {sum(p.numel() for p in model.parameters())}")

		# Read the image (cv2.imread is our safe_imread wrapper at this point).
		img = cv2.imread(str(image_path))
		if img is None:
			print(f"Failed to read image: {image_path}")
			return None, None

		# Preprocess: BGR -> RGB, resize to the fixed network input size,
		# HWC -> CHW, scale to [0, 1].
		img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
		img_resized = cv2.resize(img_rgb, (640, 640))
		img_tensor = torch.from_numpy(img_resized).permute(2, 0, 1).float() / 255.0
		img_tensor = img_tensor.unsqueeze(0)  # add batch dimension

		# Cast the input tensor to the same dtype as the model weights.
		if model_dtype == torch.float16:
			img_tensor = img_tensor.half()
		elif model_dtype == torch.float32:
			img_tensor = img_tensor.float()
		elif model_dtype == torch.float64:
			img_tensor = img_tensor.double()

		print(f"Input tensor dtype: {img_tensor.dtype}, shape: {img_tensor.shape}")

		# Forward pass (no gradients needed for inference).
		with torch.no_grad():
			try:
				outputs = model(img_tensor)
				print(f"Inference successful! Output type: {type(outputs)}")
				if isinstance(outputs, (list, tuple)):
					print(f"Output length: {len(outputs)}")
					for i, out in enumerate(outputs):
						print(f"  Output {i}: {type(out)}, shape: {out.shape if hasattr(out, 'shape') else 'N/A'}")
				elif hasattr(outputs, 'shape'):
					print(f"Output shape: {outputs.shape}")
				else:
					print(f"Output: {outputs}")
			except Exception as inference_error:
				print(f"Inference error: {inference_error}")
				# Retry with a different input format.
				try:
					print("Trying with different input format...")
					# Fall back to a plain float32 tensor on CPU.
					img_tensor_cpu = img_tensor.float().cpu()
					outputs = model(img_tensor_cpu)
					print(f"CPU float32 inference successful!")
				except Exception as cpu_error:
					print(f"CPU inference also failed: {cpu_error}")
					return None, None
			
		# Output post-processing is left to the caller; the format is
		# model-specific (assumed to be a standard YOLO head output).
		return outputs, img.shape[:2]  # original (height, width)

	except Exception as e:
		print(f"Torch inference failed: {e}")
		import traceback
		traceback.print_exc()
		return None, None

def process_images_with_torch(model_path, image_folder, label_output_dir, confidence_threshold, min_area_ratio, max_area_ratio):
	"""Process images through the direct-PyTorch path, bypassing ultralytics.

	NOTE(review): output parsing is not implemented yet — every
	successfully inferred image gets an EMPTY label file and
	`total_detections` stays 0. Also note run_inference_with_torch
	reloads the checkpoint for every image, which is very slow for
	large folders.
	"""
	image_folder = Path(image_folder)
	label_output_dir = Path(label_output_dir)

	if not image_folder.exists():
		print(f"✗ Image directory does not exist: {image_folder}")
		return

	if not label_output_dir.exists():
		print(f"✗ Label output directory does not exist: {label_output_dir}")
		return

	# Supported image extensions
	image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif')
	image_files = [f for f in image_folder.iterdir() if f.suffix.lower() in image_extensions]

	if not image_files:
		print("✗ No supported image files found")
		return

	# Drop files that PIL cannot open (corrupt or truncated images).
	valid_image_files = []
	for img_file in image_files:
		if is_valid_image_file(img_file):
			valid_image_files.append(img_file)
		else:
			print(f"⚠️ 跳过无效图片文件: {img_file.name}")

	if not valid_image_files:
		print("✗ No valid image files found after filtering")
		return

	print(f"✓ Starting to process {len(valid_image_files)} valid images with PyTorch...")
	print(f"✓ Detection parameters:")
	print(f"   - Confidence threshold: {confidence_threshold}")
	print(f"   - Minimum area ratio: {min_area_ratio}")
	print(f"   - Maximum area ratio: {max_area_ratio}")
	print(f"   - Supported classes: {list(CLASS_MAPPING.values())}")

	total_detections = 0
	processed_count = 0
	skipped_count = 0

	for i, img_path in enumerate(valid_image_files):
		try:
			# Direct PyTorch inference (reloads the model on every call).
			outputs, img_dims = run_inference_with_torch(model_path, img_path)
			
			if outputs is None:
				print(f"✗ Inference failed for {img_path}")
				skipped_count += 1
				continue
			
			img_height, img_width = img_dims
			
			# Placeholder: the raw model output is not parsed into boxes yet,
			# so only an empty annotation file is written for now.
			txt_name = img_path.stem + ".txt"
			txt_path = label_output_dir / txt_name
			
			with open(txt_path, 'w') as f:
				pass  # create an empty file
			
			processed_count += 1
			
			# Show progress
			if i % 50 == 0 or i == len(valid_image_files) - 1:
				print(f"✓ Processed: {i+1}/{len(valid_image_files)} - Current image")

		except Exception as e:
			print(f"✗ Processing failed {img_path}: {e}")
			skipped_count += 1

	print(f"\n✓ PyTorch processing completed!")
	print(f"✓ Statistics:")
	print(f"   - Total images found: {len(image_files)}")
	print(f"   - Valid images: {len(valid_image_files)}")
	print(f"   - Successfully processed: {processed_count} images")
	print(f"   - Skipped: {skipped_count} images")
	print(f"   - Total detections: {total_detections} objects")
	print(f"   - Average per image: {total_detections/max(processed_count, 1):.2f} objects")

def process_images(model, image_folder, label_output_dir, confidence_threshold, min_area_ratio, max_area_ratio):
	"""Batch process images and generate YOLO annotation files.

	Runs ultralytics inference on every valid image in image_folder and
	writes one .txt (normalized xywh, one line per kept detection) per
	image into label_output_dir; an empty file is written when nothing
	is detected. Temporarily chdir's into the image folder so the model
	receives bare relative filenames — presumably a workaround for
	path-handling issues in the patched cv2/ultralytics stack (TODO
	confirm); the original cwd is restored in the finally block.

	NOTE(review): the early `return`s inside the try block skip the
	summary statistics printed at the end of the function.
	"""
	image_folder = Path(image_folder)
	label_output_dir = Path(label_output_dir)

	if not image_folder.exists():
		print(f"✗ Image directory does not exist: {image_folder}")
		return

	if not label_output_dir.exists():
		print(f"✗ Label output directory does not exist: {label_output_dir}")
		return

	# Switch into the image directory so relative paths can be used.
	original_cwd = Path.cwd()
	os.chdir(image_folder)
	
	try:
		# Supported image extensions
		image_extensions = ('.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif')
		image_files = [f for f in image_folder.iterdir() if f.suffix.lower() in image_extensions]

		if not image_files:
			print("✗ No supported image files found")
			return

		# Drop files that PIL cannot open (corrupt or truncated images).
		valid_image_files = []
		for img_file in image_files:
			if is_valid_image_file(img_file):
				valid_image_files.append(img_file)
			else:
				print(f"⚠️ 跳过无效图片文件: {img_file.name}")

		if not valid_image_files:
			print("✗ No valid image files found after filtering")
			return

		print(f"✓ Starting to process {len(valid_image_files)} valid images...")
		print(f"✓ Detection parameters:")
		print(f"   - Confidence threshold: {confidence_threshold}")
		print(f"   - Minimum area ratio: {min_area_ratio}")
		print(f"   - Maximum area ratio: {max_area_ratio}")
		print(f"   - Supported classes: {list(CLASS_MAPPING.values())}")

		total_detections = 0
		processed_count = 0
		skipped_count = 0

		for i, img_path in enumerate(valid_image_files):
			try:
				# Use the bare filename (relative to the current working directory).
				img_filename = img_path.name
				
				# YOLO inference — pass the filename (relative path).
				results = model(img_filename, verbose=False)

				if results[0].boxes is None:
					# No objects detected, create empty annotation file
					txt_name = img_path.stem + ".txt"
					txt_path = label_output_dir / txt_name
					with open(txt_path, 'w') as f:
						pass  # Create empty file
					processed_count += 1
					continue

				# Re-read the image to get its dimensions for coordinate checks.
				img = cv2.imread(img_filename)
				if img is None:
					print(f"✗ Cannot load image for size check: {img_filename}")
					skipped_count += 1
					continue
				img_height, img_width = img.shape[:2]

				# Prepare YOLO annotation content
				yolo_lines = []
				valid_detections = 0

				boxes = results[0].boxes
				for j in range(len(boxes)):
					cls = int(boxes.cls[j])
					
					# Check if class is in our supported range
					if cls not in CLASS_MAPPING:
						continue

					conf = float(boxes.conf[j])
					if conf < confidence_threshold:
						continue

					x1, y1, x2, y2 = map(float, boxes.xyxy[j].tolist())
					x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

					# Ensure coordinates are within image bounds
					x1 = max(0, min(x1, img_width))
					y1 = max(0, min(y1, img_height))
					x2 = max(0, min(x2, img_width))
					y2 = max(0, min(y2, img_height))

					# Check if detection box is valid
					if not is_valid_detection(x1, y1, x2, y2, img_width, img_height, min_area_ratio, max_area_ratio):
						continue

					# Convert to YOLO format (normalized coordinates)
					x_center = ((x1 + x2) / 2) / img_width
					y_center = ((y1 + y2) / 2) / img_height
					width = (x2 - x1) / img_width
					height = (y2 - y1) / img_height

					# Get class name and output ID
					class_name = CLASS_MAPPING[cls]
					output_class_id = OUTPUT_CLASS_IDS[class_name]

					yolo_lines.append(f"{output_class_id} {x_center:.6f} {y_center:.6f} {width:.6f} {height:.6f}\n")
					valid_detections += 1

				# Write annotation file
				txt_name = img_path.stem + ".txt"
				txt_path = label_output_dir / txt_name

				with open(txt_path, 'w') as f:
					f.writelines(yolo_lines)

				total_detections += valid_detections
				processed_count += 1

				# Show progress
				if i % 50 == 0 or i == len(valid_image_files) - 1:
					print(f"✓ Processed: {i+1}/{len(valid_image_files)} - Current image detected {valid_detections} objects")

			except Exception as e:
				print(f"✗ Processing failed {img_path}: {e}")
				skipped_count += 1
	finally:
		# Always restore the original working directory.
		os.chdir(original_cwd)

	print(f"\n✓ Auto-labeling completed!")
	print(f"✓ Statistics:")
	print(f"   - Total images found: {len(image_files)}")
	print(f"   - Valid images: {len(valid_image_files)}")
	print(f"   - Successfully processed: {processed_count} images")
	print(f"   - Skipped: {skipped_count} images")
	print(f"   - Total detections: {total_detections} objects")
	print(f"   - Average per image: {total_detections/max(processed_count, 1):.2f} objects")

def create_dataset_yaml(label_output_dir):
	"""Write dataset.yaml at the dataset root.

	The root is taken to be two levels above the label directory
	(i.e. labels/train -> dataset root). The train split doubles as
	the validation split.
	"""
	dataset_root = Path(label_output_dir).parent.parent
	yaml_content = f"""# Pad152 Auto-Generated Dataset Configuration
# Generated automatically by auto_labeling_pad152.py

path: {dataset_root}  # dataset root dir
train: images/train  # train images (relative to 'path')
val: images/train    # val images (relative to 'path') 
test:  # test images (optional)

# Classes
nc: 2  # number of classes
names: ['durian', 'person']  # class names
"""

	yaml_path = dataset_root / "dataset.yaml"
	with open(yaml_path, 'w', encoding='utf-8') as cfg:
		cfg.write(yaml_content)

	print(f"✓ Dataset configuration file created: {yaml_path}")

def auto_labeling(output_root, use_torch=False):
	"""Run the auto-labeling stage over <output_root>/images/train.

	Labels go to <output_root>/labels/train and a dataset.yaml is
	generated afterwards. Returns False only when the ultralytics
	model fails to load; True otherwise.
	"""
	root = Path(output_root)
	image_folder = root / 'images' / 'train'
	label_output_dir = root / 'labels' / 'train'

	print("\n" + "=" * 60)
	print("开始自动标注流程")
	print("=" * 60)
	print(f"✓ Image directory: {image_folder}")
	print(f"✓ Label output directory: {label_output_dir}")
	print(f"✓ Model path: {MODEL_PATH}")
	print(f"✓ Method: {'PyTorch' if use_torch else 'Ultralytics'}")
	print("=" * 60)

	if not use_torch:
		# Classic ultralytics path: load once, then batch-process.
		model = load_model(MODEL_PATH)
		if model is None:
			return False
		process_images(model, image_folder, label_output_dir,
					   CONFIDENCE_THRESHOLD, MIN_AREA_RATIO, MAX_AREA_RATIO)
	else:
		# Direct PyTorch path that bypasses ultralytics' cv2 patching.
		print("使用 PyTorch 方法进行推理...")
		process_images_with_torch(str(MODEL_PATH), image_folder, label_output_dir,
								CONFIDENCE_THRESHOLD, MIN_AREA_RATIO, MAX_AREA_RATIO)

	# Emit the dataset configuration regardless of which path ran.
	create_dataset_yaml(label_output_dir)

	print(f"\n✓ Auto-labeling completed!")
	return True

def main(input_folder=None, output_root=None, output_root3=None):
	"""Run the full pipeline: copy images into a YOLO layout, then auto-label.

	Args:
		input_folder: source image folder (folder-picker dialog when None).
		output_root: dataset root to generate (defaults to D:/yolo_dataset).
		output_root3: optional existing dataset root; when given, the new
			dataset is merged into it after labeling.
	"""
	print("🚀 开始执行完整流水线：图片处理 -> 自动标注")
	print("=" * 80)

	# Step 1: copy/rename the source images into the YOLO folder layout.
	print("\n🖼️ 第一步：图片处理")
	print("-" * 40)
	step1_success = process_images_to_yolo_hardcoded(input_folder=input_folder, output_root=output_root)

	if not step1_success:
		print("❌ 第一步失败，停止执行")
		return

	# Step 2: auto-label the copied images (PyTorch method by default).
	print("\n🏷️ 第二步：自动标注")
	print("-" * 40)
	chosen_output_root = Path(output_root) if output_root else Path('D:/yolo_dataset')
	step2_success = auto_labeling(chosen_output_root, use_torch=True)

	if not step2_success:
		print("❌ 第二步失败")
		return

	print("\n" + "=" * 80)
	print("🎉 完整流水线执行成功！")
	print("=" * 80)

	# Final statistics (counted in the generated dataset, before any merge).
	OUTPUT_ROOT = Path(chosen_output_root)
	img_count = len(list((OUTPUT_ROOT / 'images' / 'train').glob("*.jpg")))
	label_count = len(list((OUTPUT_ROOT / 'labels' / 'train').glob("*.txt")))

	# Optionally merge the new dataset into an existing one.
	if output_root3:
		output_root3_path = Path(output_root3)
		if not output_root3_path.exists():
			print(f"📁 创建 output_root3 文件夹: {output_root3_path}")
			output_root3_path.mkdir(parents=True, exist_ok=True)

		# Ensure the destination train folders exist before merging.
		# (Previously bound via unused walrus assignments — simplified.)
		(output_root3_path / 'images' / 'train').mkdir(parents=True, exist_ok=True)
		(output_root3_path / 'labels' / 'train').mkdir(parents=True, exist_ok=True)

		merge_datasets(output_root3_path, str(OUTPUT_ROOT))

	print(f"📊 最终统计:")
	print(f"   - 图片数量: {img_count}")
	print(f"   - 标签文件: {label_count}")
	print(f"   - 数据集位置: {OUTPUT_ROOT}")
	print(f"   - 配置文件: {OUTPUT_ROOT}/dataset.yaml")

	print(f"\n✅ 使用说明:")
	print(f"1. YOLO数据集已生成在 {OUTPUT_ROOT}")
	print(f"2. 可在 labels/train/ 目录检查自动生成的标注")
	print(f"3. 使用 labelImg 等工具进行标注校正")
	print(f"4. 使用 dataset.yaml 进行模型训练")

def merge_datasets(old_data_path, new_data_path):
	"""Merge the freshly generated dataset into an existing one.

	Copies every .jpg (and its matching label .txt, when one exists)
	from <new_data_path>/{images,labels}/train into the corresponding
	folders under <old_data_path>.

	Args:
		old_data_path: destination dataset root (grown in place).
		new_data_path: source dataset root to merge from.
	"""
	old_img_dir = Path(old_data_path) / 'images' / 'train'
	old_label_dir = Path(old_data_path) / 'labels' / 'train'

	# Robustness: create the destination layout if it does not exist yet,
	# otherwise copy2 / the final glob would raise FileNotFoundError.
	old_img_dir.mkdir(parents=True, exist_ok=True)
	old_label_dir.mkdir(parents=True, exist_ok=True)

	new_img_dir = Path(new_data_path) / 'images' / 'train'
	new_label_dir = Path(new_data_path) / 'labels' / 'train'

	if new_img_dir.exists():
		for img_file in new_img_dir.glob('*.jpg'):
			shutil.copy2(img_file, old_img_dir)
			label_file = new_label_dir / f"{img_file.stem}.txt"
			if label_file.exists():
				shutil.copy2(label_file, old_label_dir)

	print(f"数据集合并完成，共 {len(list(old_img_dir.glob('*.jpg')))} 张图片")

# Run the full pipeline (image copy -> auto-label -> yaml) when executed as a script.
if __name__ == "__main__":
	main()