from components.c_log import LOG, delete_logs_if_debug
from ultralytics import YOLO
import cv2
import os
from tqdm import tqdm
import shutil
from sklearn.model_selection import train_test_split
from swanlab import init as swanlab_init, log as swanlab_log
from components.c_config import app_config

# --- Module-level training configuration ---
TAG = "train"  # log tag for this module
# SwanLab project name from app config.
# NOTE(review): the config attribute is spelled "swanble" — looks like a typo
# for "swanlab" in the config schema; confirm before renaming there.
SWANLAB_PROJECT = app_config.train.swanble_project_name
# Detection class names (the original comment here, "模型保存路径"/"model save
# path", was misplaced — this is the class list, presumably matching the
# label indices in datasets.yaml; verify against the dataset config).
CLASS_NAMES = ['person', 'down','10+','dog']
DATASET_YAML = "datasets/datasets.yaml"  # YOLO dataset config consumed by model.train()
PRE_TRAINED_MODEL_NAME = app_config.train.pre_trained_model_name  # base weights under model/
TRAINED_MODEL_NAME = f"{app_config.train.trained_model_name}-{app_config.train.model_version}"  # output weight name

def _validate_images():
    """Quarantine unreadable or truncated images before training.

    Scans datasets/images/train and datasets/images/val. Any .jpg/.jpeg/.png
    that cv2 cannot decode, or any JPEG missing its end-of-image (EOI)
    trailer ``\\xff\\xd9`` (i.e. a truncated download/copy), is moved to
    datasets/corrupted_images so the YOLO data loader never sees it. A
    summary of moved files is written to corrupted_images.log.
    """
    corrupt_files = []
    image_dirs = [
        os.path.abspath("datasets/images/train"),
        os.path.abspath("datasets/images/val"),
    ]

    # Directory that receives quarantined files.
    corrupted_dir = os.path.abspath("datasets/corrupted_images")
    os.makedirs(corrupted_dir, exist_ok=True)

    for img_dir in image_dirs:
        for root, _, files in os.walk(img_dir):
            for file in tqdm(files, desc=f"验证 {os.path.basename(img_dir)} 图像"):
                if not file.lower().endswith(('.jpg', '.jpeg', '.png')):
                    continue
                img_path = os.path.join(root, file)
                try:
                    if cv2.imread(img_path) is None:
                        raise ValueError("无法读取图像文件")

                    # Verify the JPEG EOI marker (last two bytes) to catch
                    # truncated files. (The original comment claimed this
                    # checked EXIF data — it does not.)
                    if img_path.lower().endswith(('.jpg', '.jpeg')):
                        with open(img_path, 'rb') as f:
                            f.seek(-2, os.SEEK_END)
                            if f.read() != b'\xff\xd9':
                                raise ValueError("不完整的JPEG文件")

                except Exception as e:
                    # Broad catch is intentional: any failure mode (decode
                    # error, OSError from seek on an empty file, ...) means
                    # the file should be quarantined, not crash the run.
                    dest_path = os.path.join(corrupted_dir, file)
                    shutil.move(img_path, dest_path)
                    corrupt_files.append(f"{img_path} -> {dest_path}: {str(e)}")

    if corrupt_files:
        print(f"\n[WARN] 发现 {len(corrupt_files)} 个损坏图像文件（已移动到 corrupted_images 目录）")
        with open("corrupted_images.log", "w") as f:
            f.write("\n".join(corrupt_files))
        print("详细日志已保存到 corrupted_images.log")


def run():
    """Validate the environment and dataset, then train and save a YOLO model.

    Steps: require CUDA, require the dataset YAML, quarantine corrupt
    images, initialise SwanLab tracking, train with Ultralytics, and save
    the final weights to model/<TRAINED_MODEL_NAME>.pt.

    Raises:
        RuntimeError: if no CUDA device is available.
        FileNotFoundError: if DATASET_YAML does not exist.

    Training errors are reported to stdout and swallowed (original
    behaviour preserved — callers do not expect an exception from a
    failed training run).
    """
    # Deferred import so merely importing this module does not require torch.
    import torch
    if not torch.cuda.is_available():
        raise RuntimeError("CUDA不可用，请检查GPU驱动和PyTorch安装")

    if not os.path.exists(DATASET_YAML):
        raise FileNotFoundError(f"数据集配置文件 {DATASET_YAML} 不存在")

    _validate_images()

    # Single source of truth for the hyper-parameters, so the values logged
    # to SwanLab always match what model.train() actually receives.
    # BUG FIX: the original logged batch_size=8 while training with batch=16.
    hparams = {"epochs": 10, "batch": 16, "device": 0}

    swanlab_init(
        experiment_name=f"{SWANLAB_PROJECT}",
        project=SWANLAB_PROJECT,
        config={
            "model": PRE_TRAINED_MODEL_NAME,
            "classes": CLASS_NAMES,
            "epochs": hparams["epochs"],
            "batch_size": hparams["batch"],
            "device": str(hparams["device"]),
        }
    )

    # Load the pre-trained weights and pin the model to the first GPU.
    model = YOLO(f"model/{PRE_TRAINED_MODEL_NAME}.pt").to('cuda:0')

    def log_validation_metrics(validator):
        # Mirror Ultralytics' box mAP metrics into SwanLab after each
        # validation pass.
        swanlab_log({
            "metrics/mAP50": validator.metrics.box.map50,
            "metrics/mAP50-95": validator.metrics.box.map
        })
    model.add_callback("on_val_end", log_validation_metrics)

    try:
        model.train(
            data=DATASET_YAML,
            epochs=hparams["epochs"],
            imgsz=320,
            batch=hparams["batch"],
            device=hparams["device"],
            workers=8,
            project="train-logs",
            name=TRAINED_MODEL_NAME,
            save=True,
            exist_ok=True,
            amp=True,
            cache="disk",
            optimizer="AdamW",
            lr0=0.001,
            seed=42
        )
        model.save(f"model/{TRAINED_MODEL_NAME}.pt")
    except Exception as e:
        # Best-effort diagnostics; do not re-raise (see docstring).
        print(f"训练过程中出现错误: {e}")
        if "not enough values to unpack" in str(e):
            print("错误原因：数据集格式不正确，请检查 train.txt 和 labels 文件")
            print("建议：按照 https://docs.ultralytics.com/datasets 重新整理数据集")


    
