"""
优化的训练脚本 - 集成所有性能优化
提高训练速度的完整解决方案
"""

import matplotlib
import matplotlib.pyplot as plt
import sys
import os
from pathlib import Path
from multiprocessing import freeze_support
import cv2
import numpy as np
import torch
import time
import psutil

# Add the project root to sys.path so local packages resolve
sys.path.append(r'D:\CodeRode\project\EmbedTrack-master')

# Optimization configuration helpers
from optimization_config import get_optimized_training_config, get_preset_config
from embedtrack.train.run_training_pipeline import (
    DataConfig,
    ModelConfig,
    TrainConfig,
    run_pipeline,
)
from embedtrack.datasets.prepare_data import prepare_ctc_data

# Use a non-interactive matplotlib backend (no display required)
matplotlib.use("Agg")

def setup_optimized_environment():
    """Prepare the process for fast training.

    Reads the optimization config, sets thread-related environment
    variables, enables cuDNN autotuning and TF32 on CUDA machines, and
    prints a summary of the available GPUs. Returns nothing; all effects
    are side effects (os.environ, torch backend flags, stdout).
    """
    banner = "=" * 80
    print(banner)
    print("设置优化的训练环境...")

    cfg = get_optimized_training_config()

    # Tolerate duplicate OpenMP runtimes (common with conda + torch on
    # Windows) and cap OMP threads at the configured worker count.
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
    os.environ['OMP_NUM_THREADS'] = str(cfg['num_workers'])

    if not torch.cuda.is_available():
        print("CUDA不可用，将使用CPU训练")
    else:
        # cuDNN autotune: picks the fastest kernels for fixed input
        # shapes, trading away determinism.
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
        torch.backends.cudnn.deterministic = False
        torch.cuda.empty_cache()

        # TF32 matmul/conv: large speedup on Ampere+ GPUs with a minor
        # precision loss.
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

        gpu_count = torch.cuda.device_count()
        print(f"CUDA可用，GPU数量: {gpu_count}")
        for idx in range(gpu_count):
            props = torch.cuda.get_device_properties(idx)
            print(f"  GPU {idx}: {props.name} ({props.total_memory / 1024**3:.1f}GB)")

    print("环境设置完成")
    print(banner)

def get_all_sequences(train_dir):
    """Return the sorted names of sequence directories under *train_dir*.

    Directories whose name ends in "_GT" (ground-truth folders) are
    excluded. If *train_dir* does not exist, it is created and an empty
    list is returned.
    """
    root = Path(train_dir)
    if not root.exists():
        print(f"警告：{root} 不存在，已自动创建。")
        root.mkdir(parents=True, exist_ok=True)
        return []

    return sorted(
        entry.name
        for entry in root.iterdir()
        if entry.is_dir() and not entry.name.endswith("_GT")
    )

def create_optimized_train_config(data_set, opt_config):
    """Build a TrainConfig for *data_set*, tuned by *opt_config*.

    Uses the module-level MODEL_PATH / MODEL_NAME / N_EPOCHS globals for
    the save location and epoch count. The Fluo-C2DL-MSC dataset gets a
    larger crop with a halved batch size to fit in GPU memory; all other
    datasets use the configured batch size with a 256-pixel crop.
    """
    save_dir = os.path.join(MODEL_PATH, data_set, MODEL_NAME)

    base_batch = opt_config['batch_size']
    if data_set == "Fluo-C2DL-MSC":
        # Bigger crops need smaller batches to stay within GPU memory.
        crop_size, display_it = 512, 200
        batch_size = max(1, base_batch // 2)
    else:
        crop_size, display_it = 256, 1000
        batch_size = base_batch

    # Gradient-accumulation factor, shared by train and validation.
    multiplier = opt_config['virtual_batch_multiplier']

    # Optionally cap dataset sizes for quick experiments.
    small = opt_config.get('use_small_dataset', False)
    train_size = 3000 if small else None
    val_size = 2600 if small else None

    return TrainConfig(
        save_dir,
        crop_size=crop_size,
        center="medoid",
        resume_training=False,
        train_size=train_size,
        train_batch_size=batch_size,
        virtual_train_batch_multiplier=multiplier,
        val_size=val_size,
        val_batch_size=batch_size,
        virtual_val_batch_multiplier=multiplier,
        n_epochs=N_EPOCHS,
        display=False,  # no live plotting: faster training
        display_it=display_it,
        learning_rate=opt_config['learning_rate'],
    )

def monitor_system_resources():
    """Print a snapshot of CPU, RAM and (if present) GPU memory usage."""
    # One-second sampling window gives a meaningful CPU reading.
    cpu = psutil.cpu_percent(interval=1)
    ram = psutil.virtual_memory()

    print(f"系统资源监控:")
    print(f"  CPU使用率: {cpu:.1f}%")
    print(f"  内存使用: {ram.percent:.1f}% ({ram.used / 1024**3:.1f}GB / {ram.total / 1024**3:.1f}GB)")

    if torch.cuda.is_available():
        gib = 1024 ** 3
        for dev in range(torch.cuda.device_count()):
            used = torch.cuda.memory_allocated(dev) / gib
            reserved = torch.cuda.memory_reserved(dev) / gib
            capacity = torch.cuda.get_device_properties(dev).total_memory / gib
            # allocated / reserved / total, all in GiB
            print(f"  GPU {dev} 内存: {used:.2f}GB / {reserved:.2f}GB / {capacity:.2f}GB")

# --- Global configuration -------------------------------------------------
# All paths are resolved relative to this script's own directory.
FILE_PATH = Path(__file__)
PROJECT_PATH = str(FILE_PATH.parent)
RAW_DATA_PATH = os.path.join(PROJECT_PATH, "ctc_raw_data/train")  # raw CTC input data
DATA_PATH_DEST = os.path.join(PROJECT_PATH, "data")               # prepared training data
MODEL_PATH = os.path.join(PROJECT_PATH, "models")                 # trained model output

USE_SILVER_TRUTH = True   # use silver-truth (ST) annotations when preparing data
TRAIN_VAL_SPLIT = 0.2     # fraction of data held out for validation
N_EPOCHS = 12
MODEL_NAME = "optimized_adam_norm_onecycle_" + str(N_EPOCHS)

# Datasets to train; each is expected under DATA_PATH_DEST/<name>/train.
DATA_SETS = [
    "hela",
    "pc3", 
    "raw",
]

def main():
    """Train every dataset in DATA_SETS with the optimized configuration.

    For each dataset: ensure prepared sequences exist (preparing them from
    the raw CTC data if missing), build the data/train/model configs, run
    the training pipeline, and report timings. A failure on one dataset is
    logged and does not abort the remaining datasets.
    """
    import traceback  # stdlib; used only for failure diagnostics

    freeze_support()

    setup_optimized_environment()

    # Preset geared towards short wall-clock training time.
    opt_config = get_preset_config('fast_training')
    print(f"使用优化配置: {opt_config}")

    total_start_time = time.time()

    for data_set in DATA_SETS:
        print(f"\n{'='*60}")
        print(f"开始训练数据集: {data_set}")
        print(f"{'='*60}")

        dataset_start_time = time.time()

        # Discover prepared sequences; prepare them from raw data if absent.
        train_dir = os.path.join(DATA_PATH_DEST, data_set, "train")
        train_val_sequences = get_all_sequences(train_dir)

        if not train_val_sequences:
            print(f"未检测到{data_set}的序列，自动从原始数据准备...")
            raw_data_path = os.path.join(RAW_DATA_PATH, data_set)
            prepare_ctc_data(
                source_path=raw_data_path,
                result_path=DATA_PATH_DEST,
                keep_st=USE_SILVER_TRUTH,
                val_split=TRAIN_VAL_SPLIT,
                sub_dir_names=None,
            )
            train_val_sequences = get_all_sequences(train_dir)

            if not train_val_sequences:
                print(f"自动准备后仍未检测到{data_set}的序列，跳过。")
                continue

        print(f"{data_set} 序列: {train_val_sequences}")

        # The simulated dataset ships full ground truth, so silver truth
        # is disabled for it.
        use_silver_truth = False if data_set == "Fluo-N2DH-SIM+" else USE_SILVER_TRUTH
        data_config = DataConfig(
            RAW_DATA_PATH,
            data_set,
            DATA_PATH_DEST,
            use_silver_truth=use_silver_truth,
            train_val_sequences=train_val_sequences,
            train_val_split=TRAIN_VAL_SPLIT,
        )

        train_config = create_optimized_train_config(data_set, opt_config)

        # Model: 1 input channel, [4, 1] segmentation heads, 2 tracking classes.
        INPUT_CHANNELS = 1
        N_SEG_CLASSES = [4, 1]
        N_TRACK_CLASSES = 2
        model_config = ModelConfig(INPUT_CHANNELS, N_SEG_CLASSES, N_TRACK_CLASSES)

        # Log CPU/RAM/GPU usage before starting this dataset.
        monitor_system_resources()

        try:
            run_pipeline(data_config, train_config, model_config)
            dataset_time = time.time() - dataset_start_time
            print(f"{data_set} 训练完成，耗时: {dataset_time:.2f}秒")
        except Exception as e:
            # BUGFIX: the original `continue` here skipped the GPU-cache
            # cleanup and plt.close below, leaking memory across datasets.
            # Cleanup now always runs via `finally`.
            print(f"{data_set} 训练失败: {str(e)}")
            traceback.print_exc()
        finally:
            # Release GPU memory and matplotlib figures even on failure.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            plt.close("all")

    total_time = time.time() - total_start_time
    print(f"\n{'='*80}")
    print(f"所有数据集训练完成！")
    print(f"总耗时: {total_time:.2f}秒 ({total_time/60:.2f}分钟)")
    print(f"{'='*80}")

if __name__ == '__main__':
    main()