#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
import subprocess
import argparse
import time
import torch
import numpy as np
import random
from tqdm import tqdm
import matplotlib.pyplot as plt
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate import DistributedDataParallelKwargs

def main():
    """Entry point: parse CLI options, optionally process spatial-correlation
    data, then launch training with either DeepSpeed or Accelerate.

    Side effects: sets process-wide random seeds and CUDA/tokenizer environment
    variables, and creates the output directory.
    """
    # Parse command-line arguments (user-facing help strings intentionally
    # left in Chinese to match the rest of the tooling).
    parser = argparse.ArgumentParser(description='空间相关性数据处理和模型训练集成脚本')
    parser.add_argument('--skip_processing', action='store_true', help='跳过数据处理步骤')
    parser.add_argument('--skip_training', action='store_true', help='跳过模型训练步骤')
    # NOTE: store_true combined with default=True means this flag is always
    # True and has no effect; it is kept only so existing command lines keep
    # parsing. Backend selection is driven by --use_deepspeed alone (below).
    parser.add_argument('--no_deepspeed', action='store_true', default=True, help='不使用DeepSpeed进行训练，使用accelerate（默认）')
    parser.add_argument('--use_deepspeed', action='store_true', help='使用DeepSpeed进行训练，而不是accelerate')
    parser.add_argument('--llm_model', type=str, default='Qwen3-8B', 
                        choices=['Qwen3-4B', 'Qwen3-8B', 'Qwen-1.7B'], 
                        help='选择使用的LLM模型，可选Qwen3-4B、Qwen3-8B或Qwen-1.7B')
    parser.add_argument('--batch_size', type=int, default=8, help='训练批次大小')
    parser.add_argument('--eval_batch_size', type=int, default=16, help='评估批次大小')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='学习率')
    parser.add_argument('--train_epochs', type=int, default=100, help='训练轮数')
    parser.add_argument('--patience', type=int, default=10, help='早停耐心值')
    parser.add_argument('--d_model', type=int, default=256, help='模型隐藏层维度')
    parser.add_argument('--n_heads', type=int, default=8, help='注意力头数量')
    parser.add_argument('--d_ff', type=int, default=512, help='前馈网络维度')
    parser.add_argument('--n_probes', type=int, default=32, help='探头数量')
    parser.add_argument('--llm_layers', type=int, default=6, help='LLM层数')
    parser.add_argument('--num_workers', type=int, default=4, help='数据加载器工作进程数')
    # NOTE: also always True for the same store_true/default=True reason;
    # downstream scripts receive --use_transformer unconditionally.
    parser.add_argument('--use_transformer', action='store_true', default=True, help='是否使用Transformer替代TwoStageAttention')
    parser.add_argument('--n_transformer_layers', type=int, default=4, help='Transformer编码器层数')
    
    # Performance and memory optimization options
    parser.add_argument('--force_zero3', action='store_true', help='强制使用DeepSpeed ZeRO-3优化')
    parser.add_argument('--gradient_checkpointing', action='store_true', help='启用梯度检查点以节省内存')
    parser.add_argument('--use_bf16', action='store_true', default=True, help='使用BFloat16精度训练')
    parser.add_argument('--offload_optimizer', action='store_true', help='将优化器状态卸载到CPU')
    parser.add_argument('--offload_param', action='store_true', help='将参数卸载到CPU')
    parser.add_argument('--reduce_micro_bs', action='store_true', help='减小微批次大小以节省内存')
    
    args = parser.parse_args()
    
    # Qwen3-8B is the largest supported model: force the memory-saving
    # options on and cap the batch sizes so it fits on the available GPUs.
    if args.llm_model == 'Qwen3-8B':
        if not args.use_deepspeed and not args.force_zero3:
            print("自动为Qwen3-8B启用ZeRO-3优化")
            args.force_zero3 = True
        
        if not args.gradient_checkpointing:
            print("自动为Qwen3-8B启用梯度检查点")
            args.gradient_checkpointing = True
            
        if not args.offload_optimizer:
            print("自动为Qwen3-8B启用优化器状态卸载")
            args.offload_optimizer = True
            
        if args.batch_size > 2:
            print(f"警告: Qwen3-8B模型批次大小过大({args.batch_size})，自动减小到2")
            args.batch_size = 2
        
        if args.eval_batch_size > 4:
            print(f"警告: Qwen3-8B模型评估批次大小过大({args.eval_batch_size})，自动减小到4")
            args.eval_batch_size = 4
    
    # Fix random seeds for reproducibility (Python, PyTorch CPU, NumPy, and —
    # since this is a multi-GPU script — every CUDA device as well).
    fix_seed = 2023
    random.seed(fix_seed)
    torch.manual_seed(fix_seed)
    np.random.seed(fix_seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(fix_seed)
    
    # Environment setup: pin the 4 visible GPUs, silence tokenizer fork
    # warnings, and cap CUDA allocator split size to reduce fragmentation.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    os.environ['CURL_CA_BUNDLE'] = ''
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:64"

    # Project root is the directory containing this script.
    root_dir = os.path.dirname(os.path.abspath(__file__))
    print(f"项目根目录: {root_dir}")

    # Output directory for the processed NPZ data.
    output_dir = os.path.join(root_dir, "dataset/spatial_corr")
    os.makedirs(output_dir, exist_ok=True)
    print(f"输出目录: {output_dir}")

    # Step 1: process the spatial-correlation data (MAT -> NPZ).
    if not args.skip_processing:
        process_spatial_corr_data(root_dir, output_dir)
    else:
        print("跳过数据处理步骤")

    # Step 2: train the model.
    if not args.skip_training:
        # BUG FIX: the original condition `args.use_deepspeed and not
        # args.no_deepspeed` was always False because --no_deepspeed defaults
        # to True, making the DeepSpeed branch unreachable even when
        # --use_deepspeed was passed. Select on --use_deepspeed alone.
        if args.use_deepspeed:
            print("使用DeepSpeed进行训练")
            train_with_deepspeed(root_dir, output_dir, args)
        else:
            print("使用Accelerate进行训练")
            train_without_deepspeed(root_dir, output_dir, args)
    else:
        print("跳过模型训练步骤")


def process_spatial_corr_data(root_dir, output_dir):
    """Convert the spatial-correlation MAT files under <root_dir>/dy into a
    single NPZ file in *output_dir*.

    Import strategy: try a regular import of the processing function from the
    scripts directory first; if that fails, load the script file directly via
    importlib. Exits the process with status 1 when processing raises or the
    expected NPZ file is missing afterwards.
    """
    print("\n步骤1: 处理空间相关性数据...")

    # Make <root_dir>/scripts importable for the regular-import path below.
    scripts_dir = os.path.join(root_dir, "scripts")
    sys.path.append(scripts_dir)

    try:
        from process_spatial_corr_data import process_spatial_correlation_data
        print("成功导入process_spatial_correlation_data函数")
    except ImportError as import_err:
        print(f"导入错误: {import_err}")
        print("尝试替代导入方式...")
        # Fallback: load the script straight from its file path as a module.
        import importlib.util
        script_path = os.path.join(scripts_dir, "process_spatial_corr_data.py")
        spec = importlib.util.spec_from_file_location("process_spatial_corr_data", script_path)
        if spec is None:
            raise ImportError(f"无法找到脚本文件: {script_path}")

        loaded = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(loaded)
        process_spatial_correlation_data = loaded.process_spatial_correlation_data
        print("使用替代方式成功导入函数")

    # Fixed windowing parameters for the conversion.
    input_dir = os.path.join(root_dir, "dy")
    window_size, overlap = 20, 10

    try:
        process_spatial_correlation_data(
            input_dir=input_dir,
            output_dir=output_dir,
            window_size=window_size,
            overlap=overlap
        )

        # Sanity check: the conversion must have produced the NPZ file.
        npz_file = os.path.join(output_dir, "spatial_correlation_data.npz")
        if not os.path.exists(npz_file):
            raise FileNotFoundError(f"数据文件未生成: {npz_file}")

        print(f"数据处理完成，NPZ文件保存在: {npz_file}")

    except Exception as err:
        print(f"数据处理失败: {err}")
        sys.exit(1)

def _build_zero3_config(args):
    """Build the DeepSpeed ZeRO-3 config dict from the parsed CLI options.

    Offload sections are inserted only when the corresponding flag is set,
    replacing the original build-with-None-then-prune construction.
    """
    config = {
        "bf16": {"enabled": args.use_bf16, "auto_cast": True},
        "zero_optimization": {
            "stage": 3,
            "overlap_comm": True,
            "contiguous_gradients": True,
            "sub_group_size": 1e9,
            "stage3_gather_16bit_weights_on_model_save": True
        },
        "gradient_accumulation_steps": "auto",
        "train_batch_size": "auto",
        # A micro batch of 1 trades throughput for memory when requested.
        "train_micro_batch_size_per_gpu": 1 if args.reduce_micro_bs else "auto",
        "optimizer": {"type": "AdamW", "params": {"lr": "auto", "weight_decay": 1e-4}}
    }
    if args.offload_optimizer:
        config["zero_optimization"]["offload_optimizer"] = {"device": "cpu", "pin_memory": True}
    if args.offload_param:
        config["zero_optimization"]["offload_param"] = {"device": "cpu", "pin_memory": True}
    return config


def train_with_deepspeed(root_dir, output_dir, args):
    """Launch run_spatial_corr.py under the `deepspeed` CLI launcher.

    Falls back to train_without_deepspeed() when DeepSpeed is not installed.
    Streams the child's combined stdout/stderr to this process, and exits
    with the child's non-zero return code on training failure (the original
    printed the failure but still let the script exit 0, masking failures
    from pipeline callers — inconsistent with process_spatial_corr_data,
    which exits 1 on error).
    """
    print(f"\n步骤2: 使用DeepSpeed训练模型 (使用 {args.llm_model})...")
    
    # Check that DeepSpeed is importable; otherwise fall back to Accelerate.
    try:
        import deepspeed
        print("已安装DeepSpeed，版本:", deepspeed.__version__)
    except ImportError:
        print("警告: 未安装DeepSpeed，尝试使用普通方式训练")
        train_without_deepspeed(root_dir, output_dir, args)
        return
    
    # Resolve the DeepSpeed JSON config path (ZeRO-3 preferred, else ZeRO-2).
    if args.force_zero3:
        ds_config_path = os.path.join(root_dir, "ds_config_spatial_corr.json")
        if not os.path.exists(ds_config_path):
            print(f"找不到ZeRO-3配置文件，将创建临时配置文件")
            # Write a temporary ZeRO-3 config reflecting the CLI options.
            import json
            temp_config_path = os.path.join(root_dir, "temp_ds_config.json")
            with open(temp_config_path, 'w') as f:
                json.dump(_build_zero3_config(args), f, indent=2)
            ds_config_path = temp_config_path
            print(f"已创建临时DeepSpeed配置: {ds_config_path}")
        else:
            print(f"使用专用ZeRO-3 DeepSpeed配置: {ds_config_path}")
    else:
        ds_config_path = os.path.join(root_dir, "ds_config_zero2.json")
        print(f"使用ZeRO-2配置: {ds_config_path}")
    
    # Assemble the full launcher command line.
    cmd_args = [
        # Launch via the deepspeed CLI on a single node with 4 GPUs.
        "deepspeed",
        "--num_nodes=1",
        "--num_gpus=4",
        "--master_addr=localhost",
        "--master_port=29500",
        
        # DeepSpeed configuration file
        f"--deepspeed={ds_config_path}",
        
        # Training script
        os.path.join(root_dir, "run_spatial_corr.py"),
        
        # Model and task parameters
        "--task_name", "spatial_corr_prediction",
        "--data", "SPATIAL_CORR",
        "--root_path", output_dir,
        "--data_path", "spatial_correlation_data.npz",
        "--checkpoints", os.path.join(root_dir, "checkpoints"),
        
        # Sequence-length parameters
        "--seq_len", "20",
        "--label_len", "10",
        "--pred_len", "10",
        
        # Model-structure parameters
        "--d_model", str(args.d_model),
        "--n_heads", str(args.n_heads),
        "--d_ff", str(args.d_ff),
        "--n_probes", str(args.n_probes),
        "--llm_model", args.llm_model,  # forward the selected LLM
        "--llm_layers", str(args.llm_layers),
        "--use_transformer",
        "--n_transformer_layers", str(args.n_transformer_layers),
        
        # Training parameters
        "--batch_size", str(args.batch_size),
        "--eval_batch_size", str(args.eval_batch_size),
        "--learning_rate", str(args.learning_rate),
        "--train_epochs", str(args.train_epochs),
        "--patience", str(args.patience),
        "--loss", "kl",
        "--model_comment", f"spatial_corr_only_{args.llm_model.lower().replace('-', '_')}",  # embed model name in the comment
        "--num_workers", str(args.num_workers)
    ]
    
    # Optional flags forwarded to the training script.
    if args.gradient_checkpointing:
        cmd_args.append("--gradient_checkpointing")
    if args.use_bf16:
        cmd_args.append("--use_amp")
    
    # Defensive: ensure every element is a string for subprocess.
    cmd_args = [str(arg) for arg in cmd_args]
    
    print("开始训练，使用以下命令:")
    print(" ".join(cmd_args))
    
    try:
        # Stream the child's output line by line; the context manager closes
        # the stdout pipe deterministically when the child exits.
        with subprocess.Popen(
            cmd_args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
            bufsize=1
        ) as process:
            for line in process.stdout:
                print(line.strip())
            process.wait()
        
        if process.returncode == 0:
            print("训练完成!")
        else:
            print(f"训练失败，返回码: {process.returncode}")
            # BUG FIX: propagate the child's failure instead of exiting 0.
            # (SystemExit is a BaseException, so the handler below won't
            # swallow it.)
            sys.exit(process.returncode)
            
    except Exception as e:
        print(f"执行训练时出错: {e}")
        sys.exit(1)


def train_without_deepspeed(root_dir, output_dir, args):
    """Run run_spatial_corr.py's main() in-process (Accelerate path).

    Temporarily patches sys.argv with the training options, imports the
    training module from the project root, and restores sys.argv afterwards
    regardless of outcome. Exceptions are reported with a traceback but not
    re-raised.
    """
    print(f"\n步骤2: 使用Accelerate训练模型 (使用 {args.llm_model})...")

    # Make run_spatial_corr importable from the project root.
    sys.path.append(root_dir)

    # Visual separator so the training output stands out in the logs.
    banner = "=" * 80
    print("\n" + banner)
    print(f"【启动Accelerate训练】模型: {args.llm_model}, 批次大小: {args.batch_size}")
    print(banner + "\n")

    # Report per-GPU memory state before training starts.
    if torch.cuda.is_available():
        gpu_count = torch.cuda.device_count()
        print(f"CUDA可用，设备数量: {gpu_count}")
        gib = 1024 ** 3
        for idx in range(gpu_count):
            props = torch.cuda.get_device_properties(idx)
            print(f"GPU {idx}: {torch.cuda.get_device_name(idx)}")
            print(f"  总内存: {props.total_memory / gib:.2f} GB")
            print(f"  当前分配: {torch.cuda.memory_allocated(idx) / gib:.2f} GB")
            print(f"  当前缓存: {torch.cuda.memory_reserved(idx) / gib:.2f} GB")

    # Build the patched argv as (flag, value) pairs, then flatten.
    saved_argv = sys.argv.copy()
    option_pairs = [
        ("--task_name", "spatial_corr_prediction"),
        ("--data", "SPATIAL_CORR"),
        ("--root_path", output_dir),
        ("--data_path", "spatial_correlation_data.npz"),
        ("--checkpoints", os.path.join(root_dir, "checkpoints")),
        ("--seq_len", "20"),
        ("--label_len", "10"),
        ("--pred_len", "10"),
        ("--d_model", str(args.d_model)),
        ("--n_heads", str(args.n_heads)),
        ("--d_ff", str(args.d_ff)),
        ("--n_probes", str(args.n_probes)),
        ("--llm_model", args.llm_model),  # forward the selected LLM
        ("--llm_layers", str(args.llm_layers)),
        ("--batch_size", str(args.batch_size)),
        ("--eval_batch_size", str(args.eval_batch_size)),
        ("--learning_rate", str(args.learning_rate)),
        ("--train_epochs", str(args.train_epochs)),
        ("--patience", str(args.patience)),
        ("--loss", "kl"),
        ("--model_comment", f"spatial_corr_only_{args.llm_model.lower().replace('-', '_')}"),  # embed model name in the comment
        ("--num_workers", str(args.num_workers)),
    ]
    patched_argv = ["run_spatial_corr.py"]
    for flag, value in option_pairs:
        patched_argv += [flag, value]
    patched_argv.append("--use_transformer")
    patched_argv += ["--n_transformer_layers", str(args.n_transformer_layers)]
    patched_argv += ["--print_every", "10"]  # print training progress more often
    if args.gradient_checkpointing:
        patched_argv.append("--gradient_checkpointing")
    if args.use_bf16:
        patched_argv.append("--use_amp")
    sys.argv = patched_argv

    print("训练参数:")
    print(" ".join(sys.argv))
    print("\n开始训练...\n")

    try:
        # Import and run the training script's entry point in this process.
        from run_spatial_corr import main as train_main
        train_main()
        print("\n" + banner)
        print("训练完成!")
        print(banner + "\n")
    except Exception as e:
        print("\n" + banner)
        print(f"训练过程中出错: {e}")
        import traceback
        traceback.print_exc()
        print(banner + "\n")
    finally:
        # Always restore the caller's argv.
        sys.argv = saved_argv


# Script entry point: run the full processing + training pipeline.
if __name__ == "__main__":
    main() 