import argparse
import torch
import numpy as np
import os
import time
import random
from tqdm import tqdm
from torch import nn, optim
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate import DistributedDataParallelKwargs

from models import ChannelLLM
from data_provider.data_factory import data_provider
from utils.tools import EarlyStopping, adjust_learning_rate, compute_metrics, visualize_weights, del_files

# Environment configuration: disable curl CA bundle lookup (proxy/cert
# workaround) and cap CUDA allocator split size to reduce fragmentation.
os.environ['CURL_CA_BUNDLE'] = ''
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:64"

# Command-line argument parser (description: channel-feature to
# probe-weight prediction LLM).
parser = argparse.ArgumentParser(description='信道特征到探头权重预测LLM')

# Fix random seeds for reproducibility (Python, PyTorch CPU RNG, NumPy).
# NOTE(review): CUDA RNG is not seeded here — confirm whether
# torch.cuda.manual_seed_all is needed for full determinism.
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)

# Project root = directory containing this file; used to anchor dataset,
# checkpoint, result and DeepSpeed-config paths below.
project_root = os.path.dirname(os.path.abspath(__file__))

# Basic configuration
parser.add_argument('--task_name', type=str, default='probe_weight_prediction', help='任务名称')
parser.add_argument('--is_training', type=int, default=1, help='是否训练模型')
parser.add_argument('--model_id', type=str, default='ChannelLLM', help='模型ID')
parser.add_argument('--model_comment', type=str, default='base', help='保存结果时的前缀')
parser.add_argument('--model', type=str, default='ChannelLLM', help='模型名称')
parser.add_argument('--seed', type=int, default=2023, help='随机种子')

# Data loading arguments
parser.add_argument('--data', type=str, default='CHANNEL', help='数据集类型')
parser.add_argument('--root_path', type=str, default=os.path.join(project_root, 'dataset/channel_data/'), help='数据文件根目录')
parser.add_argument('--data_path', type=str, default='channel_data.npz', help='数据文件名')
parser.add_argument('--features', type=str, default='M', help='特征类型，M表示多变量')
parser.add_argument('--loader', type=str, default='channel', help='数据加载器类型')
parser.add_argument('--checkpoints', type=str, default=os.path.join(project_root, 'checkpoints/'), help='模型检查点目录')

# Sequence length arguments
parser.add_argument('--seq_len', type=int, default=20, help='输入序列长度')
parser.add_argument('--label_len', type=int, default=10, help='标签长度')
parser.add_argument('--pred_len', type=int, default=10, help='预测序列长度')

# Model definition arguments
parser.add_argument('--n_clusters', type=int, default=25, help='每个时间点的簇数量')
parser.add_argument('--n_probes', type=int, default=32, help='探头数量')
parser.add_argument('--d_model', type=int, default=128, help='模型隐藏层维度')
parser.add_argument('--n_heads', type=int, default=8, help='注意力头数量')
parser.add_argument('--d_ff', type=int, default=256, help='前馈网络维度')
parser.add_argument('--dropout', type=float, default=0.1, help='Dropout率')
parser.add_argument('--llm_layers', type=int, default=6, help='LLM层数')

# Optimization arguments
parser.add_argument('--num_workers', type=int, default=10, help='数据加载器工作进程数')
parser.add_argument('--itr', type=int, default=1, help='实验次数')
parser.add_argument('--train_epochs', type=int, default=20, help='训练轮数')
parser.add_argument('--batch_size', type=int, default=16, help='训练批次大小')
parser.add_argument('--eval_batch_size', type=int, default=8, help='评估批次大小')
parser.add_argument('--patience', type=int, default=5, help='早停耐心值')
parser.add_argument('--learning_rate', type=float, default=0.001, help='学习率')
parser.add_argument('--des', type=str, default='exp', help='实验描述')
parser.add_argument('--loss', type=str, default='kl', help='损失函数类型，可选[mse, kl]')
parser.add_argument('--use_amp', action='store_true', help='是否使用混合精度训练', default=False)
parser.add_argument('--print_every', type=int, default=50, help='每隔多少个批次打印一次信息')
parser.add_argument('--optim', type=str, default='adam', help='优化器类型')

args = parser.parse_args()

# Distributed training setup: find_unused_parameters=True lets DDP
# tolerate parameters that receive no gradient (e.g. frozen LLM layers).
# NOTE(review): requires ds_config_zero2.json at the project root.
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(project_root, 'ds_config_zero2.json'))
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs], deepspeed_plugin=deepspeed_plugin)

def _extract_batch(batch_x_dict, batch_y_dict):
    """Unpack one dataloader batch into float input/target tensors.

    The data loader may yield either raw tensors or dicts holding the
    tensor under the 'data' key; both forms are accepted for inputs and
    targets alike.

    Args:
        batch_x_dict: input batch (dict with 'data' key, or tensor);
            presumably [B, T, n_clusters*5] — confirm against loader.
        batch_y_dict: target batch (dict with 'data' key, or tensor);
            presumably [B, T, n_probes].

    Returns:
        tuple: (batch_x, batch_y) as float tensors.
    """
    if isinstance(batch_x_dict, dict):
        batch_x = batch_x_dict['data'].float()
    else:
        batch_x = batch_x_dict.float()
    # Mirror the dict/tensor guard for targets too; the original only
    # guarded the inputs and crashed on plain-tensor targets.
    if isinstance(batch_y_dict, dict):
        batch_y = batch_y_dict['data'].float()
    else:
        batch_y = batch_y_dict.float()
    return batch_x, batch_y


def _compute_loss(criterion, pred, true, loss_name):
    """Apply `criterion`, adapting predictions for KL divergence.

    nn.KLDivLoss expects log-probabilities as input, so for the 'kl'
    loss the predictions are shifted by a small epsilon (to avoid
    log(0)) and log-transformed before the criterion is applied.
    """
    if loss_name.lower() == 'kl':
        return criterion(torch.log(pred + 1e-10), true)
    return criterion(pred, true)


def main():
    """Train, validate and test the ChannelLLM probe-weight model.

    Reads configuration from the module-level `args` and `accelerator`
    globals, runs the training loop with early stopping, reloads the
    best checkpoint, evaluates on the test split, and saves metrics,
    predictions and a weight visualization under results/<setting>/.
    """
    # Experiment tag used for checkpoint/result directory names.
    setting = '{}_{}_{}_sl{}_ll{}_pl{}_dm{}_nh{}_bs{}_{}_{}'.format(
        args.task_name,
        args.model_id,
        args.data,
        args.seq_len,
        args.label_len,
        args.pred_len,
        args.d_model,
        args.n_heads,
        args.batch_size,
        args.des,
        args.model_comment
    )

    # Load the train/val/test splits.
    print("Loading data...")
    train_data, train_loader = data_provider(args, 'train')
    vali_data, vali_loader = data_provider(args, 'val')
    test_data, test_loader = data_provider(args, 'test')
    train_steps = len(train_loader)

    # Build the model.
    print(f"Creating {args.model} model...")
    model = ChannelLLM(args).float()

    # Checkpoint directory (created on the local main process only to
    # avoid a multi-process race).
    path = os.path.join(args.checkpoints, setting)
    if not os.path.exists(path) and accelerator.is_local_main_process:
        os.makedirs(path)

    # Report parameter counts.
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

    if accelerator.is_local_main_process:
        print('模型:', args.model)
        print('总参数数量:', total_params)
        print('可训练参数数量:', trainable_params)
        print('可训练参数比例:', trainable_params / total_params)

    # Only optimize parameters that still require gradients.
    trained_parameters = [p for p in model.parameters() if p.requires_grad]

    # Optimizer selection (SGD with momentum as the fallback).
    if args.optim.lower() == 'adam':
        model_optim = optim.Adam(trained_parameters, lr=args.learning_rate)
    elif args.optim.lower() == 'adamw':
        model_optim = optim.AdamW(trained_parameters, lr=args.learning_rate, weight_decay=1e-4)
    else:
        model_optim = optim.SGD(trained_parameters, lr=args.learning_rate, momentum=0.9)

    # Loss selection; 'batchmean' matches the mathematical definition of
    # KL divergence. Unknown names fall back to MSE.
    if args.loss.lower() == 'mse':
        criterion = nn.MSELoss()
    elif args.loss.lower() == 'kl':
        criterion = nn.KLDivLoss(reduction='batchmean')
    else:
        criterion = nn.MSELoss()

    # Early stopping on validation loss; also checkpoints the best model.
    early_stopping = EarlyStopping(accelerator=accelerator, patience=args.patience, verbose=True)

    # Let accelerate wrap loaders/model/optimizer for distributed training.
    train_loader, vali_loader, test_loader, model, model_optim = accelerator.prepare(
        train_loader, vali_loader, test_loader, model, model_optim
    )

    # Mixed-precision scaler (only used when --use_amp is set).
    if args.use_amp:
        scaler = torch.cuda.amp.GradScaler()

    if args.is_training:
        print("Start training...")
        for epoch in range(args.train_epochs):
            iter_count = 0
            train_loss = []

            model.train()
            epoch_time = time.time()

            for i, (batch_x_dict, batch_y_dict, batch_x_mark, batch_y_mark) in tqdm(enumerate(train_loader)):
                iter_count += 1
                model_optim.zero_grad()

                batch_x, batch_y = _extract_batch(batch_x_dict, batch_y_dict)

                if args.use_amp:
                    with torch.cuda.amp.autocast():
                        outputs = model(batch_x, None, batch_x_mark, None)
                        # Loss over the full output/target sequences.
                        loss = _compute_loss(criterion, outputs, batch_y, args.loss)

                    train_loss.append(loss.item())

                    # NOTE(review): combining a manual GradScaler with
                    # accelerator.backward may double-scale gradients if
                    # the Accelerator itself is configured for mixed
                    # precision — kept as in the original, but confirm.
                    accelerator.backward(scaler.scale(loss))
                    scaler.step(model_optim)
                    scaler.update()
                else:
                    outputs = model(batch_x, None, batch_x_mark, None)
                    # Loss over the full output/target sequences.
                    loss = _compute_loss(criterion, outputs, batch_y, args.loss)

                    train_loss.append(loss.item())

                    accelerator.backward(loss)
                    model_optim.step()

                # Periodic progress / ETA report.
                if (i + 1) % args.print_every == 0:
                    accelerator.print(f"\titers: {i + 1}/{train_steps}, epoch: {epoch + 1} | loss: {loss.item():.7f}")
                    speed = (time.time() - epoch_time) / iter_count
                    left_time = speed * ((args.train_epochs - epoch) * train_steps - i)
                    accelerator.print(f'\tspeed: {speed:.4f}s/iter; left time: {left_time:.4f}s')

            # Per-epoch summary and validation.
            accelerator.print(f"Epoch: {epoch + 1} cost time: {time.time() - epoch_time:.4f}s")
            train_loss = np.average(train_loss)

            vali_loss, vali_metrics = validate(model, vali_loader, criterion)

            accelerator.print(f"Epoch: {epoch + 1}, Steps: {train_steps} | Train Loss: {train_loss:.7f} "
                  f"Vali Loss: {vali_loss:.7f} Vali MSE: {vali_metrics['mse']:.7f} "
                  f"Vali MAE: {vali_metrics['mae']:.7f} Vali KL: {vali_metrics['kl_div']:.7f}")

            early_stopping(vali_loss, model, path)
            if early_stopping.early_stop:
                accelerator.print("Early stopping")
                break

    # Reload the best checkpoint saved by EarlyStopping.
    best_model_path = os.path.join(path, 'checkpoint')
    accelerator.print('Loading best model...')
    accelerator.wait_for_everyone()
    unwrapped_model = accelerator.unwrap_model(model)
    unwrapped_model.load_state_dict(torch.load(best_model_path, map_location=lambda storage, loc: storage))

    # Test pass: collect predictions and targets over the test split.
    accelerator.print("Testing...")
    model.eval()
    preds = []
    trues = []

    # Result directory; clear any previous run's outputs first.
    folder_path = os.path.join(project_root, 'results/', setting + '/')
    if accelerator.is_local_main_process:
        if os.path.exists(folder_path):
            del_files(folder_path)  # drop stale results before writing new ones
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

    with torch.no_grad():
        for i, (batch_x_dict, batch_y_dict, batch_x_mark, batch_y_mark) in enumerate(test_loader):
            batch_x, batch_y = _extract_batch(batch_x_dict, batch_y_dict)

            outputs = model(batch_x, None, batch_x_mark, None)

            # Full output/target sequences, moved to host memory.
            preds.append(outputs.detach().cpu().numpy())
            trues.append(batch_y.detach().cpu().numpy())

    # Concatenate per-batch results along the batch axis.
    preds = np.concatenate(preds, axis=0)
    trues = np.concatenate(trues, axis=0)

    accelerator.print('测试完成，计算指标...')

    test_metrics = compute_metrics(preds, trues)
    accelerator.print(f"测试结果: MSE: {test_metrics['mse']:.7f}, MAE: {test_metrics['mae']:.7f}, "
          f"相对误差: {test_metrics['rel_err']:.7f}, KL散度: {test_metrics['kl_div']:.7f}")

    # Persist metrics/predictions and a visualization (main process only).
    if accelerator.is_local_main_process:
        np.save(folder_path + 'metrics.npy', np.array([test_metrics['mse'], test_metrics['mae'],
                                                     test_metrics['rel_err'], test_metrics['kl_div']]))
        np.save(folder_path + 'pred.npy', preds)
        np.save(folder_path + 'true.npy', trues)

        accelerator.print("生成可视化图像...")
        sample_idx = np.random.randint(0, preds.shape[0])  # one random test sample
        visualize_weights(
            preds[sample_idx],
            trues[sample_idx],
            n_probes=args.n_probes,
            save_path=folder_path + 'weights_visualization.png'
        )

    accelerator.print('任务完成')


def validate(model, vali_loader, criterion):
    """Evaluate `model` on the validation loader.

    Runs the model over the whole validation set without gradients,
    accumulating the per-batch criterion loss plus dataset-wide metrics
    from `compute_metrics`. Uses the module-level `args.loss` to decide
    whether predictions must be log-transformed for nn.KLDivLoss.

    Args:
        model: model to evaluate (left in eval mode on return).
        vali_loader: validation loader yielding (x, y, x_mark, y_mark)
            tuples; x/y may be tensors or dicts holding the tensor
            under the 'data' key.
        criterion: loss function (e.g. nn.MSELoss or nn.KLDivLoss).

    Returns:
        tuple: (mean validation loss, metrics dict — expected to
            contain at least 'mse', 'mae', 'kl_div'; see compute_metrics).
    """
    model.eval()
    total_loss = []
    preds = []
    trues = []

    with torch.no_grad():
        for batch_x_dict, batch_y_dict, batch_x_mark, batch_y_mark in vali_loader:
            # Batches may be raw tensors or dicts keyed by 'data'.
            if isinstance(batch_x_dict, dict):
                batch_x = batch_x_dict['data'].float()  # presumably [B, T, n_clusters*5]
            else:
                batch_x = batch_x_dict.float()
            # Mirror the guard for targets too; the original assumed the
            # target was always a dict and crashed on plain tensors.
            if isinstance(batch_y_dict, dict):
                batch_y = batch_y_dict['data'].float()  # presumably [B, T, n_probes]
            else:
                batch_y = batch_y_dict.float()

            outputs = model(batch_x, None, batch_x_mark, None)

            # KLDivLoss expects log-probabilities; epsilon avoids log(0).
            if args.loss.lower() == 'kl':
                loss = criterion(torch.log(outputs + 1e-10), batch_y)
            else:
                loss = criterion(outputs, batch_y)

            total_loss.append(loss.item())

            # Collect host-side copies for the metric computation.
            preds.append(outputs.detach().cpu().numpy())
            trues.append(batch_y.detach().cpu().numpy())

    # Concatenate per-batch results along the batch axis.
    preds = np.concatenate(preds, axis=0)
    trues = np.concatenate(trues, axis=0)

    metrics = compute_metrics(preds, trues)

    return np.average(total_loss), metrics


# Script entry point: argument parsing and Accelerator construction run
# at import time above; this kicks off the train/validate/test pipeline.
if __name__ == "__main__":
    main() 