import argparse
import torch
import numpy as np
import os
import time
import random
from tqdm import tqdm
from torch import nn, optim
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate import DistributedDataParallelKwargs
import matplotlib.pyplot as plt
import pandas as pd
from models import ChannelLLM
from data_provider.data_factory import data_provider
from utils.tools import EarlyStopping, adjust_learning_rate, compute_metrics, visualize_weights, del_files, load_content, visualize_training_metrics
from torch.utils.data import DataLoader

# Environment configuration: disable CA-bundle lookup for HTTPS downloads and
# cap the CUDA allocator split size to reduce memory fragmentation.
os.environ['CURL_CA_BUNDLE'] = ''
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:64"

# Command-line argument parser
parser = argparse.ArgumentParser(description='信道特征到探头权重预测LLM')

# Project root (directory containing this script); used for all default paths.
project_root = os.path.dirname(os.path.abspath(__file__))

# --- basic configuration ---
parser.add_argument('--task_name', type=str, default='probe_weight_prediction', help='任务名称')
parser.add_argument('--is_training', type=int, default=1, help='是否训练模型')
parser.add_argument('--model_id', type=str, default='ChannelLLM', help='模型ID')
parser.add_argument('--model_comment', type=str, default='base', help='保存结果时的前缀')
parser.add_argument('--model', type=str, default='ChannelLLM', help='模型名称')
parser.add_argument('--seed', type=int, default=2023, help='随机种子')

# --- data loading ---
parser.add_argument('--data', type=str, default='CHANNEL', help='数据集类型')
parser.add_argument('--root_path', type=str, default=os.path.join(project_root, 'dataset/channel_data/'), help='数据文件根目录')
parser.add_argument('--data_path', type=str, default='channel_data.npz', help='数据文件名')
parser.add_argument('--features', type=str, default='M', help='特征类型，M表示多变量')
parser.add_argument('--loader', type=str, default='channel', help='数据加载器类型')
parser.add_argument('--checkpoints', type=str, default=os.path.join(project_root, 'checkpoints/'), help='模型检查点目录')

# --- sequence lengths ---
parser.add_argument('--seq_len', type=int, default=20, help='输入序列长度')
parser.add_argument('--label_len', type=int, default=10, help='标签长度')
parser.add_argument('--pred_len', type=int, default=10, help='预测序列长度')

# --- model definition ---
# NOTE(review): the flags below that combine action='store_true' with
# default=True can never be switched off from the command line (passing the
# flag is a no-op). Consider argparse.BooleanOptionalAction if a CLI off
# switch is needed; left unchanged to keep the CLI interface stable.
parser.add_argument('--n_clusters', type=int, default=25, help='每个时间点的簇数量')
parser.add_argument('--n_probes', type=int, default=32, help='探头数量')
parser.add_argument('--d_model', type=int, default=128, help='模型隐藏层维度')
parser.add_argument('--n_heads', type=int, default=8, help='注意力头数量')
parser.add_argument('--d_ff', type=int, default=256, help='前馈网络维度')
parser.add_argument('--dropout', type=float, default=0.1, help='Dropout率')
parser.add_argument('--llm_layers', type=int, default=6, help='LLM层数')
parser.add_argument('--llm_model', type=str, default='Qwen3-8B', help='使用的LLM模型类型，可选[GPT2-large, GPT2-small, GPT2-medium, Qwen3-8B]')
parser.add_argument('--islora', action='store_true', help='是否使用LoRA进行训练', default=True)
parser.add_argument('--lora_r', type=int, default=16, help='LoRA秩参数')
parser.add_argument('--lora_alpha', type=int, default=32, help='LoRA alpha参数')
parser.add_argument('--lora_dropout', type=float, default=0.05, help='LoRA dropout率')
parser.add_argument('--use_patch_embedding', action='store_true', help='是否使用PatchEmbedding', default=False)
parser.add_argument('--use_two_stage_attention', action='store_true', help='是否使用TwoStageAttention', default=False)
parser.add_argument('--use_transformer', action='store_true', help='是否使用Transformer替代TwoStageAttention', default=True)
parser.add_argument('--n_transformer_layers', type=int, default=2, help='Transformer编码器层数')
parser.add_argument('--use_reprogramming', action='store_true', help='是否使用Reprogramming与tokenizer做注意力，否则直接线性映射', default=False)
parser.add_argument('--use_attention_probe_selection', action='store_true', help='是否使用基于注意力的探头选择', default=False)
parser.add_argument('--patch_len', type=int, default=2, help='Patch长度')
parser.add_argument('--stride', type=int, default=1, help='Patch步长')
parser.add_argument('--prompt_domain', action='store_true', help='是否使用自定义任务描述', default=True)
parser.add_argument('--content', type=str, default='', help='自定义任务描述内容')

# --- optimization ---
parser.add_argument('--num_workers', type=int, default=10, help='数据加载器工作进程数')
parser.add_argument('--itr', type=int, default=1, help='实验次数')
parser.add_argument('--train_epochs', type=int, default=100, help='训练轮数')
parser.add_argument('--batch_size', type=int, default=4, help='训练批次大小')
parser.add_argument('--eval_batch_size', type=int, default=4, help='评估批次大小')
parser.add_argument('--patience', type=int, default=5, help='早停耐心值')
parser.add_argument('--learning_rate', type=float, default=0.0005, help='学习率')
parser.add_argument('--des', type=str, default='exp', help='实验描述')
parser.add_argument('--loss', type=str, default='kl', help='损失函数类型，可选[mse, kl]')
parser.add_argument('--use_amp', action='store_true', help='是否使用混合精度训练', default=False)
parser.add_argument('--print_every', type=int, default=50, help='每隔多少个批次打印一次信息')
parser.add_argument('--optim', type=str, default='adamw', help='优化器类型')
parser.add_argument('--lradj', type=str, default='COS', help='学习率调度器类型')
parser.add_argument('--pct_start', type=float, default=0.1, help='OneCycleLR的pct_start参数')
parser.add_argument('--load_model', action='store_true', help='是否加载预训练模型', default=False)
parser.add_argument('--load_spatial_corr', action='store_true', help='是否加载空间相关性数据', default=True)

args = parser.parse_args()

# Seed all RNGs *after* parsing so that --seed actually takes effect.
# BUG FIX: previously the seed was hard-coded to 2023 and applied before
# parse_args(), which silently ignored the --seed option.
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)

# Distributed training setup (DDP kwargs + DeepSpeed ZeRO-2 via Accelerate).
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=os.path.join(project_root, 'ds_config_zero2.json'))
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs], deepspeed_plugin=deepspeed_plugin)

# Main entry point: full train / validate / test pipeline.
def main():
    """Train, validate and test the ChannelLLM probe-weight prediction model.

    Reads configuration from the module-level ``args`` namespace, builds data
    loaders / model / optimizer / scheduler, optionally runs the training
    loop with periodic validation and best-checkpoint saving, then reloads
    the best checkpoint and evaluates it on the test set, saving predictions,
    metrics and visualisations under ``results/<setting>/``.
    """
    # Experiment identifier used for checkpoint / result directories.
    setting = '{}_{}_{}_sl{}_ll{}_pl{}_dm{}_nh{}_bs{}_{}_{}'.format(
        args.task_name,
        args.model_id,
        args.data,
        args.seq_len,
        args.label_len,
        args.pred_len,
        args.d_model,
        args.n_heads,
        args.batch_size,
        args.des,
        args.model_comment
    )

    # Checkpoint directory ('lora_' prefix when LoRA fine-tuning is enabled).
    if args.islora:
        path = os.path.join(args.checkpoints, 'lora_' + setting)
    else:
        path = os.path.join(args.checkpoints, setting)

    # Patch-embedding variants get their own directory suffix.
    if args.use_patch_embedding:
        path = path + f'_patch{args.patch_len}_stride{args.stride}'

    # Load the task-description prompt content.
    args.content = load_content(args)

    if accelerator.is_local_main_process:
        # exist_ok avoids a check-then-create race between local processes.
        os.makedirs(path, exist_ok=True)

    # Load data. The underlying dataset caches the source file, so the three
    # data_provider calls below read it only once.
    print("Loading data...")

    train_data, train_loader = data_provider(args, 'train')
    print(f"训练集加载完成，样本数量: {len(train_data)}")

    test_data, test_loader = data_provider(args, 'test')
    print(f"测试集加载完成，样本数量: {len(test_data)}")

    # Validation uses the whole dataset with the evaluation batch size.
    # BUG FIX: the original code aliased ``args_all = args`` and then mutated
    # batch_size, silently overwriting args.batch_size for the rest of the
    # run. A shallow copy keeps the training configuration intact.
    args_all = argparse.Namespace(**vars(args))
    args_all.batch_size = args.eval_batch_size
    vali_data, vali_loader = data_provider(args_all, 'all')
    print(f"验证集加载完成，样本数量: {len(vali_data)}")

    print(f"数据集加载完毕 - 训练集: {len(train_data)}样本, 验证集: {len(vali_data)}样本, 测试集: {len(test_data)}样本")

    train_steps = len(train_loader)

    # Build the model.
    print(f"Creating {args.model} model...")
    model = ChannelLLM(args).float()

    # Parameter statistics (total vs. trainable, e.g. LoRA adapters only).
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

    if accelerator.is_local_main_process:
        print('模型:', args.model)
        print('总参数数量:', total_params)
        print('可训练参数数量:', trainable_params)
        print('可训练参数比例:', trainable_params / total_params)

    # Make all parameters contiguous — workaround for the
    # "Tensors must be contiguous" error seen with some wrapped models.
    for name, param in model.named_parameters():
        if not param.is_contiguous():
            print(f"修复非连续参数: {name}")
            param.data = param.data.contiguous()

    # Only optimize parameters that require gradients.
    trained_parameters = [p for p in model.parameters() if p.requires_grad]

    # Optimizer selection.
    if args.optim.lower() == 'adam':
        model_optim = optim.Adam(trained_parameters, lr=args.learning_rate)
    elif args.optim.lower() == 'adamw':
        model_optim = optim.AdamW(trained_parameters, lr=args.learning_rate, weight_decay=1e-4)
    else:
        model_optim = optim.SGD(trained_parameters, lr=args.learning_rate, momentum=0.9)

    # Learning-rate scheduler selection.
    if hasattr(args, 'lradj') and args.lradj == 'COS':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model_optim, T_max=20, eta_min=1e-8)
    else:
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            optimizer=model_optim,
            max_lr=args.learning_rate,
            steps_per_epoch=train_steps,
            epochs=args.train_epochs,
            pct_start=args.pct_start,
        )

    # Loss function (the explicit 'mse' branch and the fallback were both
    # nn.MSELoss, so they are merged here — behavior unchanged).
    if args.loss.lower() == 'kl':
        criterion = nn.KLDivLoss(reduction='batchmean')
    else:
        criterion = nn.MSELoss()

    # Early-stopping patience fallback.
    if args.patience == 0:
        args.patience = 7

    # NOTE(review): early_stopping is instantiated but never consulted in the
    # training loop below — best-model saving is driven by train RMSE instead.
    early_stopping = EarlyStopping(accelerator=accelerator, patience=args.patience, verbose=True, monitor_mode='min')

    print("确保数据加载器和优化器为连续状态...")

    # Sanity checks before handing everything to the accelerator.
    for loader in [train_loader, vali_loader, test_loader]:
        if loader is None:
            print("警告: 数据加载器为None")
    if model_optim is None:
        print("警告: 优化器为None")
    if scheduler is None:
        print("警告: 调度器为None")

    train_loader, vali_loader, test_loader, model, model_optim, scheduler = accelerator.prepare(
        train_loader, vali_loader, test_loader, model, model_optim, scheduler
    )

    # Optionally resume from a previously saved checkpoint.
    if hasattr(args, 'load_model') and args.load_model:
        accelerator.print('正在加载预训练模型...')
        best_model_path = os.path.join(path, 'checkpoint')
        if os.path.exists(best_model_path):
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
            unwrapped_model.load_state_dict(torch.load(best_model_path, map_location=lambda storage, loc: storage))
            accelerator.print('预训练模型加载成功')
        else:
            accelerator.print(f'预训练模型路径不存在: {best_model_path}')

    # Mixed-precision gradient scaler.
    # BUG FIX: the original only created ``scaler`` when mixed_precision was
    # not bf16, yet the AMP training branch uses it whenever args.use_amp is
    # set — a NameError under bf16. Create it unconditionally and let
    # ``enabled`` turn scaling into a no-op when bf16 handles precision.
    if args.use_amp:
        scaler = torch.cuda.amp.GradScaler(enabled=accelerator.mixed_precision != 'bf16')

    def _log_gpu_memory(step, per_device=False):
        """Print allocated/reserved CUDA memory every ``print_every`` steps."""
        if torch.cuda.is_available() and step % args.print_every == 0:
            gpu_mem_alloc = torch.cuda.memory_allocated() / 1024**2
            gpu_mem_reserved = torch.cuda.memory_reserved() / 1024**2
            accelerator.print(f"\tGPU内存: 已分配 {gpu_mem_alloc:.2f}MB, 已缓存 {gpu_mem_reserved:.2f}MB")
            if per_device and torch.cuda.device_count() > 1:
                for gpu_id in range(torch.cuda.device_count()):
                    gpu_mem_alloc_per_device = torch.cuda.memory_allocated(gpu_id) / 1024**2
                    gpu_mem_reserved_per_device = torch.cuda.memory_reserved(gpu_id) / 1024**2
                    accelerator.print(f"\t\tGPU {gpu_id}: 已分配 {gpu_mem_alloc_per_device:.2f}MB, 已缓存 {gpu_mem_reserved_per_device:.2f}MB")

    def _spatial_loss(outputs, device):
        """Extract the self-supervised loss from model outputs.

        Returns ``(loss_tensor, rmse_item_or_None)``: the model's
        'spatial_rmse_loss' when present, otherwise a zero placeholder loss.
        """
        if isinstance(outputs, dict):
            spatial_rmse_loss = outputs.get('spatial_rmse_loss')
            if spatial_rmse_loss is not None:
                return spatial_rmse_loss, spatial_rmse_loss.item()
        return torch.tensor(0.0, device=device), None

    # ------------------------------------------------------------------
    # Training loop
    # ------------------------------------------------------------------
    if args.is_training:
        print("Start training...")
        train_losses = []
        vali_losses = []
        train_rmse_metrics = []   # per-epoch mean training RMSE
        vali_rmse_metrics = []    # per-epoch validation RMSE (None on non-validation epochs)
        best_train_rmse = float('inf')

        for epoch in range(args.train_epochs):
            iter_count = 0
            train_loss = []
            train_spatial_rmse_losses = []  # per-batch spatial-correlation RMSE

            model.train()
            epoch_time = time.time()

            for i, (batch_x_dict, batch_x_mark) in tqdm(enumerate(train_loader), total=train_steps):
                iter_count += 1
                model_optim.zero_grad()

                # Batches may be raw tensors or dicts carrying extra fields.
                if isinstance(batch_x_dict, dict):
                    batch_x = batch_x_dict['data'].float()  # [B, T, n_clusters*5]
                else:
                    batch_x = batch_x_dict.float()
                model_input = batch_x_dict if isinstance(batch_x_dict, dict) else batch_x

                if args.use_amp:
                    with torch.cuda.amp.autocast():
                        outputs = model(model_input, None, batch_x_mark, None)
                        _log_gpu_memory(i)
                        loss, rmse_item = _spatial_loss(outputs, batch_x.device)
                        if rmse_item is not None:
                            train_spatial_rmse_losses.append(rmse_item)

                    train_loss.append(loss.item())

                    # Scaled backward + optimizer/scheduler step.
                    accelerator.backward(scaler.scale(loss))
                    scaler.step(model_optim)
                    scaler.update()
                    scheduler.step()
                else:
                    outputs = model(model_input, None, batch_x_mark, None)
                    _log_gpu_memory(i, per_device=True)
                    loss, rmse_item = _spatial_loss(outputs, batch_x.device)
                    if rmse_item is not None:
                        train_spatial_rmse_losses.append(rmse_item)

                    train_loss.append(loss.item())

                    accelerator.backward(loss)
                    model_optim.step()
                    scheduler.step()

                # Periodic progress report with ETA.
                if (i + 1) % args.print_every == 0:
                    accelerator.print(f"\titers: {i + 1}/{train_steps}, epoch: {epoch + 1} | loss: {loss.item():.7f}")
                    speed = (time.time() - epoch_time) / iter_count
                    left_time = speed * ((args.train_epochs - epoch) * train_steps - i)
                    accelerator.print(f'\tspeed: {speed:.4f}s/iter; left time: {left_time:.4f}s')

            # Epoch-level aggregates.
            train_rmse = np.mean(train_spatial_rmse_losses) if train_spatial_rmse_losses else 0.0
            accelerator.print(f"Epoch: {epoch + 1} cost time: {time.time() - epoch_time:.4f}s")
            train_loss = np.average(train_loss)

            # Validate (and possibly checkpoint) every 10 epochs.
            if (epoch + 1) % 10 == 0:
                vali_loss, vali_metrics = validate(model, vali_loader, criterion)

                accelerator.print(f"Epoch: {epoch + 1}, Steps: {train_steps} | Train Loss: {train_loss:.7f} "
                      f"Train RMSE: {train_rmse:.7f} | Vali Loss: {vali_loss:.7f} Vali RMSE: {vali_metrics['spatial_rmse']:.7f}")

                # Best model is selected on *training* RMSE, only at
                # validation epochs (original design, kept as-is).
                if train_rmse < best_train_rmse:
                    best_train_rmse = train_rmse
                    if accelerator.is_local_main_process:
                        os.makedirs(path, exist_ok=True)
                    accelerator.wait_for_everyone()
                    unwrapped_model = accelerator.unwrap_model(model)
                    accelerator.save(unwrapped_model.state_dict(), path + '/' + 'checkpoint')
                    accelerator.print(f"保存新的最佳模型，训练RMSE: {train_rmse:.6f}")

                vali_losses.append(vali_loss)
                vali_rmse_metrics.append(vali_metrics['spatial_rmse'])
            else:
                accelerator.print(f"Epoch: {epoch + 1}, Steps: {train_steps} | Train Loss: {train_loss:.7f} "
                      f"Train RMSE: {train_rmse:.7f}")

                # Pad validation metrics with None on non-validation epochs.
                vali_losses.append(None)
                vali_rmse_metrics.append(None)

            train_losses.append(train_loss)
            train_rmse_metrics.append(train_rmse)

        # Plot training curves (main process only).
        if accelerator.is_local_main_process and len(train_losses) > 0:
            curve_path = os.path.join(path, 'training_curves.png')

            plt.figure(figsize=(15, 10))

            # Loss curves (validation points exist only every 10 epochs).
            plt.subplot(2, 1, 1)
            epochs = list(range(1, len(train_losses) + 1))
            plt.plot(epochs, train_losses, 'b-', marker='o', label='Train Loss')
            vali_epochs = [i+1 for i, v in enumerate(vali_losses) if v is not None]
            vali_losses_plot = [v for v in vali_losses if v is not None]
            plt.plot(vali_epochs, vali_losses_plot, 'r-', marker='s', label='Validation Loss')
            plt.title('Loss over Epochs')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.legend()
            plt.grid(True)

            # RMSE curves for train and validation.
            plt.subplot(2, 1, 2)
            plt.plot(epochs, train_rmse_metrics, 'g-', marker='o', label='Train RMSE')
            vali_rmse_plot = [v for v in vali_rmse_metrics if v is not None]
            plt.plot(vali_epochs, vali_rmse_plot, 'm-', marker='s', label='Validation RMSE')
            plt.title('RMSE Metrics over Epochs')
            plt.xlabel('Epoch')
            plt.ylabel('RMSE')
            plt.legend()
            plt.grid(True)

            plt.tight_layout()
            plt.savefig(curve_path)
            plt.close()

            accelerator.print(f"训练曲线已保存到: {curve_path}")

            # Persist raw metric arrays for offline analysis
            # (None → NaN so the arrays stay numeric).
            np.save(os.path.join(path, 'train_losses.npy'), np.array(train_losses))
            np.save(os.path.join(path, 'vali_losses.npy'), np.array([v if v is not None else np.nan for v in vali_losses]))
            np.save(os.path.join(path, 'train_rmse_metrics.npy'), np.array(train_rmse_metrics))
            np.save(os.path.join(path, 'vali_rmse_metrics.npy'), np.array([v if v is not None else np.nan for v in vali_rmse_metrics]))

    # ------------------------------------------------------------------
    # Testing with the best checkpoint
    # ------------------------------------------------------------------
    best_model_path = os.path.join(path, 'checkpoint')
    accelerator.print('Loading best model...')
    accelerator.wait_for_everyone()
    unwrapped_model = accelerator.unwrap_model(model)
    unwrapped_model.load_state_dict(torch.load(best_model_path, map_location=lambda storage, loc: storage))

    accelerator.print("Testing...")
    model.eval()
    preds = []
    inputs = []
    spatial_rmse_losses = []  # per-batch spatial-correlation RMSE on the test set

    # Result directory for this experiment.
    folder_path = os.path.join(project_root, 'results/', setting + '/')
    if args.use_patch_embedding:
        folder_path = folder_path[:-1] + f'_patch{args.patch_len}_stride{args.stride}/'

    if accelerator.is_local_main_process:
        del_files(folder_path)  # clear previous results
        os.makedirs(folder_path, exist_ok=True)

    with torch.no_grad():
        for i, batch_data in enumerate(test_loader):
            # Accept (x, mark) as tuple or list; skip malformed batches.
            if isinstance(batch_data, (tuple, list)) and len(batch_data) == 2:
                batch_x_dict, batch_x_mark = batch_data
            else:
                print(f"警告: 测试批次 {i} 格式不正确，跳过")
                continue

            if isinstance(batch_x_dict, dict):
                batch_x = batch_x_dict['data'].float()  # [B, T, n_clusters*5]
            else:
                batch_x = batch_x_dict.float()

            outputs = model(batch_x_dict if isinstance(batch_x_dict, dict) else batch_x,
                           None, batch_x_mark, None)

            if isinstance(outputs, dict):
                pred = outputs.get('probe_weights', None)  # predicted probe weights

                spatial_rmse_loss = outputs.get('spatial_rmse_loss', None)
                if spatial_rmse_loss is not None:
                    spatial_rmse_losses.append(spatial_rmse_loss.item())
                # NOTE(review): the original also extracted
                # 'channel_reconstructed' and the true spatial correlation
                # here but never used them; the dead code was removed.
            else:
                pred = outputs  # plain tensor output: use it directly

            if pred is not None:
                # Cast (possibly bfloat16) tensors to float32 before NumPy.
                pred = pred.detach().cpu().to(torch.float32).numpy()
                input_data = batch_x.detach().cpu().to(torch.float32).numpy()

                preds.append(pred)
                inputs.append(input_data)

    # Guard against an empty test set / all-skipped batches, which would
    # make np.concatenate raise a confusing error.
    if not preds:
        accelerator.print('警告: 未收集到任何预测结果，跳过测试结果保存')
        return

    preds = np.concatenate(preds, axis=0)
    inputs = np.concatenate(inputs, axis=0)

    accelerator.print('测试完成，计算指标...')
    accelerator.print(f"预测值形状: {preds.shape}, 输入值形状: {inputs.shape}")

    # Report the model-computed spatial-correlation RMSE.
    if spatial_rmse_losses:
        spatial_rmse = np.mean(spatial_rmse_losses)
        accelerator.print(f"测试结果: 空间相关性RMSE: {spatial_rmse:.7f}")

        if accelerator.is_local_main_process:
            np.save(folder_path + 'metrics.npy', np.array([spatial_rmse]))
    else:
        accelerator.print("警告: 未收集到空间相关性RMSE损失")

    # Save predictions / inputs and produce visual reports (main process only).
    if accelerator.is_local_main_process:
        np.save(folder_path + 'pred.npy', preds)
        np.save(folder_path + 'input.npy', inputs)

        accelerator.print("生成探头权重可视化图像...")
        sample_idx = np.random.randint(0, preds.shape[0])  # random sample to visualise
        visualize_weights(
            preds[sample_idx],
            inputs[sample_idx],
            n_probes=args.n_probes,
            save_path=folder_path + 'weights_visualization.png'
        )

        # Build a combined training report from the metric arrays saved above.
        accelerator.print("生成训练过程RMSE变化曲线...")
        try:
            train_losses = np.load(os.path.join(path, 'train_losses.npy'))
            vali_losses = np.load(os.path.join(path, 'vali_losses.npy'))
            train_rmse_metrics = np.load(os.path.join(path, 'train_rmse_metrics.npy'))
            vali_rmse_metrics = np.load(os.path.join(path, 'vali_rmse_metrics.npy'))

            plt.figure(figsize=(15, 10))

            plt.subplot(2, 1, 1)
            epochs = list(range(1, len(train_losses) + 1))
            plt.plot(epochs, train_losses, 'b-', marker='o', label='Train Loss')
            plt.plot(epochs, vali_losses, 'r-', marker='s', label='Validation Loss')
            plt.title('Loss over Epochs')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.legend()
            plt.grid(True)

            plt.subplot(2, 1, 2)
            plt.plot(epochs, train_rmse_metrics, 'g-', marker='o', label='Train RMSE')
            plt.plot(epochs, vali_rmse_metrics, 'm-', marker='s', label='Validation RMSE')
            plt.title('RMSE Metrics over Epochs')
            plt.xlabel('Epoch')
            plt.ylabel('RMSE')
            plt.legend()
            plt.grid(True)

            plt.tight_layout()
            plt.savefig(folder_path + 'training_metrics_report.png')
            plt.close()

            accelerator.print(f"训练指标报告已保存至: {folder_path + 'training_metrics_report.png'}")
        except Exception as e:
            # Best-effort report generation: training may have been skipped.
            accelerator.print(f"加载训练损失数据失败，无法生成完整报告: {e}")

    accelerator.print('任务完成')


def validate(model, vali_loader, criterion):
    """Run one evaluation pass over the validation loader.

    Args:
        model: the (possibly accelerator-wrapped) model, called as
            ``model(x_or_dict, None, x_mark, None)``.
        vali_loader: validation DataLoader yielding ``(batch_x, batch_x_mark)``
            pairs, where ``batch_x`` may be a dict containing a ``'data'``
            tensor plus optional ``'time_ids'`` / spatial-correlation fields.
        criterion: loss function. Currently unused — the model's own
            'spatial_rmse_loss' output drives the metric; the parameter is
            kept for interface compatibility.

    Returns:
        tuple: ``(avg_loss, metrics)`` where ``metrics`` holds
        ``'spatial_rmse'`` (mean batch RMSE), ``'all_time_mse'`` (per-time-
        point RMSE values — key name kept for backward compatibility) and
        ``'all_time_ids'``.

    Raises:
        ValueError: if the loader is empty or a batch has the wrong format.
        RuntimeError: if fetching the first batch fails.
    """
    model.eval()
    total_loss = []
    spatial_rmse_losses = []

    # Per-(sample, time-step) RMSE values and their identifiers.
    all_time_mse = []
    all_time_ids = []

    # --- sanity/debug information about the validation loader ---
    print("开始验证...")

    vali_size = len(vali_loader.dataset) if hasattr(vali_loader, 'dataset') else 0
    print(f"验证集大小: {vali_size}")

    # Renamed from 'batch_size' to avoid shadowing the tensor-shape variable
    # used further below.
    loader_batch_size = vali_loader.batch_size if hasattr(vali_loader, 'batch_size') else 'unknown'
    print(f"验证集批次大小: {loader_batch_size}")

    num_batches = len(vali_loader) if hasattr(vali_loader, '__len__') else 0
    print(f"验证集批次数量: {num_batches}")

    # An empty validation set is a hard configuration error.
    if num_batches == 0:
        raise ValueError("验证集为空，无法继续训练！需要确保验证集样本数量大于0且批次大小合适。")

    try:
        # Inspect the first batch to verify its structure. This uses a fresh
        # iterator, so no data is skipped in the main loop below.
        batch = next(iter(vali_loader))
        print(f"验证批次类型: {type(batch)}")
        print(f"是否为元组: {isinstance(batch, tuple)}")
        print(f"是否为列表: {isinstance(batch, list)}")
        if isinstance(batch, (tuple, list)):
            print(f"验证批次长度: {len(batch)}")
            for i, item in enumerate(batch):
                print(f"  元素[{i}] 类型: {type(item)}")
                if hasattr(item, 'shape'):
                    print(f"  元素[{i}] 形状: {item.shape}")
    except StopIteration:
        raise ValueError("验证集迭代器为空，无法获取批次数据！")
    except Exception as e:
        raise RuntimeError(f"获取验证批次失败: {e}")

    with torch.no_grad():
        for i, batch_data in enumerate(vali_loader):
            # Accept (x, mark) as tuple or list.
            if isinstance(batch_data, (tuple, list)) and len(batch_data) == 2:
                batch_x_dict, batch_x_mark = batch_data
            else:
                raise ValueError(f"批次 {i} 格式不正确，期望为(x, mark)格式的元组或列表")

            if isinstance(batch_x_dict, dict):
                batch_x = batch_x_dict['data'].float()  # [B, T, n_clusters*5]

                # Optional per-sample time identifiers.
                time_ids = batch_x_dict.get('time_ids', None)
                if time_ids is not None:
                    time_ids = time_ids.cpu().numpy()

                # Optional ground-truth spatial correlation (real/imag parts
                # concatenated on the last axis).
                spatial_corr_real = batch_x_dict.get('spatial_corr_real', None)
                spatial_corr_imag = batch_x_dict.get('spatial_corr_imag', None)

                if spatial_corr_real is not None and spatial_corr_imag is not None:
                    true_spatial_corr = torch.cat([spatial_corr_real, spatial_corr_imag], dim=-1)
                else:
                    true_spatial_corr = None
            else:
                batch_x = batch_x_dict.float()
                time_ids = None
                true_spatial_corr = None

            outputs = model(batch_x_dict if isinstance(batch_x_dict, dict) else batch_x,
                        None, batch_x_mark, None)

            # Loss: the model's self-computed spatial RMSE when available,
            # otherwise a zero placeholder.
            if isinstance(outputs, dict):
                spatial_rmse_loss = outputs.get('spatial_rmse_loss', None)
                channel_reconstructed = outputs.get('channel_reconstructed', None)

                if spatial_rmse_loss is not None:
                    loss = spatial_rmse_loss
                    spatial_rmse_losses.append(spatial_rmse_loss.item())
                else:
                    loss = torch.tensor(0.0, device=batch_x.device)

                # Per-(sample, time-step) reconstruction RMSE vs. ground truth.
                if channel_reconstructed is not None and true_spatial_corr is not None:
                    n_samples, n_steps = channel_reconstructed.shape[0], channel_reconstructed.shape[1]
                    for b in range(n_samples):
                        for t in range(n_steps):
                            pred_single = channel_reconstructed[b, t]
                            true_single = true_spatial_corr[b, t]

                            mse = torch.mean((pred_single - true_single) ** 2)
                            rmse = torch.sqrt(mse).item()

                            # assumes time_ids is 2-D [B, T] when present — TODO confirm
                            time_id = time_ids[b, t] if time_ids is not None else f"batch_{i}_sample_{b}_time_{t}"

                            all_time_mse.append(rmse)  # stores RMSE (legacy list name)
                            all_time_ids.append(time_id)
            else:
                loss = torch.tensor(0.0, device=batch_x.device)

            total_loss.append(loss.item())

    # Nothing collected means every batch failed — treat as fatal.
    if not total_loss:
        raise ValueError("没有收集到任何损失值，验证失败！请检查模型输出或损失计算。")

    avg_loss = np.average(total_loss)

    # Persist the per-time-point RMSE ranking for offline analysis.
    # NOTE(review): written to the current working directory and overwritten
    # on every validation call — presumably intentional; verify.
    if all_time_mse:
        results_df = pd.DataFrame({
            'time_id': all_time_ids,
            'rmse': all_time_mse
        })

        # Worst time points first.
        results_df = results_df.sort_values('rmse', ascending=False)

        csv_path = 'time_id_rmse_results.csv'
        results_df.to_csv(csv_path, index=False)
        print(f"\n已将所有时间点的RMSE结果保存到: {csv_path}")
        print("\nRMSE最高的前20个时间点:")
        print(results_df.head(20))

    metrics = {
        'spatial_rmse': np.mean(spatial_rmse_losses) if spatial_rmse_losses else 0.0,
        'all_time_mse': all_time_mse,  # RMSE values; key name kept for compatibility
        'all_time_ids': all_time_ids
    }

    print(f"验证完成，平均损失: {avg_loss:.7f}")
    return avg_loss, metrics


# Script entry point. Note that argument parsing and accelerator creation
# happen at import time (module level); main() runs the pipeline.
if __name__ == "__main__":
    main()