# Configure GPU environment variables first — this must happen before any torch import
import os
os.environ['CURL_CA_BUNDLE'] = ''
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:64"
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ['CUDA_VISIBLE_DEVICES'] = '3'  # Pin this process to GPU 3 directly in code

import argparse
import torch
import numpy as np
import time
import random
import matplotlib.pyplot as plt
from tqdm import tqdm
from torch import nn, optim
from accelerate import Accelerator
from accelerate import DistributedDataParallelKwargs
import pandas as pd
from models import ChannelLLM
from data_provider.data_factory import data_provider
from utils.tools import EarlyStopping, adjust_learning_rate, compute_metrics, visualize_weights, del_files, load_content
from torch.utils.data import DataLoader

# Command-line argument parser
parser = argparse.ArgumentParser(description='基于空间相关性的信道预测')

# Fix random seeds for reproducibility
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)

# Project root directory (the directory containing this file)
project_root = os.path.dirname(os.path.abspath(__file__))

# Basic configuration
parser.add_argument('--task_name', type=str, default='spatial_corr_prediction', help='任务名称')
parser.add_argument('--is_training', type=int, default=1, help='是否训练模型')
parser.add_argument('--model_id', type=str, default='ChannelLLM', help='模型ID')
parser.add_argument('--model_comment', type=str, default='spatial_corr', help='保存结果时的前缀')
parser.add_argument('--model', type=str, default='ChannelLLM', help='模型名称')
parser.add_argument('--seed', type=int, default=2023, help='随机种子')

# Data loading arguments
parser.add_argument('--data', type=str, default='SPATIAL_CORR', help='数据集类型，设置为SPATIAL_CORR')
parser.add_argument('--root_path', type=str, default=os.path.join(project_root, 'dataset/spatial_corr/'), help='数据文件根目录')
parser.add_argument('--data_path', type=str, default='spatial_correlation_data.npz', help='数据文件名')
parser.add_argument('--features', type=str, default='M', help='特征类型，M表示多变量')
parser.add_argument('--checkpoints', type=str, default=os.path.join(project_root, 'checkpoints/'), help='模型检查点目录')

# Sequence length arguments
parser.add_argument('--seq_len', type=int, default=20, help='输入序列长度')
parser.add_argument('--label_len', type=int, default=10, help='标签长度')
parser.add_argument('--pred_len', type=int, default=10, help='预测序列长度')

# Model definition arguments
parser.add_argument('--n_probes', type=int, default=20, help='探头数量')
parser.add_argument('--d_model', type=int, default=128, help='模型隐藏层维度')
parser.add_argument('--n_heads', type=int, default=8, help='注意力头数量')
parser.add_argument('--d_ff', type=int, default=128, help='前馈网络维度')
parser.add_argument('--dropout', type=float, default=0.1, help='Dropout率')
parser.add_argument('--activation', type=str, default='silu', choices=['relu', 'gelu', 'silu', 'swish', 'mish'], help='激活函数类型')
parser.add_argument('--llm_layers', type=int, default=6, help='LLM层数')
parser.add_argument('--llm_model', type=str, default='Qwen-1.7B', help='使用的LLM模型类型，可选[GPT2-large, GPT2-small, GPT2-medium, Qwen-4B, Qwen-1.7B]')
parser.add_argument('--islora', action='store_true', help='是否使用LoRA进行训练', default=False)
parser.add_argument('--lora_r', type=int, default=16, help='LoRA秩参数')
parser.add_argument('--lora_alpha', type=int, default=16, help='LoRA alpha参数')
parser.add_argument('--lora_dropout', type=float, default=0.1, help='LoRA dropout率')
parser.add_argument('--use_transformer', action='store_true', help='是否使用Transformer替代TwoStageAttention', default=False)
parser.add_argument('--n_transformer_layers', type=int, default=2, help='Transformer编码器层数')
# NOTE(review): for the flags below, action='store_true' combined with default=True
# means they can never be switched OFF from the command line (passing the flag is a
# no-op); consider argparse.BooleanOptionalAction instead — TODO confirm intended CLI.
parser.add_argument('--use_reprogramming', action='store_true', help='是否使用Reprogramming与tokenizer做注意力，否则直接线性映射', default=True)
parser.add_argument('--use_residual_in_reprogramming', action='store_true', help='是否在ReprogrammingLayer中使用残差连接', default=True)
parser.add_argument('--use_residual_after_llm', action='store_true', help='是否在大模型输出后添加残差连接（连接ReprogrammingLayer输出）', default=False)
parser.add_argument('--use_attention_probe_selection', action='store_true', help='是否使用基于注意力的探头选择', default=False)
parser.add_argument('--use_patch_embedding', action='store_true', help='是否使用PatchEmbedding进行特征提取', default=False)
parser.add_argument('--patch_len', type=int, default=2, help='Patch长度')
parser.add_argument('--stride', type=int, default=1, help='Patch步长')
parser.add_argument('--prompt_domain', action='store_true', help='是否使用自定义任务描述', default=True)
parser.add_argument('--content', type=str, default='', help='自定义任务描述内容')

# Optimization arguments
parser.add_argument('--num_workers', type=int, default=6, help='数据加载器工作进程数')
parser.add_argument('--itr', type=int, default=1, help='实验次数')
parser.add_argument('--train_epochs', type=int, default=1000, help='训练轮数')
parser.add_argument('--batch_size', type=int, default=8, help='训练批次大小')
parser.add_argument('--eval_batch_size', type=int, default=8, help='评估批次大小')
parser.add_argument('--patience', type=int, default=5, help='早停耐心值')
parser.add_argument('--learning_rate', type=float, default=0.001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='exp', help='实验描述')
parser.add_argument('--loss', type=str, default='MSE', help='loss function')
parser.add_argument('--use_amp', action='store_true', help='是否使用混合精度训练', default=True)
parser.add_argument('--bf16', action='store_true', help='是否使用BFloat16混合精度训练', default=True)
parser.add_argument('--print_every', type=int, default=200, help='每隔多少个批次打印一次信息')
parser.add_argument('--optim', type=str, default='adamw', help='优化器类型')
parser.add_argument('--lradj', type=str, default='COS', help='学习率调度器类型: COS(余弦退火), STEP(阶梯衰减), EXP(指数衰减), PLATEAU(自适应衰减), ONECYCLE(一周期-推荐快速收敛)')
parser.add_argument('--pct_start', type=float, default=0.1, help='OneCycleLR的pct_start参数')
parser.add_argument('--grad_clip_norm', type=float, default=0, help='梯度裁剪范数，0表示不使用梯度裁剪')
parser.add_argument('--use_minimal_mode', action='store_true', help='使用极简模式：只保留ReprogrammingLayer', default=True)
parser.add_argument('--use_positional_encoding', action='store_true', help='在极简模式下独立使用位置编码', default=True)
parser.add_argument('--use_compile', action='store_true', help='是否使用torch.compile编译模型', default=False)
parser.add_argument('--load_model', action='store_true', help='是否加载预训练模型', default=False)

args = parser.parse_args()

# Convert boolean-ish arguments that may have been injected as strings.
# NOTE(review): store_true flags never receive string values from argparse itself,
# so this loop only matters when args are set programmatically — verify it is needed.
bool_args = ['use_patch_embedding', 'use_transformer', 'use_reprogramming', 'use_residual_in_reprogramming', 'use_residual_after_llm', 'use_attention_probe_selection', 'use_amp', 'bf16']
for arg_name in bool_args:
    if hasattr(args, arg_name) and isinstance(getattr(args, arg_name), str):
        # Map common truthy/falsy spellings to real booleans
        arg_value = getattr(args, arg_name).lower()
        if arg_value in ['false', 'f', '0', 'no', 'n']:
            setattr(args, arg_name, False)
        elif arg_value in ['true', 't', '1', 'yes', 'y']:
            setattr(args, arg_name, True)

# Distributed-training kwargs for accelerate
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)

# Select mixed-precision mode: prefer bf16 when hardware supports it
if args.use_amp:
    if args.bf16 and torch.cuda.is_bf16_supported():
        print("使用BFloat16混合精度训练")
        mixed_precision = 'bf16'
    else:
        print("使用Float16混合精度训练")
        mixed_precision = 'fp16'
else:
    print("使用Float32全精度训练")
    mixed_precision = 'no'

accelerator = Accelerator(
    kwargs_handlers=[ddp_kwargs],
    mixed_precision=mixed_precision,
    # Intended to force single-GPU, non-distributed execution
    cpu=False,
    device_placement=True
)

# Report which GPU device is actually in use
if torch.cuda.is_available():
    current_device = torch.cuda.current_device()
    device_name = torch.cuda.get_device_name(current_device)
    print(f"当前使用的GPU设备: {current_device} ({device_name})")
    print(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', '未设置')}")
else:
    print("CUDA不可用，将使用CPU")
def main():
    """Run one experiment end-to-end: load data, build ChannelLLM, train, then test.

    Returns:
        str: the experiment `setting` string used to name checkpoint/output dirs.
    """
    # Experiment identifier used in checkpoint / result paths.
    setting = '{}_{}_{}_sl{}_ll{}_pl{}_dm{}_nh{}_bs{}_{}_{}'.format(
        args.task_name,
        args.model_id,
        args.data,
        args.seq_len,
        args.label_len,
        args.pred_len,
        args.d_model,
        args.n_heads,
        args.batch_size,
        args.des,
        args.model_comment
    )
    
    # Checkpoint directory ('lora_' prefix when LoRA fine-tuning is enabled).
    if args.islora:
        path = os.path.join(args.checkpoints, 'lora_' + setting)
    else:
        path = os.path.join(args.checkpoints, setting)
    
    # Load the prompt / task-description content.
    args.content = load_content(args)
    
    if not os.path.exists(path) and accelerator.is_local_main_process:
        os.makedirs(path)
    
    # ---------- Data ----------
    print("Loading data...")
    
    train_data, train_loader = data_provider(args, 'train')
    print(f"训练集加载完成，样本数量: {len(train_data)}")
    
    test_data, test_loader = data_provider(args, 'test')
    print(f"测试集加载完成，样本数量: {len(test_data)}")
    
    # BUGFIX: use a shallow copy of the namespace so overriding batch_size with
    # eval_batch_size for the validation loader does not mutate the global `args`
    # in place (the original aliased it via `args_all = args`).
    args_val = argparse.Namespace(**vars(args))
    args_val.batch_size = args.eval_batch_size
    vali_data, vali_loader = data_provider(args_val, 'val')
    print(f"验证集加载完成，样本数量: {len(vali_data)}")
    
    print(f"数据集加载完毕 - 训练集: {len(train_data)}样本, 验证集: {len(vali_data)}样本, 测试集: {len(test_data)}样本")
    
    train_steps = len(train_loader)
    
    # ---------- Model ----------
    print(f"Creating {args.model} model...")
    
    # Spatial-correlation data: 64 complex values split into real/imag parts,
    # hence a feature dimension of 128.
    args.feature_dim = 128
    args.use_spatial_corr_only = True  # flag: only spatial-correlation data is used
    
    model = ChannelLLM(args).float()
    
    # Parameter counts for logging.
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    
    if accelerator.is_local_main_process:
        print('模型:', args.model)
        print('总参数数量:', total_params)
        print('可训练参数数量:', trainable_params)
        print('可训练参数比例:', trainable_params / total_params)
        if args.grad_clip_norm > 0:
            print(f'启用梯度裁剪，最大范数: {args.grad_clip_norm}')
        else:
            print('未启用梯度裁剪')
    
    # Make every parameter contiguous to avoid "Tensors must be contiguous"
    # errors from DDP/accelerate.
    for name, param in model.named_parameters():
        if not param.is_contiguous():
            print(f"修复非连续参数: {name}")
            param.data = param.data.contiguous()
    
    # Optional torch.compile optimization.
    if args.use_compile:
        try:
            print("正在编译模型以提升训练速度...")
            print("⚠️  注意：第一次编译可能需要5-15分钟，请耐心等待...")
            print("💡 提示：模型越复杂，编译时间越长，但编译后训练会显著加速")
            print("🔄 如果编译时间过长，可以使用 --use_compile=False 跳过编译")
            
            # 'default' mode compiles faster than 'reduce-overhead'.
            model = torch.compile(model, mode="default")
            print("✅ 模型编译成功！预期训练速度提升10-25%")
            print("🚀 编译完成，开始训练...")
        except Exception as e:
            print(f"⚠️ 模型编译失败，将使用未编译版本: {e}")
            print("这不会影响训练，但可能无法获得编译带来的速度提升")
    else:
        print("⏩ 跳过模型编译，使用原始模型进行训练")
    
    # ---------- Optimizer ----------
    trained_parameters = [p for p in model.parameters() if p.requires_grad]
    
    if args.optim.lower() == 'adam':
        model_optim = optim.Adam(trained_parameters, lr=args.learning_rate, betas=(0.9, 0.999), eps=1e-8)
    elif args.optim.lower() == 'adamw':
        model_optim = optim.AdamW(
            trained_parameters, 
            lr=args.learning_rate, 
            betas=(0.9, 0.999),
            eps=1e-8,
            weight_decay=1e-4
        )
    else:
        model_optim = optim.SGD(trained_parameters, lr=args.learning_rate, momentum=0.9, weight_decay=1e-4)
    
    # ---------- Learning-rate scheduler (defaults to cosine annealing) ----------
    if hasattr(args, 'lradj') and args.lradj == 'COS':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model_optim, T_max=20, eta_min=1e-8)
    elif hasattr(args, 'lradj') and args.lradj == 'STEP':
        # Halve the learning rate every 20 epochs.
        scheduler = torch.optim.lr_scheduler.StepLR(model_optim, step_size=20, gamma=0.5)
    elif hasattr(args, 'lradj') and args.lradj == 'EXP':
        # Multiply the learning rate by 0.99 each epoch.
        scheduler = torch.optim.lr_scheduler.ExponentialLR(model_optim, gamma=0.99)
    elif hasattr(args, 'lradj') and args.lradj == 'PLATEAU':
        # Stepped after validation with the validation metric.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optim, mode='min', factor=0.5, patience=10, verbose=True)
    elif hasattr(args, 'lradj') and args.lradj == 'ONECYCLE':
        # OneCycleLR steps once per batch (handled inside the training loop).
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            optimizer=model_optim,
            max_lr=args.learning_rate * 2,  # peak LR = 2x the configured LR
            steps_per_epoch=len(train_loader),
            epochs=args.train_epochs,
            pct_start=args.pct_start,
            anneal_strategy='cos',
            div_factor=10.0,  # initial LR = max_lr / div_factor
            final_div_factor=100.0  # final LR = initial LR / final_div_factor
        )
    else:
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model_optim, T_max=20, eta_min=1e-8)
    
    # ---------- Loss ----------
    # NOTE(review): args.loss defaults to 'MSE' (uppercase) which falls through to
    # the default MSELoss branch here — harmless, but the comparison is case-sensitive.
    if args.loss == 'mse':
        criterion = nn.MSELoss()
    elif args.loss == 'kl':
        criterion = nn.KLDivLoss(reduction='batchmean')
    else:
        criterion = nn.MSELoss()
    
    # Let accelerate wrap model, optimizer, loaders and scheduler.
    model, model_optim, train_loader, vali_loader, test_loader, scheduler = accelerator.prepare(
        model, model_optim, train_loader, vali_loader, test_loader, scheduler
    )
    
    # Optionally resume from an existing checkpoint.
    if args.load_model:
        print(f"Loading model from {path}...")
        best_model_path = os.path.join(path, 'checkpoint.pth')
        if os.path.exists(best_model_path):
            checkpoint = torch.load(best_model_path, map_location=accelerator.device)
            model.load_state_dict(checkpoint['model'])
            print("Model loaded successfully.")
        else:
            print(f"Warning: {best_model_path} not found. Training from scratch.")
    
    early_stopping = EarlyStopping(patience=args.patience, verbose=True)
    
    # ---------- Train / validate ----------
    if args.is_training:
        train_and_validate(model, model_optim, criterion, train_loader, vali_loader, scheduler, accelerator, path, train_steps, early_stopping)
    
    # ---------- Test the best checkpoint ----------
    best_model_path = os.path.join(path, 'checkpoint.pth')
    if os.path.exists(best_model_path):
        checkpoint = torch.load(best_model_path, map_location=accelerator.device)
        model.load_state_dict(checkpoint['model'])
        print("加载最佳模型完成")
    
    print("开始测试...")
    test_loss, test_rmse = test(model, test_loader, criterion, accelerator, setting, path)
    
    print("训练、验证和测试完成")
    print(f"测试集RMSE: {test_rmse:.7f}")
    
    return setting

def train_and_validate(model, model_optim, criterion, train_loader, vali_loader, scheduler, accelerator, path, train_steps, early_stopping):
    """Main training loop with validation every 10 epochs.

    Saves the best checkpoint (by validation RMSE) to `path`, supports early
    stopping on the validation RMSE, and writes a loss/RMSE curve plot.
    """
    train_epochs_loss = []
    val_epochs_loss = []   # None for epochs without validation
    val_epochs_rmse = []   # None for epochs without validation
    best_val_rmse = float('inf')  # model selection is by validation RMSE
    best_epoch = 0
    
    print("开始训练...")
    for epoch in range(args.train_epochs):
        train_loss = []
        
        train_epoch_time = time.time()
        window_time = time.time()  # timer for the print_every reporting window
        
        model.train()
        for i, (batch_x, batch_mark) in enumerate(train_loader):
            model_optim.zero_grad()
            
            pred_output = model(batch_x)
            
            # Loss: prefer the model-computed spatial-correlation RMSE loss.
            if isinstance(pred_output, dict) and 'spatial_rmse_loss' in pred_output:
                loss = pred_output['spatial_rmse_loss']
            elif args.loss == 'kl':
                # KLDivLoss expects log-probabilities; uniform distribution as target.
                log_pred = torch.log_softmax(pred_output, dim=-1)
                target = torch.ones_like(pred_output) / pred_output.shape[-1]
                loss = criterion(log_pred, target)
            else:
                # Simple regression target: constant 0.5.
                target = torch.ones_like(pred_output) * 0.5
                loss = criterion(pred_output, target)
            
            loss.backward()
            
            # Optional gradient clipping.
            if args.grad_clip_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.grad_clip_norm)
            
            model_optim.step()
            
            # OneCycleLR steps once per batch.
            if hasattr(args, 'lradj') and args.lradj == 'ONECYCLE':
                scheduler.step()
            
            # Keep losses on-device (detach) to avoid a GPU sync on every step.
            train_loss.append(loss.detach())
            
            if (i + 1) % args.print_every == 0 and accelerator.is_local_main_process:
                # Synchronize once per window: average the most recent losses.
                recent_losses = train_loss[-args.print_every:]
                avg_recent_loss = torch.stack(recent_losses).mean().item()
                
                print("\titers: {0}, epoch: {1} | avg_loss: {2:.7f}".format(i + 1, epoch + 1, avg_recent_loss))
                current_lr = model_optim.param_groups[0]['lr']
                print(f"\t\t当前学习率: {current_lr:.2e}")
                # BUGFIX: the original reset its timer on every iteration, so the
                # reported speed was inflated ~print_every-fold; measure the full window.
                speed = args.print_every / (time.time() - window_time)
                print("\t\tspeed: {:.4f} iters/s".format(speed))
                window_time = time.time()
        
        # Per-epoch scheduler step (PLATEAU steps after validation; ONECYCLE per batch).
        if hasattr(args, 'lradj') and args.lradj == 'PLATEAU':
            pass  # stepped after validation with the validation metric
        elif hasattr(args, 'lradj') and args.lradj != 'ONECYCLE':
            scheduler.step()
        
        # Average training loss for the epoch (entries may still be tensors).
        if train_loss:
            train_loss_values = [loss.item() if hasattr(loss, 'item') else loss for loss in train_loss]
            train_loss = np.average(train_loss_values)
        else:
            train_loss = 0.0
        train_epochs_loss.append(train_loss)
        
        train_epoch_time = time.time() - train_epoch_time
        print("Epoch: {} cost time: {}".format(epoch + 1, train_epoch_time))
        
        if accelerator.is_local_main_process:
            # NOTE(review): validation / checkpointing / early stop run only on the
            # main process — fine for the forced single-GPU setup, but would desync
            # ranks under real multi-process DDP.
            if (epoch + 1) % 10 == 0:
                val_loss, val_rmse = validate(model, vali_loader, criterion)
                val_epochs_loss.append(val_loss)
                val_epochs_rmse.append(val_rmse)
                print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} | Val Loss: {3:.7f} | Val RMSE: {4:.7f}".format(
                    epoch + 1, train_steps, train_loss, val_loss, val_rmse))
                
                # ReduceLROnPlateau steps on the validation metric.
                if hasattr(args, 'lradj') and args.lradj == 'PLATEAU':
                    scheduler.step(val_rmse)
                    print(f"当前学习率: {model_optim.param_groups[0]['lr']:.2e}")
                
                # Save the best checkpoint by validation RMSE.
                if val_rmse < best_val_rmse:
                    best_val_rmse = val_rmse
                    best_epoch = epoch + 1
                    print(f"发现新的最佳模型，验证RMSE: {best_val_rmse:.7f}")
                    
                    unwrapped_model = accelerator.unwrap_model(model)
                    
                    torch.save({
                        'epoch': epoch,
                        'model': unwrapped_model.state_dict(),
                        'optimizer': model_optim.state_dict(),
                        'val_loss': val_loss,
                        'train_loss': train_loss,
                        'val_rmse': val_rmse
                    }, os.path.join(path, 'checkpoint.pth'))
                    print("保存检查点成功")
                
                # Early stopping on validation RMSE (not loss).
                early_stopping(val_rmse, model, path)
                if early_stopping.early_stop:
                    print("触发早停，在第{}轮停止训练".format(epoch + 1))
                    break
                
                print(f"当前验证集RMSE: {val_rmse:.7f}, 当前最佳验证集RMSE: {best_val_rmse:.7f} (轮次 {best_epoch})")
            else:
                # Non-validation epoch: report training loss only.
                print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f}".format(
                    epoch + 1, train_steps, train_loss))
                
                # Pad the validation series with None so indices stay epoch-aligned.
                val_epochs_loss.append(None)
                val_epochs_rmse.append(None)
            
            # Refresh the training-curve plot on validation epochs.
            if (epoch + 1) % 10 == 0:
                plt.figure(figsize=(12, 8))
                plt.subplot(2, 1, 1)
                # Filter out the None placeholders before plotting.
                train_epochs = list(range(1, len(train_epochs_loss) + 1))
                val_epochs = [i+1 for i, v in enumerate(val_epochs_loss) if v is not None]
                val_loss_plot = [v for v in val_epochs_loss if v is not None]
                
                plt.plot(train_epochs, train_epochs_loss, label='Training Loss')
                plt.plot(val_epochs, val_loss_plot, label='Validation Loss')
                plt.legend()
                plt.title('Training and Validation Loss')
                
                plt.subplot(2, 1, 2)
                val_rmse_plot = [v for v in val_epochs_rmse if v is not None]
                plt.plot(val_epochs, val_rmse_plot, label='Validation RMSE')
                plt.legend()
                plt.title('Validation RMSE')
                
                plt.tight_layout()
                plt.savefig(os.path.join(path, 'training_process.png'))
                plt.close()

    # Final summary.
    if accelerator.is_local_main_process:
        print("\n训练完成，最佳验证集RMSE: {:.7f}，最佳轮次: {}".format(best_val_rmse, best_epoch))

def validate(model, vali_loader, criterion):
    """Evaluate on the validation loader; return (average loss, average RMSE).

    Also aggregates a per-time-id RMSE table and, on the main process, writes it
    to a CSV sorted by time_id. Restores model.train() before returning since it
    is called mid-training.
    """
    model.eval()
    total_loss = []
    total_rmse = []
    
    # Per-time-point RMSE accumulator: time_id -> list of batch-level RMSEs.
    time_rmse_dict = {}
    
    with torch.no_grad():
        for i, (batch_x, batch_mark) in enumerate(vali_loader):
            pred_output = model(batch_x)
            
            if isinstance(pred_output, dict) and 'spatial_rmse_loss' in pred_output:
                # The model already computed the spatial-correlation RMSE loss.
                loss = pred_output['spatial_rmse_loss']
                rmse = loss.item()  # the loss value is itself the RMSE
                
                # NOTE(review): the whole batch's RMSE is attributed to every
                # time id in the batch — per-sample attribution is not available here.
                time_ids = batch_x['time_ids'].cpu().numpy().flatten()
                for time_id in time_ids:
                    time_id = int(time_id)
                    if time_id not in time_rmse_dict:
                        time_rmse_dict[time_id] = []
                    time_rmse_dict[time_id].append(rmse)
                    
            elif args.loss == 'kl':
                # KLDivLoss expects log-probabilities; uniform distribution target.
                log_pred = torch.log_softmax(pred_output, dim=-1)
                target = torch.ones_like(pred_output) / pred_output.shape[-1]
                loss = criterion(log_pred, target)
                rmse = torch.sqrt(torch.mean((pred_output - target) ** 2)).item()
            else:
                # Simple regression target: constant 0.5.
                target = torch.ones_like(pred_output) * 0.5
                loss = criterion(pred_output, target)
                rmse = torch.sqrt(torch.mean((pred_output - target) ** 2)).item()
                
            total_loss.append(loss.item())
            total_rmse.append(rmse)
    
    average_loss = np.average(total_loss)
    average_rmse = np.average(total_rmse)
    
    # Mean RMSE per time id, sorted worst-first for reporting.
    time_rmse_avg = {time_id: np.mean(rmse_list) for time_id, rmse_list in time_rmse_dict.items()}
    sorted_time_rmse = sorted(time_rmse_avg.items(), key=lambda x: x[1], reverse=True)
    
    if accelerator.is_local_main_process:
        df = pd.DataFrame(sorted_time_rmse, columns=['time_id', 'rmse'])
        
        # NOTE(review): hard-coded absolute path breaks on any other machine;
        # consider deriving this from args.checkpoints instead.
        csv_path = '/home/zkh/lzt/damoxing/boshuyuce/xindaoyuce/time_id_rmse_results.csv'
        df.to_csv(csv_path, index=False, mode='w')
        print(f"已更新时间点RMSE结果到 time_id_rmse_results.csv")
        print(f"RMSE最大的前5个时间点:")
        for time_id, rmse in sorted_time_rmse[:5]:
            print(f"时间点 {time_id}: RMSE = {rmse:.7f}")
        
        # BUGFIX: sort in memory instead of re-reading the CSV that was just
        # written (the original did a pointless disk round-trip here); the final
        # file content and console output are unchanged.
        print("正在按照time_id重新排序...")
        try:
            df_sorted_by_id = df.sort_values('time_id')
            df_sorted_by_id.to_csv(csv_path, index=False)
            print(f"已按照time_id重新排序并保存到 time_id_rmse_results.csv")
            print(f"排序后的前5个时间点:")
            for _, row in df_sorted_by_id.head(5).iterrows():
                print(f"时间点 {row['time_id']}: RMSE = {row['rmse']:.7f}")
        except Exception as e:
            print(f"重新排序时出错: {e}")
    
    # Back to training mode: validate() is called inside the training loop.
    model.train()
    
    return average_loss, average_rmse

def test(model, test_loader, criterion, accelerator, setting, path):
    """Evaluate the model on the test set.

    Prints the average loss/RMSE, writes them to `path`/test_result.txt on the
    main process, and returns (average_loss, average_rmse).
    """
    model.eval()
    losses, rmses = [], []
    
    with torch.no_grad():
        for batch_x, _batch_mark in test_loader:
            output = model(batch_x)
            
            if isinstance(output, dict) and 'spatial_rmse_loss' in output:
                # The model supplies its own spatial-correlation RMSE loss;
                # the RMSE is the loss value itself.
                batch_loss = output['spatial_rmse_loss']
                batch_rmse = batch_loss.item()
            else:
                if args.loss == 'kl':
                    # KLDivLoss wants log-probabilities; target is uniform.
                    target = torch.ones_like(output) / output.shape[-1]
                    batch_loss = criterion(torch.log_softmax(output, dim=-1), target)
                else:
                    # Constant-0.5 regression target.
                    target = torch.ones_like(output) * 0.5
                    batch_loss = criterion(output, target)
                batch_rmse = torch.sqrt(torch.mean((output - target) ** 2)).item()
            
            losses.append(batch_loss.item())
            rmses.append(batch_rmse)
    
    average_loss = np.average(losses)
    average_rmse = np.average(rmses)
    
    print("测试集结果 - Loss: {:.7f}, RMSE: {:.7f}".format(average_loss, average_rmse))
    
    if accelerator.is_local_main_process:
        # Persist the test metrics alongside the checkpoints.
        result_path = os.path.join(path, 'test_result.txt')
        with open(result_path, 'w') as f:
            f.write("测试集结果 - Loss: {:.7f}, RMSE: {:.7f}\n".format(average_loss, average_rmse))
    
    return average_loss, average_rmse

# Script entry point.
if __name__ == "__main__":
    main() 