#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import glob
import pandas as pd
from tqdm import tqdm
import sys
import argparse

# Add the project root directory to sys.path so the project-local modules below resolve
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models.ChannelLLM import Model as ChannelLLM
from data_provider.data_factory import data_provider

def parse_args():
    """Build and parse the command-line options for the per-sample MSE script.

    Returns:
        argparse.Namespace with model/checkpoint, output, batching and
        model-shape options (see the spec table below for defaults).
    """
    arg_parser = argparse.ArgumentParser(description='计算每个样本的MSE')

    # (flag, type, default, help) — kept in the original registration order.
    option_specs = [
        ('--model_path', str, None, '模型检查点路径'),
        ('--results_dir', str, 'results', '结果目录'),
        ('--batch_size', int, 4, '批次大小'),
        ('--top_n', int, 20, '显示MSE最高的前N个样本'),
        # Model configuration
        ('--seq_len', int, 20, '序列长度'),
        ('--n_clusters', int, 25, '簇数量'),
        ('--n_probes', int, 32, '探头数量'),
        ('--data', str, 'CHANNEL', '数据集类型'),
    ]
    for flag, value_type, default, help_text in option_specs:
        arg_parser.add_argument(flag, type=value_type, default=default, help=help_text)

    return arg_parser.parse_args()

def load_model_and_data(args):
    """Construct the model configuration, test dataloader and model.

    Args:
        args: parsed CLI namespace (uses seq_len, n_clusters, n_probes,
            data, batch_size and model_path).

    Returns:
        (model, test_loader) — the model is on the selected device, in
        eval mode, with checkpoint weights loaded when args.model_path
        points to an existing file.
    """
    from argparse import Namespace

    # Hyper-parameters mirror the training setup; values not exposed on the
    # CLI are fixed here. Built as a plain dict, then frozen into a Namespace.
    config_values = dict(
        seq_len=args.seq_len,
        pred_len=10,
        label_len=10,
        n_clusters=args.n_clusters,
        d_model=128,
        d_ff=256,
        n_heads=8,
        n_probes=args.n_probes,
        dropout=0.1,
        llm_layers=6,
        llm_model='GPT2-large',
        patch_len=2,
        stride=1,
        use_patch_embedding=True,
        use_two_stage_attention=True,
        islora=True,
        lora_r=8,
        data=args.data,
        root_path='./data',
        data_path='channel_data.npy',
        features='M',
        loader='channel',
        eval_batch_size=args.batch_size,
        prompt_domain=True,
        content='信道预测任务是通过分析信道簇特征预测探头权重的关键技术，可提高无线通信系统性能。',
        gpt2_path='xindaoyuce/gpt2-large'
    )
    model_config = Namespace(**config_values)

    # Test split only; the train loader returned first is discarded.
    _, test_loader = data_provider(model_config, 'test')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = ChannelLLM(model_config).to(device)

    # Restore checkpoint weights when a valid path was given; otherwise warn
    # and continue with the randomly initialized model.
    if args.model_path and os.path.exists(args.model_path):
        state_dict = torch.load(args.model_path, map_location=device)
        model.load_state_dict(state_dict)
        print(f"成功加载模型权重: {args.model_path}")
    else:
        print("警告: 未加载模型权重，将使用随机初始化的模型")

    model.eval()
    return model, test_loader

def find_latest_result_dir(results_base_dir):
    """Return the most recently modified entry under *results_base_dir*.

    Returns None when the directory is missing or contains no entries.
    """
    entries = glob.glob(os.path.join(results_base_dir, '*'))
    if not entries:
        return None
    # Newest = largest mtime; max() is equivalent to sorting descending and
    # taking the first element (same tie-breaking: first in glob order wins).
    return max(entries, key=os.path.getmtime)

def load_test_results(result_dir):
    """Load saved test-run arrays ('pred.npy' and 'input.npy') from *result_dir*.

    Returns:
        (pred, input_data) numpy arrays, or (None, None) when either file
        is missing.
    """
    pred_path = os.path.join(result_dir, 'pred.npy')
    input_path = os.path.join(result_dir, 'input.npy')

    # Both files must exist; otherwise report and bail out.
    both_present = os.path.exists(pred_path) and os.path.exists(input_path)
    if not both_present:
        print(f"找不到预测数据文件: {pred_path} 或 {input_path}")
        return None, None

    pred = np.load(pred_path)
    input_data = np.load(input_path)
    print(f"加载预测数据: {pred.shape}, 输入数据: {input_data.shape}")
    return pred, input_data

def calculate_mse_by_sample(model, test_loader, device):
    """Compute a reconstruction MSE for every (sample, time-step) pair.

    Runs the model over the test loader and compares the reconstructed
    channel against the ground-truth spatial correlation (real and
    imaginary parts concatenated on the feature axis).

    Args:
        model: forward-callable as model(x_dict, None, x_mark, None),
            expected to return a dict with key 'channel_reconstructed'
            of shape [B, seq_len, feature_dim*2].
        test_loader: iterable yielding (x_dict, x_mark) pairs.
        device: torch.device for the ground-truth tensor.

    Returns:
        (mse_values, time_ids): parallel lists with one entry per
        (batch element, time step). Batches with unexpected structure
        are skipped with a warning, matching the original behavior.
    """
    all_mse_values = []
    all_time_ids = []

    with torch.no_grad():
        for batch_idx, batch_data in enumerate(tqdm(test_loader, desc="计算样本MSE")):
            if isinstance(batch_data, (tuple, list)) and len(batch_data) == 2:
                x_dict, x_mark = batch_data
            else:
                print(f"警告: 批次 {batch_idx} 格式不正确，跳过")
                continue

            # Optional per-sample identifiers; assumes shape [B, seq_len]
            # when present — TODO confirm against the data provider.
            time_ids = x_dict.get('time_ids', None)
            if time_ids is not None:
                time_ids = time_ids.cpu().numpy()

            spatial_corr_real = x_dict.get('spatial_corr_real', None)
            spatial_corr_imag = x_dict.get('spatial_corr_imag', None)
            if spatial_corr_real is None or spatial_corr_imag is None:
                continue

            # Ground truth: real and imaginary parts stacked on the feature axis.
            true_spatial_corr = torch.cat([spatial_corr_real, spatial_corr_imag], dim=-1).to(device)

            outputs = model(x_dict, None, x_mark, None)
            if not (isinstance(outputs, dict) and 'channel_reconstructed' in outputs):
                continue

            reconstructed = outputs['channel_reconstructed']  # [B, seq_len, feature_dim*2]

            # Vectorized per-(sample, step) MSE over the feature dimension.
            # Replaces the original O(B*T) Python loop of one torch.mean(...)
            # .item() call per element — same numerics, one kernel launch.
            mse_matrix = torch.mean((reconstructed - true_spatial_corr) ** 2, dim=-1).cpu().numpy()

            batch_size, seq_len = mse_matrix.shape
            for b in range(batch_size):
                for t in range(seq_len):
                    # Fall back to a synthetic id when the loader supplies none.
                    time_id = time_ids[b, t] if time_ids is not None else f"batch_{batch_idx}_sample_{b}_time_{t}"
                    all_mse_values.append(float(mse_matrix[b, t]))
                    all_time_ids.append(time_id)

    return all_mse_values, all_time_ids

def _report_time_id_stats(results_df):
    """Print the frequency and mean-MSE tables per time_id; return the frequency table."""
    time_id_counts = results_df.groupby('time_id').size().reset_index(name='frequency')
    time_id_counts = time_id_counts.sort_values('frequency', ascending=False)
    print("\n出现频率最高的时间点:")
    print(time_id_counts.head(10))

    time_id_mse = results_df.groupby('time_id')['mse'].mean().reset_index(name='avg_mse')
    time_id_mse = time_id_mse.sort_values('avg_mse', ascending=False)
    print("\nMSE最高的时间点:")
    print(time_id_mse.head(10))

    return time_id_counts


def _plot_mse_distribution(mse_values):
    """Save a histogram of all per-sample MSE values to mse_distribution.png."""
    plt.figure(figsize=(10, 6))
    plt.hist(mse_values, bins=50)
    plt.xlabel('MSE值')
    plt.ylabel('样本数量')
    plt.title('样本MSE分布')
    plt.savefig('mse_distribution.png')
    plt.close()  # release the figure — the original leaked open figures


def _plot_time_id_frequency(time_id_counts):
    """Save a bar chart of the 10 most frequent time-point IDs to time_id_frequency.png."""
    top_n_time_ids = time_id_counts.head(10)
    plt.figure(figsize=(12, 6))
    plt.bar(top_n_time_ids['time_id'].astype(str), top_n_time_ids['frequency'])
    plt.xlabel('时间点ID')
    plt.ylabel('出现频率')
    plt.title('出现频率最高的10个时间点')
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig('time_id_frequency.png')
    plt.close()  # release the figure — the original leaked open figures


def main():
    """Entry point: compute per-sample MSE, report top offenders, save CSV and plots."""
    args = parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    model, test_loader = load_model_and_data(args)

    print("计算每个样本的MSE...")
    all_mse_values, all_time_ids = calculate_mse_by_sample(model, test_loader, device)

    if not all_mse_values:
        print("没有计算到任何MSE值，请检查数据和模型")
        return

    # Rank every (sample, time-step) pair by its MSE, worst first.
    results_df = pd.DataFrame({
        'time_id': all_time_ids,
        'mse': all_mse_values
    })
    results_df = results_df.sort_values('mse', ascending=False)

    print(f"\nMSE最高的 {args.top_n} 个样本:")
    print(results_df.head(args.top_n))

    csv_path = 'sample_mse_results.csv'
    results_df.to_csv(csv_path, index=False)
    print(f"已将所有结果保存到: {csv_path}")

    time_id_counts = _report_time_id_stats(results_df)

    _plot_mse_distribution(all_mse_values)
    _plot_time_id_frequency(time_id_counts)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main() 