import json
import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

def calculate_ranking_accuracy(model, tokenizer, test_data, batch_size=8, device="cuda"):
    """
    Compute a reward model's pairwise ranking accuracy and collect diagnostics.

    Args:
        model: trained reward model; assumed to emit a single scalar score per
            sample (sequence-classification head with one label).
        tokenizer: tokenizer matching the model.
        test_data: list of dicts of the form
            [
                {
                    "conversations": [{"from": "human", "value": "prompt"}],
                    "chosen": {"from": "gpt", "value": "chosen_response"},
                    "rejected": {"from": "gpt", "value": "rejected_response"}
                },
                ...
            ]
        batch_size: number of pairs scored per forward pass.
        device: compute device ("cuda" or "cpu").

    Returns:
        accuracy: fraction of pairs where score(chosen) > score(rejected).
        results: dict with aggregate metrics, per-sample scores/differences,
            and the mis-ranked samples for later analysis.

    Raises:
        ValueError: if test_data is empty (accuracy would be undefined).
    """
    if not test_data:
        raise ValueError("test_data is empty; ranking accuracy is undefined")

    # Evaluation mode: disables dropout etc.; .eval() returns the model so
    # the .to(device) chain is valid.
    model.eval().to(device)

    # Accumulators.
    correct = 0
    score_differences = []
    wrong_samples = []
    chosen_scores = []
    rejected_scores = []
    correct_ids = []
    wrong_ids = []

    start_time = time.time()

    # Batched inference over the test set.
    for i in tqdm(range(0, len(test_data), batch_size)):
        batch = test_data[i:i+batch_size]

        # Extract prompt, chosen and rejected texts.
        prompts = [item["conversations"][0]["value"] for item in batch]
        chosen_texts = [item["chosen"]["value"] for item in batch]
        rejected_texts = [item["rejected"]["value"] for item in batch]

        # Tokenize prompt/response pairs (text-pair encoding).
        chosen_inputs = tokenizer(
            prompts, chosen_texts,
            padding=True, truncation=True, 
            max_length=512, return_tensors="pt"
        ).to(device)

        rejected_inputs = tokenizer(
            prompts, rejected_texts,
            padding=True, truncation=True, 
            max_length=512, return_tensors="pt"
        ).to(device)

        # Inference.
        with torch.no_grad():
            chosen_outputs = model(**chosen_inputs)
            rejected_outputs = model(**rejected_inputs)

            # Assumes a single scalar score per sample.  squeeze(-1) is
            # applied in BOTH branches so a (batch, 1) output becomes
            # (batch,) and float(scores[j]) never receives a length-1
            # array (an error on NumPy >= 1.25).
            if hasattr(chosen_outputs, 'logits'):
                chosen_batch_scores = chosen_outputs.logits.squeeze(-1).cpu().numpy()
                rejected_batch_scores = rejected_outputs.logits.squeeze(-1).cpu().numpy()
            else:
                chosen_batch_scores = chosen_outputs.squeeze(-1).cpu().numpy()
                rejected_batch_scores = rejected_outputs.squeeze(-1).cpu().numpy()

        # Per-sample bookkeeping.
        for j in range(len(batch)):
            chosen_score = float(chosen_batch_scores[j])
            rejected_score = float(rejected_batch_scores[j])

            chosen_scores.append(chosen_score)
            rejected_scores.append(rejected_score)
            diff = chosen_score - rejected_score
            score_differences.append(diff)

            # Ties count as wrong: the model failed to separate the pair.
            if chosen_score > rejected_score:
                correct += 1
                correct_ids.append(i+j)
            else:
                # Keep mis-ranked samples for later analysis.
                sample = {
                    "id": i+j,
                    "prompt": prompts[j],
                    "chosen": chosen_texts[j],
                    "rejected": rejected_texts[j],
                    "chosen_score": chosen_score,
                    "rejected_score": rejected_score
                }
                wrong_samples.append(sample)
                wrong_ids.append(i+j)

    # Metrics.  The max() guards the speed division against a zero elapsed
    # time on very small inputs / coarse clocks.
    processing_time = max(time.time() - start_time, 1e-9)
    accuracy = correct / len(test_data)

    mean_diff = np.mean(score_differences)
    std_diff = np.std(score_differences)

    # Assemble the analysis payload.
    results = {
        "accuracy": accuracy,
        "total_samples": len(test_data),
        "correct_count": correct,
        "wrong_count": len(test_data) - correct,
        "mean_score_difference": mean_diff,
        "std_score_difference": std_diff,
        "median_score_difference": np.median(score_differences),
        "correct_ids": correct_ids,
        "wrong_ids": wrong_ids,
        "wrong_samples": wrong_samples,
        "chosen_scores": chosen_scores,
        "rejected_scores": rejected_scores,
        "score_differences": score_differences,
        "processing_time": processing_time,
        "speed": f"{len(test_data)/processing_time:.2f} samples/sec"
    }

    print("\n" + "="*50)
    print(f"排序准确率验证结果 (样本数: {len(test_data)})")
    print("="*50)
    print(f"排序准确率: {accuracy:.2%}")
    print(f"平均分数差: {mean_diff:.4f} ± {std_diff:.4f}")
    print(f"中位数分数差: {results['median_score_difference']:.4f}")
    print(f"处理速度: {results['speed']}")
    print(f"错误样本数: {results['wrong_count']} ({results['wrong_count']/len(test_data):.2%})")
    print("="*50)

    return accuracy, results

def visualize_results(results, save_path=None):
    """
    Render a 2x2 summary figure for a ranking-accuracy evaluation.

    Args:
        results: dict produced by calculate_ranking_accuracy.
        save_path: optional file path; when given, the figure is also
            written to disk at 300 dpi before being shown.
    """
    plt.figure(figsize=(15, 12))

    # Panel 1: score density of chosen vs. rejected responses.
    plt.subplot(2, 2, 1)
    sns.kdeplot(results["chosen_scores"], label="Chosen Scores", fill=True)
    sns.kdeplot(results["rejected_scores"], label="Rejected Scores", fill=True)
    plt.title(f"奖励分数分布 (准确率: {results['accuracy']:.2%})")
    plt.xlabel("分数")
    plt.ylabel("密度")
    plt.legend()

    # Panel 2: histogram of chosen-minus-rejected score differences,
    # with the mean marked by a dashed vertical line.
    plt.subplot(2, 2, 2)
    sns.histplot(results["score_differences"], kde=True, bins=30)
    mean_diff = results["mean_score_difference"]
    plt.axvline(mean_diff, color='r', linestyle='dashed', linewidth=1)
    plt.text(mean_diff + 0.1, 10,
             f'平均值: {mean_diff:.2f}', color='r')
    plt.title(f"分数差分布 (标准差: {results['std_score_difference']:.2f})")
    plt.xlabel("分数差 (Chosen - Rejected)")

    # Panel 3: box plots of both score series and their differences.
    plt.subplot(2, 2, 3)
    box_series = [
        results["chosen_scores"],
        results["rejected_scores"],
        results["score_differences"],
    ]
    plt.boxplot(box_series, patch_artist=True,
                labels=["Chosen分数", "Rejected分数", "分数差"])
    plt.title("分数分布统计")

    # Panel 4: pie chart of correctly vs. incorrectly ranked pairs.
    plt.subplot(2, 2, 4)
    pie_sizes = [results["correct_count"], results["wrong_count"]]
    pie_labels = [
        f'正确排序\n{results["correct_count"]}',
        f'错误排序\n{results["wrong_count"]}',
    ]
    plt.pie(pie_sizes, labels=pie_labels, autopct='%1.1f%%', startangle=90,
            colors=['#66b3ff', '#ff9999'])
    plt.title(f"排序准确率: {results['accuracy']:.2%}")

    plt.tight_layout()

    # Persist first (if requested), then display.
    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"可视化结果已保存至: {save_path}")

    plt.show()

def save_wrong_samples(wrong_samples, file_path="wrong_predictions.json"):
    """Write mis-ranked samples to a UTF-8 JSON file for later inspection."""
    # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable on disk.
    payload = json.dumps(wrong_samples, indent=2, ensure_ascii=False)
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(payload)
    print(f"保存错误预测至: {file_path}")

# ==================== Usage example ====================
if __name__ == "__main__":
    # Target GPU for evaluation.
    gpu_device = "cuda:1"  # use GPU 1

    # 1. Load the reward model and tokenizer.
    model_path = "/data2/modelscope/hub/models/saves/merge_20250627-174355"  # replace with your reward-model path
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSequenceClassification.from_pretrained(model_path)

    # 2. Load the test dataset.
    test_file = "data/dpo_zh_demo.json"  # test-data path
    with open(test_file, 'r', encoding='utf-8') as f:
        test_data = json.load(f)

    print(f"载入 {len(test_data)} 条测试样本")

    # 3. Compute ranking accuracy on the chosen device.
    accuracy, results = calculate_ranking_accuracy(
        model, tokenizer, test_data,
        batch_size=16,  # tune to available GPU memory
        device=gpu_device
    )

    # 4. Visualize the results.
    visualize_results(results, save_path="train_cf/src/20250627/reward_model_evaluation.png")

    # 5. Persist mis-ranked samples for analysis.
    save_wrong_samples(results["wrong_samples"])

    # 6. Key-metric summary.
    print("\n关键指标总结:")
    print(f"- 排序准确率: {accuracy:.2%}")
    print(f"- 平均分数差: {results['mean_score_difference']:.4f}")
    print(f"- 标准差: {results['std_score_difference']:.4f}")
    # Guard the median computations: np.median([]) warns and yields nan
    # (e.g. there are no wrong samples at 100% accuracy, and no correct
    # ones at 0%), so only print a median when its slice is non-empty.
    correct_diffs = [d for d in results['score_differences'] if d > 0]
    wrong_diffs = [d for d in results['score_differences'] if d <= 0]
    if correct_diffs:
        print(f"- 正确判断中位数差: {np.median(correct_diffs):.4f}")
    if wrong_diffs:
        print(f"- 错误判断中位数差: {np.median(wrong_diffs):.4f}")

    # Suggested evaluation thresholds.
    print("\n[评估标准建议]")
    if accuracy > 0.90:
        print("✅ 优秀: 准确率 >90%，模型效果出色")
    elif accuracy > 0.85:
        print("🟢 良好: 准确率 85-90%，模型可用")
    elif accuracy > 0.75:
        print("🟡 一般: 准确率 75-85%，需要进一步优化")
    else:
        print("🔴 较差: 准确率 <75%，建议重新训练模型")

    if results['mean_score_difference'] > 1.0:
        print("✅ 分数区分度高: 平均分数差 >1.0")
    elif results['mean_score_difference'] > 0.5:
        print("🟢 分数区分度中等: 平均分数差 >0.5")
    else:
        print("🔴 分数区分度不足: 平均分数差过低")