import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import json
import pandas as pd
import re
import numpy as np
from rouge import Rouge
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from sentence_transformers import SentenceTransformer
from sklearn.metrics import precision_recall_fscore_support
from tqdm import tqdm
import os
from sklearn.feature_extraction.text import TfidfVectorizer

# Configuration constants
MODEL_NAME = "/root/autodl-tmp/Qwen/Qwen3-8B"
LORA_PATH = "./lora_train"
TEST_DATA_PATH = "combined_medical_test.json"
MAX_NEW_TOKENS = 1024
BATCH_SIZE = 4
NUM_SAMPLES = 50  # number of test examples to evaluate
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# Pin the whole model to this process's GPU (rank 0 when not launched via torchrun)
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}

# Sentence-embedding model used for the semantic-similarity metrics
semantic_model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2').to(DEVICE)

# Load tokenizer; left padding is required for batched decoder-only generation
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
tokenizer.padding_side = 'left'
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Base model in bf16 with FlashAttention-2, then the LoRA adapter on top
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.bfloat16,
    device_map=device_map,
    attn_implementation="flash_attention_2"
)
model = PeftModel.from_pretrained(model, LORA_PATH)
model.eval()
# Data loading and preprocessing
def load_test_data(file_path):
    """Read the JSON test set and keep only the first NUM_SAMPLES records."""
    with open(file_path, encoding='utf-8') as f:
        records = json.load(f)
    return records[:NUM_SAMPLES]

# Materialize the evaluation subset once at import time
test_data = load_test_data(TEST_DATA_PATH)

# Text cleanup: strip chat end-of-turn markers and normalize whitespace
def clean_text(text):
    """Remove Qwen special end tokens and collapse whitespace runs to single spaces."""
    without_markers = re.sub(r'<\|im_end\|>|<\|endoftext\|>', '', text)
    return re.sub(r'\s+', ' ', without_markers).strip()

# Token-level set-overlap metrics between predictions and references
def calculate_token_metrics(preds, refs):
    """Compute set-based token overlap metrics, averaged over all pairs.

    Each prediction/reference is whitespace-tokenized and treated as a
    set, so token ordering and duplicates are ignored.

    Args:
        preds: list of predicted strings.
        refs: list of reference strings (same length as preds).

    Returns:
        dict with mean "precision", "recall", "f1" and "accuracy".
        NOTE: "accuracy" is defined identically to "recall" here
        (|common| / |reference|); kept for backward compatibility
        with downstream reports.
    """
    # Guard: np.mean([]) would emit a RuntimeWarning and return nan
    if not preds or not refs:
        return {"precision": 0.0, "recall": 0.0, "f1": 0.0, "accuracy": 0.0}

    # (Removed an unused TfidfVectorizer that was constructed here but never called.)
    precisions, recalls, f1s, accuracies = [], [], [], []
    for pred, ref in zip(preds, refs):
        pred_set = set(pred.split())
        ref_set = set(ref.split())
        common = pred_set & ref_set
        precision = len(common) / len(pred_set) if pred_set else 0
        recall = len(common) / len(ref_set) if ref_set else 0
        f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
        accuracy = len(common) / len(ref_set) if ref_set else 0

        precisions.append(precision)
        recalls.append(recall)
        f1s.append(f1)
        accuracies.append(accuracy)

    return {
        "precision": np.mean(precisions),
        "recall": np.mean(recalls),
        "f1": np.mean(f1s),
        "accuracy": np.mean(accuracies)
    }

# Semantic similarity between predictions and references
def calculate_semantic_metrics(preds, refs):
    """Embed both text lists and summarize pairwise cosine similarity.

    Uses the module-level SentenceTransformer. Returns the mean, std,
    min and max of the per-pair cosine similarities as plain floats.
    """
    pred_embeddings = semantic_model.encode(preds, convert_to_tensor=True, show_progress_bar=False)
    ref_embeddings = semantic_model.encode(refs, convert_to_tensor=True, show_progress_bar=False)

    # Row-wise cosine similarity, moved to host memory for numpy reductions
    similarities = torch.nn.functional.cosine_similarity(pred_embeddings, ref_embeddings)
    similarities = similarities.cpu().numpy()

    return {
        "semantic_cosine_mean": float(np.mean(similarities)),
        "semantic_cosine_std": float(np.std(similarities)),
        "semantic_cosine_min": float(np.min(similarities)),
        "semantic_cosine_max": float(np.max(similarities))
    }

# Surface-level overlap metrics (ROUGE / BLEU / token overlap)
def calculate_classic_metrics(preds, refs):
    """Compute classic text-overlap metrics between predictions and references.

    Args:
        preds: list of predicted strings.
        refs: list of reference strings (same length as preds).

    Returns:
        dict with mean "rouge-1"/"rouge-2"/"rouge-l" F-scores, "bleu",
        plus the token-level metrics from calculate_token_metrics.
    """
    rouge = Rouge()
    smoothie = SmoothingFunction().method4

    # ROUGE: the rouge package raises on empty/whitespace-only hypotheses;
    # fall back to zeros rather than aborting the whole evaluation.
    # (Was a bare `except:` — narrowed so Ctrl-C/SystemExit still propagate.)
    try:
        rouge_scores = rouge.get_scores(preds, refs)
        metrics = {
            "rouge-1": sum(s['rouge-1']['f'] for s in rouge_scores) / len(rouge_scores),
            "rouge-2": sum(s['rouge-2']['f'] for s in rouge_scores) / len(rouge_scores),
            "rouge-l": sum(s['rouge-l']['f'] for s in rouge_scores) / len(rouge_scores)
        }
    except Exception:
        metrics = {"rouge-1": 0, "rouge-2": 0, "rouge-l": 0}

    # Sentence-level BLEU with smoothing; score 0 for degenerate pairs
    bleu_scores = []
    for p, r in zip(preds, refs):
        try:
            bleu_scores.append(sentence_bleu([r.split()], p.split(), smoothing_function=smoothie))
        except Exception:
            bleu_scores.append(0)
    # Guard against ZeroDivisionError when preds is empty
    metrics["bleu"] = sum(bleu_scores) / len(bleu_scores) if bleu_scores else 0

    # Merge in the token-level set-overlap metrics
    metrics.update(calculate_token_metrics(preds, refs))

    return metrics

# Batched generation with the model's chat template
def batch_generate(questions):
    """Generate one assistant reply per question, BATCH_SIZE prompts at a time.

    Each question is wrapped in the tokenizer's chat template with a fixed
    medical-assistant system prompt, generated with sampling, and the prompt
    portion is sliced off before decoding. Returns cleaned response strings
    aligned with the input order.
    """
    system_message = {"role": "system", "content": "You are a helpful medical assistant."}
    responses = []
    for start in tqdm(range(0, len(questions), BATCH_SIZE), desc="Generating"):
        chunk = questions[start:start + BATCH_SIZE]
        prompts = [
            tokenizer.apply_chat_template(
                [system_message, {"role": "user", "content": question}],
                tokenize=False,
                add_generation_prompt=True
            )
            for question in chunk
        ]
        inputs = tokenizer(
            prompts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=2048
        )
        # Move every input tensor onto whichever device holds the model weights
        target_device = next(model.parameters()).device
        inputs = {key: tensor.to(target_device) for key, tensor in inputs.items()}
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=MAX_NEW_TOKENS,
                temperature=0.8,
                top_p=0.9,
                do_sample=True
            )
        # Slice off the prompt (left padding keeps it at the front of each row),
        # then decode and clean each continuation
        prompt_length = inputs["input_ids"].shape[1]
        decoded = tokenizer.batch_decode(outputs[:, prompt_length:], skip_special_tokens=False)
        responses.extend(clean_text(text) for text in decoded)
    return responses

# Main evaluation pipeline
def evaluate():
    """Generate answers for the test set, score them, and persist the results.

    Writes per-sample rows to semantic_evaluate_results.csv and aggregate
    metrics to semantic_evaluate_metrics.json.

    Returns:
        Tuple of (metrics dict, list of per-sample evaluation records).
    """
    questions = [item["question"] for item in test_data]
    references = [item["answer"] for item in test_data]
    cots = [item.get("cot", "") for item in test_data]

    # Model predictions, aligned with `questions`
    answers = batch_generate(questions)

    # One record per sample, used both for scoring and the CSV export
    eval_data = [
        {
            "question": question,
            "generated": generated,
            "reference": reference,
            "cot_reference": cot,
            "type": item["type"]
        }
        for item, question, generated, reference, cot
        in zip(test_data, questions, answers, references, cots)
    ]

    metrics = {}

    # 1. Score the answer text directly
    gen_answers = [record["generated"] for record in eval_data]
    ref_answers = [record["reference"] for record in eval_data]
    metrics["answer"] = {
        "classic": calculate_classic_metrics(gen_answers, ref_answers),
        "semantic": calculate_semantic_metrics(gen_answers, ref_answers)
    }

    # 2. Score chain-of-thought, only for reasoning samples with a CoT reference
    cot_records = [r for r in eval_data if r["type"] == "reason" and r["cot_reference"]]
    if cot_records:
        gen_cots = [r["generated"] for r in cot_records]
        ref_cots = [r["cot_reference"] for r in cot_records]
        metrics["cot"] = {
            "classic": calculate_classic_metrics(gen_cots, ref_cots),
            "semantic": calculate_semantic_metrics(gen_cots, ref_cots)
        }

    # 3. Combined view: for reasoning samples the reference CoT is appended to
    #    both sides; all other samples use the answer text as-is
    combined_gen, combined_ref = [], []
    for record in eval_data:
        if record["type"] == "reason" and record["cot_reference"]:
            combined_gen.append(f"{record['generated']} {record['cot_reference']}")
            combined_ref.append(f"{record['reference']} {record['cot_reference']}")
        else:
            combined_gen.append(record["generated"])
            combined_ref.append(record["reference"])

    metrics["combined"] = {
        "classic": calculate_classic_metrics(combined_gen, combined_ref),
        "semantic": calculate_semantic_metrics(combined_gen, combined_ref)
    }

    # Persist per-sample rows and aggregate metrics
    pd.DataFrame(eval_data).to_csv("semantic_evaluate_results.csv", index=False)
    with open("semantic_evaluate_metrics.json", "w") as f:
        json.dump(metrics, f, indent=2)

    return metrics, eval_data

if __name__ == "__main__":
    metrics, results = evaluate()

    print("\n=== Semantic Evaluation Results ===")
    print(f"\nEvaluated {len(results)} samples")

    def print_metrics(name, data):
        """Pretty-print one metric group (classic + semantic) to stdout."""
        classic = data['classic']
        semantic = data['semantic']
        print(f"\n[{name.upper()}]")
        print("Classic Metrics:")
        print(f"  ROUGE-1: {classic['rouge-1']:.4f}")
        print(f"  ROUGE-2: {classic['rouge-2']:.4f}")
        print(f"  ROUGE-L: {classic['rouge-l']:.4f}")
        print(f"  BLEU:    {classic['bleu']:.4f}")
        print(f"  Precision: {classic['precision']:.4f}")
        print(f"  Recall:    {classic['recall']:.4f}")
        print(f"  F1:        {classic['f1']:.4f}")
        print(f"  Accuracy:  {classic['accuracy']:.4f}")
        print("\nSemantic Metrics:")
        print(f"  Cosine Mean: {semantic['semantic_cosine_mean']:.4f}")
        print(f"  Cosine Std:  {semantic['semantic_cosine_std']:.4f}")
        print(f"  Cosine Min:  {semantic['semantic_cosine_min']:.4f}")
        print(f"  Cosine Max:  {semantic['semantic_cosine_max']:.4f}")

    print_metrics("Answer", metrics["answer"])
    # CoT metrics only exist when the test set contained reasoning samples
    if "cot" in metrics:
        print_metrics("Chain-of-Thought", metrics["cot"])
    print_metrics("Combined", metrics["combined"])