import os
import asyncio
import numpy as np
import torch
import aiohttp
from typing import List
from tqdm.asyncio import tqdm_asyncio
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from nltk.translate.meteor_score import meteor_score
from nltk.tokenize import word_tokenize
from openpyxl import Workbook
from tqdm import tqdm
import warnings
from data_v_1_0 import load_reference_only, load_answer, load_question_only
from datetime import datetime
from dotenv import load_dotenv

"""
    v2.1:
    添加问题列，使评估结果包含对应的问题
"""

load_dotenv("../250430.env")  # 自动加载.env文件
# print(os.getenv("EMBEDDING_API_KEY"))

warnings.filterwarnings('ignore')

# 权重分配
w_BLEU = 0.05
w_BERTScore = 0.85
w_METEOR = 0.1

device = 'cuda' if torch.cuda.is_available() else 'cpu'

load_dotenv("../250430.env")  # 自动加载.env文件
print(os.getenv("EMBEDDING_API_KEY"))

class AsyncEmbeddingCalculator:
    """Fetches text embeddings concurrently from an OpenAI-compatible endpoint."""

    def __init__(self, api_key: str, model_name: str = "bge-m3", max_retries: int = 3):
        """
        :param api_key: bearer token sent in the Authorization header
        :param model_name: embedding model to request
        :param max_retries: attempts per batch before the last error is re-raised
        """
        self.api_url = "http://10.10.252.225:8000/v1-openai/embeddings"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}"
        }
        self.model = model_name
        self.max_retries = max_retries
        self.semaphore = asyncio.Semaphore(4)  # cap concurrent in-flight requests TODO:

    async def get_embeddings(self, texts: List[str], batch_size: int = 32) -> List[List[float]]:
        """Asynchronously fetch embeddings for *texts* in batches.

        Returns one embedding per input text, preserving input order.
        Raises the last transport/HTTP error if a batch fails *max_retries* times.
        """
        batches = [texts[i:i + batch_size] for i in range(0, len(texts), batch_size)]

        async def process_batch(session, batch):
            # Retry with exponential backoff; the final failure propagates.
            async with self.semaphore:
                for attempt in range(self.max_retries):
                    try:
                        payload = {
                            "model": self.model,
                            "input": batch,
                            "encoding_format": "float"
                        }
                        async with session.post(
                                self.api_url,
                                headers=self.headers,
                                json=payload,
                                timeout=aiohttp.ClientTimeout(total=30)
                        ) as response:
                            response.raise_for_status()
                            data = await response.json()
                            return [item["embedding"] for item in data["data"]]
                    except Exception:
                        if attempt == self.max_retries - 1:
                            raise
                        await asyncio.sleep(2 ** attempt)  # exponential backoff

        # Share a single session (and its connection pool) across all batches
        # instead of opening a fresh one per attempt.
        async with aiohttp.ClientSession() as session:
            tasks = [process_batch(session, batch) for batch in batches]
            results = await tqdm_asyncio.gather(*tasks, desc="获取嵌入向量")
        # Flatten the per-batch results back into one flat list of embeddings.
        return [emb for batch in results for emb in batch]


class ChatbotEvaluator:
    """Scores predictions against references with BLEU, BERTScore (embedding
    cosine similarity) and METEOR, and writes detailed / summary / final
    sheets to an Excel workbook."""

    def __init__(self, output_file='evaluation_results.xlsx'):
        """
        :param output_file: path of the Excel workbook to write results to
        """
        # Smoothing avoids zero BLEU for short sentences lacking higher-order n-gram overlap.
        self.smoother = SmoothingFunction().method1
        self.output_file = output_file

        # Workbook layout: per-item scores, per-metric averages, final text summary.
        self.wb = Workbook()
        self.detailed_ws = self.wb.active
        self.detailed_ws.title = "Detailed Scores"
        # Header row (includes the new Question column).
        self.detailed_ws.append(['Index', 'Question', 'Reference', 'Prediction', 'BLEU Score', 'BERT Score', 'METEOR Score'])

        self.summary_ws = self.wb.create_sheet(title="Summary Scores")
        self.summary_ws.append(['Metric', 'Average Score'])

        self.final_ws = self.wb.create_sheet(title="Final Results")

        # Plain-float accumulators: scalar running sums gain nothing from
        # per-item GPU tensor creation (the previous approach allocated a new
        # torch tensor on every iteration).
        self.total_bleu = 0.0
        self.total_bert = 0.0
        self.total_meteor = 0.0
        self.count = 0

    def save_to_excel(self):
        """Persist the workbook to *output_file*."""
        self.wb.save(self.output_file)

    async def async_evaluate(self, questions: List[str], references: List[str], predictions: List[str]):
        """
        Asynchronously evaluate predictions against references.

        :param questions: list of questions
        :param references: list of gold-standard answers
        :param predictions: list of generated answers
        :return: dict with avg_bleu, avg_bert, avg_meteor and composite_score
        :raises ValueError: when the three lists differ in length
        """
        if not (len(references) == len(predictions) == len(questions)):
            raise ValueError("问题、参考和预测数量不匹配")

        # Fetch all embeddings in one concurrent pass (references first, then predictions).
        embedding_calc = AsyncEmbeddingCalculator(os.getenv("EMBEDDING_API_KEY"))
        all_texts = references + predictions
        embeddings = await embedding_calc.get_embeddings(all_texts)
        ref_embs = embeddings[:len(references)]
        pred_embs = embeddings[len(references):]

        progress_bar = tqdm(total=len(references), desc="评估进度")

        for idx, (question, ref, pred) in enumerate(zip(questions, references, predictions)):
            try:
                # BERTScore: raw cosine similarity between the two embeddings.
                # Guard against zero-norm vectors to avoid NaN / division by zero.
                denom = np.linalg.norm(ref_embs[idx]) * np.linalg.norm(pred_embs[idx])
                if denom > 0:
                    bert_score = float(np.dot(ref_embs[idx], pred_embs[idx]) / denom)
                else:
                    bert_score = 0.0
                # NOTE: deliberately NOT normalized to [0, 1] TODO:

                # BLEU (computed locally).
                ref_tokens = [word_tokenize(ref)]
                pred_tokens = word_tokenize(pred)
                bleu = sentence_bleu(
                    ref_tokens,
                    pred_tokens,
                    smoothing_function=self.smoother
                )

                # METEOR (computed locally).
                meteor = meteor_score(
                    [word_tokenize(ref)],
                    word_tokenize(pred)
                )

                # Update running totals.
                self.total_bleu += bleu
                self.total_bert += bert_score
                self.total_meteor += meteor
                self.count += 1

                # Write the detailed row (including the question).
                self.detailed_ws.append([
                    idx + 1,
                    question,
                    ref,
                    pred,
                    bleu,
                    bert_score,
                    meteor
                ])

                progress_bar.update(1)

                # Checkpoint the workbook every 20 items so a crash loses little work.
                if (idx + 1) % 20 == 0:
                    self.save_to_excel()

            except Exception as e:
                # Best-effort: report and skip the failing item, keep evaluating the rest.
                print(f"处理第 {idx + 1} 条数据时出错: {str(e)}")
                continue

        progress_bar.close()

        # Averages over successfully processed items only.
        avg_bleu = self.total_bleu / self.count if self.count > 0 else 0
        avg_bert = self.total_bert / self.count if self.count > 0 else 0
        avg_meteor = self.total_meteor / self.count if self.count > 0 else 0

        # Weighted composite score.
        composite_score = (
                w_BLEU * avg_bleu +
                w_BERTScore * avg_bert +
                w_METEOR * avg_meteor
        )

        # Summary sheet.
        self.summary_ws.append(['BLEU', avg_bleu])
        self.summary_ws.append(['BERTScore', avg_bert])
        self.summary_ws.append(['METEOR', avg_meteor])
        self.summary_ws.append(['Composite Score', composite_score])

        # Final human-readable sheet.
        self.final_ws.append(["评估结果:"])
        self.final_ws.append([f"BLEU平均分: {avg_bleu:.4f}"])
        self.final_ws.append([f"BERTScore平均分: {avg_bert:.4f}"])
        self.final_ws.append([f"METEOR平均分: {avg_meteor:.4f}"])
        self.final_ws.append([f"综合得分: {composite_score:.4f}"])
        self.final_ws.append([])
        self.final_ws.append([f"评估结果已保存到 {self.output_file} 文件"])

        self.save_to_excel()

        return {
            'avg_bleu': avg_bleu,
            'avg_bert': avg_bert,
            'avg_meteor': avg_meteor,
            'composite_score': composite_score
        }


async def run_evaluation(sheet='聊天助手', file_answer="聊天助手_answers_20250430_094618.json"):
    """Load one sheet's questions/references/predictions, evaluate, and report."""
    # Load the three parallel data columns.
    questions = load_question_only(sheet)
    references = load_reference_only(sheet)
    predictions = load_answer(file_answer)
    assert len(questions) == len(references) == len(predictions), "问题、参考和预测数量不匹配"

    # A timestamped workbook path keeps repeated runs from overwriting each other.
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    result_path = f"../data/{sheet}_evaluation_results_{stamp}.xlsx"

    scores = await ChatbotEvaluator(output_file=result_path).async_evaluate(
        questions, references, predictions)

    # Echo the summary scores to stdout.
    print("\n评估结果:")
    for label, key in (("BLEU平均分", 'avg_bleu'),
                       ("BERTScore平均分", 'avg_bert'),
                       ("METEOR平均分", 'avg_meteor'),
                       ("综合得分", 'composite_score')):
        print(f"{label}: {scores[key]:.4f}")
    print(f"\n评估结果已保存到 {result_path}")


if __name__ == "__main__":
    asyncio.run(run_evaluation())