import json
import requests
import numpy as np
from tqdm import tqdm
import pandas as pd
from typing import List, Dict, Tuple
import jieba
import math
from collections import Counter
import warnings
from data_v_1_0 import load_question_and_categories,load_answer,load_reference_only
from dotenv import load_dotenv
import os
from datetime import datetime
import re
# v3.0: revamped evaluation scheme — four scoring dimensions, eight
# question categories.

warnings.filterwarnings("ignore")
load_dotenv("../250430.env")  # load API credentials from the env file
# Security: log only whether the key is present — never echo the raw secret
# (the previous version printed EMBEDDING_API_KEY itself to stdout).
print("EMBEDDING_API_KEY set:", bool(os.getenv("EMBEDDING_API_KEY")))

# --------------------------
# 模块1：数据加载与校验
# --------------------------
def load_data(
    answer_file: str = "聊天助手_answers_20250430_094618.json",
    sheet: str = "聊天助手",
) -> Tuple[List[Dict], List[str], List[str]]:
    """Load questions, model answers and reference answers.

    Args:
        answer_file: JSON file holding the assistant's answers
            (default preserves the original hard-coded path).
        sheet: worksheet name the reference answers are read from.

    Returns:
        Tuple ``(questions, answers, references)`` of equal length.

    Raises:
        ValueError: if the three sources disagree in length.
    """
    questions = load_question_and_categories()

    answers = load_answer(answer_file)

    references = load_reference_only(sheet=sheet)

    # A real exception instead of `assert`: asserts vanish under `python -O`.
    if not (len(questions) == len(answers) == len(references)):
        raise ValueError("数据长度不匹配")
    return questions, answers, references


# --------------------------
# 模块2：嵌入计算服务
# --------------------------
class EmbeddingClient:
    """Thin client for an OpenAI-compatible ``/embeddings`` endpoint (bge-m3)."""

    def __init__(self, api_url: str, api_key: str):
        """Store the endpoint URL and build bearer-auth headers."""
        self.api_url = api_url
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}"
        }

    def get_embeddings(self, texts: List[str], timeout: float = 30.0) -> List[List[float]]:
        """Embed *texts* in a single batch request.

        Args:
            texts: strings to embed; result order matches input order.
            timeout: request timeout in seconds.  New defaulted parameter:
                the original call had no timeout and could hang forever on
                an unresponsive server.

        Returns:
            One embedding vector (list of floats) per input text.

        Raises:
            requests.HTTPError: on a non-2xx response.
        """
        response = requests.post(
            self.api_url,
            headers=self.headers,
            json={"model": "bge-m3", "encoding_format": "float", "input": texts},
            timeout=timeout,
        )
        response.raise_for_status()
        return [item["embedding"] for item in response.json()["data"]]

    def cosine_similarity(self, vec1: List[float], vec2: List[float]) -> float:
        """Return the cosine similarity of two vectors.

        The ``1e-10`` term keeps the division defined for zero-norm vectors.
        """
        v1 = np.asarray(vec1, dtype=float)
        v2 = np.asarray(vec2, dtype=float)
        denom = np.linalg.norm(v1) * np.linalg.norm(v2)
        return float(np.dot(v1, v2) / (denom + 1e-10))


# --------------------------
# 模块3：评分计算组件
# --------------------------
class Evaluator:
    """Four-dimension answer evaluator (accuracy / relevance / clarity /
    completeness) for the v3.0 scheme, with one weight profile per
    question category.

    Embedding-based sub-scores go through ``embed_client``; lexical
    sub-scores use jieba tokenization.
    """

    # Weight profile per question category (category2).  Hoisted to a class
    # constant so the dict is built once instead of on every _get_weights()
    # call; all callers in this module treat the returned mapping as
    # read-only.
    _WEIGHT_CONFIGS = {
        # 1.1 factual questions
        "事实性问题": {
            "dim_weights": {"accuracy": 0.55, "relevance": 0.20, "clarity": 0.15, "completeness": 0.10},
            "accuracy_weights": {"semantic_similarity": 0.5, "entity_precision": 0.5},
            "relevance_weights": {"semantic_to_question": 0.5, "semantic_to_reference": 0.2,
                                  "entity_to_question": 0.3},
            "clarity_weights": {"sentence_length": 0.2, "vocab_complexity": 0.2, "paragraph_structure": 0.1,
                                "readability": 0.3, "punctuation": 0.2},
            "completeness_weights": {"entity_recall": 0.4, "semantic_to_question": 0.2,
                                     "semantic_to_reference": 0.4}
        },
        # 1.2 advisory questions
        "建议性问题": {
            "dim_weights": {"accuracy": 0.20, "relevance": 0.35, "clarity": 0.20, "completeness": 0.25},
            "accuracy_weights": {"semantic_similarity": 0.5, "entity_precision": 0.5},
            "relevance_weights": {"semantic_to_question": 0.6, "semantic_to_reference": 0.1,
                                  "entity_to_question": 0.3},
            "clarity_weights": {"sentence_length": 0.1, "vocab_complexity": 0.1, "paragraph_structure": 0.3,
                                "readability": 0.3, "punctuation": 0.2},
            "completeness_weights": {"entity_recall": 0.3, "semantic_to_question": 0.3,
                                     "semantic_to_reference": 0.4}
        },
        # 1.3 comparative questions
        "比较性问题": {
            "dim_weights": {"accuracy": 0.20, "relevance": 0.25, "clarity": 0.20, "completeness": 0.35},
            "accuracy_weights": {"semantic_similarity": 0.4, "entity_precision": 0.6},
            "relevance_weights": {"semantic_to_question": 0.5, "semantic_to_reference": 0.2,
                                  "entity_to_question": 0.3},
            "clarity_weights": {"sentence_length": 0.1, "vocab_complexity": 0.1, "paragraph_structure": 0.4,
                                "readability": 0.2, "punctuation": 0.2},
            "completeness_weights": {"entity_recall": 0.3, "semantic_to_question": 0.2,
                                     "semantic_to_reference": 0.5}
        },
        # 1.4 procedural (how-to) questions
        "操作性问题": {
            "dim_weights": {"accuracy": 0.30, "relevance": 0.20, "clarity": 0.30, "completeness": 0.20},
            "accuracy_weights": {"semantic_similarity": 0.3, "entity_precision": 0.7},
            "relevance_weights": {"semantic_to_question": 0.4, "semantic_to_reference": 0.3,
                                  "entity_to_question": 0.3},
            "clarity_weights": {"sentence_length": 0.1, "vocab_complexity": 0.1, "paragraph_structure": 0.5,
                                "readability": 0.2, "punctuation": 0.1},
            "completeness_weights": {"entity_recall": 0.5, "semantic_to_question": 0.2,
                                     "semantic_to_reference": 0.3}
        },
        # 1.5 definitional questions
        "定义性问题": {
            "dim_weights": {"accuracy": 0.25, "relevance": 0.25, "clarity": 0.20, "completeness": 0.30},
            "accuracy_weights": {"semantic_similarity": 0.6, "entity_precision": 0.4},
            "relevance_weights": {"semantic_to_question": 0.5, "semantic_to_reference": 0.3,
                                  "entity_to_question": 0.2},
            "clarity_weights": {"sentence_length": 0.2, "vocab_complexity": 0.3, "paragraph_structure": 0.1,
                                "readability": 0.3, "punctuation": 0.1},
            "completeness_weights": {"entity_recall": 0.3, "semantic_to_question": 0.3,
                                     "semantic_to_reference": 0.4}
        },
        # 1.6 predictive questions
        "预测性问题": {
            "dim_weights": {"accuracy": 0.25, "relevance": 0.25, "clarity": 0.20, "completeness": 0.30},
            "accuracy_weights": {"semantic_similarity": 0.5, "entity_precision": 0.5},
            "relevance_weights": {"semantic_to_question": 0.6, "semantic_to_reference": 0.2,
                                  "entity_to_question": 0.2},
            "clarity_weights": {"sentence_length": 0.2, "vocab_complexity": 0.2, "paragraph_structure": 0.2,
                                "readability": 0.3, "punctuation": 0.1},
            "completeness_weights": {"entity_recall": 0.3, "semantic_to_question": 0.3,
                                     "semantic_to_reference": 0.4}
        },
        # 1.7 explanatory questions
        "解释性问题": {
            "dim_weights": {"accuracy": 0.25, "relevance": 0.20, "clarity": 0.25, "completeness": 0.30},
            "accuracy_weights": {"semantic_similarity": 0.4, "entity_precision": 0.6},
            "relevance_weights": {"semantic_to_question": 0.5, "semantic_to_reference": 0.3,
                                  "entity_to_question": 0.2},
            "clarity_weights": {"sentence_length": 0.1, "vocab_complexity": 0.2, "paragraph_structure": 0.3,
                                "readability": 0.3, "punctuation": 0.1},
            "completeness_weights": {"entity_recall": 0.3, "semantic_to_question": 0.3,
                                     "semantic_to_reference": 0.4}
        },
        # 1.8 evaluative questions
        "评价性问题": {
            "dim_weights": {"accuracy": 0.20, "relevance": 0.35, "clarity": 0.25, "completeness": 0.20},
            "accuracy_weights": {"semantic_similarity": 0.3, "entity_precision": 0.7},
            "relevance_weights": {"semantic_to_question": 0.6, "semantic_to_reference": 0.1,
                                  "entity_to_question": 0.3},
            "clarity_weights": {"sentence_length": 0.1, "vocab_complexity": 0.2, "paragraph_structure": 0.4,
                                "readability": 0.2, "punctuation": 0.1},
            "completeness_weights": {"entity_recall": 0.4, "semantic_to_question": 0.3,
                                     "semantic_to_reference": 0.3}
        }
    }

    def __init__(self, embed_client: "EmbeddingClient"):
        self.embed_client = embed_client

    # ---------- shared helper ----------
    @staticmethod
    def _extract_entities(text: str) -> set:
        """jieba-tokenize *text* and keep words longer than one character.

        This closure was previously re-defined inline in four methods.
        """
        return {w for w in jieba.lcut(text) if len(w) > 1}

    # ---------- accuracy dimension ----------
    def accuracy_semantic_similarity(self, answer: str, reference: str) -> float:
        """Cosine similarity between answer and reference embeddings."""
        emb_answer, emb_ref = self.embed_client.get_embeddings([answer, reference])
        return self.embed_client.cosine_similarity(emb_answer, emb_ref)

    def accuracy_entity_precision(self, answer: str, reference: str) -> float:
        """Share of the reference's entity words that appear in the answer.

        NOTE: despite the name, the denominator is the *reference* entity
        set (a recall-style ratio); kept as-is for interface stability.
        """
        ans_entities = self._extract_entities(answer)
        ref_entities = self._extract_entities(reference)

        if not ref_entities:
            return 1.0  # nothing required -> vacuously perfect

        correct = ans_entities & ref_entities
        return len(correct) / len(ref_entities)

    # ---------- relevance dimension ----------
    def relevance_semantic_to_question(self, answer: str, question: str) -> float:
        """Cosine similarity between answer and question embeddings."""
        emb_answer, emb_question = self.embed_client.get_embeddings([answer, question])
        return self.embed_client.cosine_similarity(emb_answer, emb_question)

    def relevance_entity_to_question(self, answer: str, question: str) -> float:
        """Share of the question's entity words covered by the answer."""
        ques_entities = self._extract_entities(question)

        if not ques_entities:
            return 1.0

        matched = self._extract_entities(answer) & ques_entities
        return len(matched) / len(ques_entities)

    # ---------- clarity dimension ----------
    def clarity_sentence_length(self, answer: str) -> float:
        """Average sentence length score: 5% penalty per word per sentence.

        Sentence length is the jieba token count; scores are clamped to
        [0, 1] (zero at 20+ words per sentence on average).
        """
        sentences = re.split(r'[。！？]', answer)  # split on CJK sentence enders
        sentence_lengths = [len(list(jieba.cut(s))) for s in sentences if s.strip()]
        avg_sentence_length = sum(sentence_lengths) / len(sentence_lengths) if sentence_lengths else 0
        return max(0, 1 - (avg_sentence_length * 0.05))

    def clarity_vocab_complexity(self, answer: str) -> float:
        """Vocabulary complexity: words longer than 2 characters count as complex."""
        words = jieba.lcut(answer)
        complex_words = [w for w in words if len(w) > 2]
        # 1e-5 keeps the division defined for an empty answer
        return 1 - len(complex_words) / (len(words) + 1e-5)

    def clarity_paragraph_structure(self, answer: str) -> float:
        """Paragraph structure: full marks at five or more newline-separated paragraphs."""
        paragraphs = answer.split('\n')
        return min(1, len(paragraphs) / 5)

    def clarity_readability(self, answer: str) -> float:
        """Readability from average word length (1-4 characters is the reasonable range)."""
        words = jieba.lcut(answer)
        if not words:
            return 1.0
        avg_len = sum(len(w) for w in words) / len(words)
        return max(0, 1 - (avg_len - 1) / 3)

    def clarity_punctuation(self, answer: str) -> float:
        """Punctuation density: full marks at ten or more CJK punctuation marks."""
        puncts = sum(1 for c in answer if c in '，。、；：！？')
        return min(1, puncts / 10)

    # ---------- completeness dimension ----------
    def completeness_entity_recall(self, answer: str, question: str) -> float:
        """Recall of the question's entity words in the answer."""
        ques_entities = self._extract_entities(question)

        if not ques_entities:
            return 1.0

        recalled = self._extract_entities(answer) & ques_entities
        return len(recalled) / len(ques_entities)

    def completeness_semantic_to_question(self, answer: str, question: str) -> float:
        """Answer/question similarity (identical metric to the relevance variant)."""
        return self.relevance_semantic_to_question(answer, question)

    # --------------------------
    # main evaluation entry point
    # --------------------------
    def evaluate(self, question: str, answer: str, reference: str, category2: str) -> Dict:
        """Run the full evaluation for one answer.

        Returns:
            ``{"scores": {accuracy, relevance, clarity, completeness,
            total_score}, "weights": <profile>}`` where ``total_score`` is a
            percentage rounded to 3 decimals.
        """
        weights = self._get_weights(category2)

        # Compute each embedding similarity exactly once.  Previously the
        # answer/reference pair was embedded three times and the
        # answer/question pair twice per evaluation (5 HTTP calls for 2
        # distinct pairs).
        sim_to_ref = self.accuracy_semantic_similarity(answer, reference)
        sim_to_question = self.relevance_semantic_to_question(answer, question)

        scores = {
            "accuracy": self._calc_accuracy(answer, reference, weights,
                                            sim_to_ref=sim_to_ref),
            "relevance": self._calc_relevance(answer, question, reference, weights,
                                              sim_to_question=sim_to_question,
                                              sim_to_ref=sim_to_ref),
            "clarity": self._calc_clarity(answer, weights),
            "completeness": self._calc_completeness(answer, question, reference, weights,
                                                    sim_to_question=sim_to_question,
                                                    sim_to_ref=sim_to_ref)
        }

        total_score = sum(
            scores[dim] * weights["dim_weights"][dim]
            for dim in ["accuracy", "relevance", "clarity", "completeness"]
        )

        return {
            "scores": {
                **scores,
                "total_score": round(total_score * 100, 3)
            },
            "weights": weights
        }

    def _calc_accuracy(self, answer: str, reference: str, weights: Dict,
                       sim_to_ref: float = None) -> float:
        """Weighted accuracy sub-scores; *sim_to_ref* may be precomputed."""
        if sim_to_ref is None:
            sim_to_ref = self.accuracy_semantic_similarity(answer, reference)
        subscores = {
            "semantic_similarity": sim_to_ref,
            "entity_precision": self.accuracy_entity_precision(answer, reference)
        }
        return sum(
            subscores[k] * weights["accuracy_weights"][k]
            for k in subscores
        )

    def _calc_relevance(self, answer: str, question: str, reference: str, weights: Dict,
                        sim_to_question: float = None, sim_to_ref: float = None) -> float:
        """Weighted relevance sub-scores; similarities may be precomputed."""
        if sim_to_question is None:
            sim_to_question = self.relevance_semantic_to_question(answer, question)
        if sim_to_ref is None:
            sim_to_ref = self.accuracy_semantic_similarity(answer, reference)
        subscores = {
            "semantic_to_question": sim_to_question,
            "semantic_to_reference": sim_to_ref,
            "entity_to_question": self.relevance_entity_to_question(answer, question)
        }
        return sum(
            subscores[k] * weights["relevance_weights"][k]
            for k in subscores
        )

    def _calc_clarity(self, answer: str, weights: Dict) -> float:
        """Weighted clarity sub-scores (purely lexical — no embedding calls)."""
        subscores = {
            "sentence_length": self.clarity_sentence_length(answer),
            "vocab_complexity": self.clarity_vocab_complexity(answer),
            "paragraph_structure": self.clarity_paragraph_structure(answer),
            "readability": self.clarity_readability(answer),
            "punctuation": self.clarity_punctuation(answer)
        }
        return sum(
            subscores[k] * weights["clarity_weights"][k]
            for k in subscores
        )

    def _calc_completeness(self, answer: str, question: str, reference: str, weights: Dict,
                           sim_to_question: float = None, sim_to_ref: float = None) -> float:
        """Weighted completeness sub-scores; similarities may be precomputed."""
        if sim_to_question is None:
            sim_to_question = self.completeness_semantic_to_question(answer, question)
        if sim_to_ref is None:
            sim_to_ref = self.accuracy_semantic_similarity(answer, reference)
        subscores = {
            "entity_recall": self.completeness_entity_recall(answer, question),
            "semantic_to_question": sim_to_question,
            "semantic_to_reference": sim_to_ref
        }
        return sum(
            subscores[k] * weights["completeness_weights"][k]
            for k in subscores
        )

    @staticmethod
    def _get_weights(category2: str) -> Dict:
        """Return the weight profile for *category2*; unknown categories
        fall back to the factual-question profile."""
        return Evaluator._WEIGHT_CONFIGS.get(category2, Evaluator._WEIGHT_CONFIGS["事实性问题"])


# --------------------------
# 模块4：结果导出
# --------------------------
def save_to_excel(results: List[Dict], output_path: str):
    """Write per-question evaluation results to an Excel workbook.

    One row per result, with dimension scores and weights expressed as
    percentages; column widths are auto-fitted and capped at 30 characters.
    """
    rows = []
    for r in results:
        scores = r["scores"]
        dim_weights = r["weights"]["dim_weights"]
        rows.append({
            "问题": r["question"],
            "问题类别1": r["category1"],
            "问题类别2": r["category2"],
            "参考答案": r["reference"],
            "回答": r["answer"],
            "准确性得分(%)": scores["accuracy"] * 100,
            "相关性得分(%)": scores["relevance"] * 100,
            "清晰度得分(%)": scores["clarity"] * 100,
            "完整性得分(%)": scores["completeness"] * 100,
            "总得分": scores["total_score"],
            "准确性权重(%)": dim_weights["accuracy"] * 100,
            "相关性权重(%)": dim_weights["relevance"] * 100,
            "清晰度权重(%)": dim_weights["clarity"] * 100,
            "完整性权重(%)": dim_weights["completeness"] * 100,
        })
    df = pd.DataFrame(rows)

    with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
        df.to_excel(writer, index=False)
        worksheet = writer.sheets['Sheet1']

        # Fit each column to its longest rendered cell value.
        for column_cells in worksheet.columns:
            widest = max(len(str(cell.value)) for cell in column_cells)
            worksheet.column_dimensions[column_cells[0].column_letter].width = min(widest + 2, 30)


# --------------------------
# 主执行流程
# --------------------------
def main(sheet="聊天助手"):
    """Run the end-to-end evaluation for one worksheet and export to Excel.

    Args:
        sheet: worksheet name used in the output file name.
    """
    # Initialize components
    embed_client = EmbeddingClient(
        api_url="http://10.10.252.225:8000/v1-openai/embeddings",
        api_key=os.getenv("EMBEDDING_API_KEY")
    )
    evaluator = Evaluator(embed_client)

    # Load data
    questions, answers, references = load_data()

    # Run the evaluation
    results = []
    for q, ans, ref in tqdm(zip(questions, answers, references), total=len(questions), desc="评估进度"):
        try:
            # Evaluate exactly once per item: the previous version called
            # evaluate() twice (discarding the first result), doubling every
            # embedding API request.
            evaluation = evaluator.evaluate(q["question"], ans, ref, q["category2"])
            results.append({
                "question": q["question"],
                "category1": q["category1"],
                "category2": q["category2"],
                "reference": ref,
                "answer": ans,
                **evaluation  # adds "scores" and "weights"
            })
        except Exception as e:
            # Best-effort batch run: report the failing question and continue.
            print(f"评估失败: {q['question']} - {str(e)}")

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_file = f"../data/{sheet}_evaluation_results_{timestamp}.xlsx"

    # Persist results
    save_to_excel(results, output_file)

    print(f"评估完成，结果已保存到 {output_file}")


if __name__ == "__main__":
    main()