import asyncio
import json
from typing import List

import aiohttp
from bs4 import BeautifulSoup

from data_dyne.config import config
from data_dyne.llm.zhipu import semaphore_air, semaphore_rerank, zhipu_llm_air
from data_dyne.log import logger
from data_dyne.models.survey_model import PaperChunk
from data_dyne.prompt.outline_prompt import CLEAN_PAPER_PROMPT

# Module-level rate limiter for rerank calls
ZHIPU_RERANK_SEMAPHORE = asyncio.Semaphore(1)  # cap concurrent rerank requests at 1


async def clean_chunks(chunks: list[PaperChunk], topic: str):
    """Compress every chunk's text with an LLM, concurrently.

    Builds one CLEAN_PAPER_PROMPT per chunk, fans the LLM calls out with
    ``asyncio.gather`` and stores each response on ``chunk.clean_text``.

    Reference:
    https://python.langchain.com/docs/how_to/contextual_compression/#adding-contextual-compression-with-an-llmchainextractor

    # TODO: semantic chunking may be worth trying later — asking the LLM
    # for every chunk is very time-consuming.
    """
    logger.debug(f"Cleaning {len(chunks)} chunks")
    # Contextual compression does not need a powerful model, so the
    # lightweight "air" model is used for every prompt.
    tasks = [
        ainvoke_zhipu_air(CLEAN_PAPER_PROMPT.format(topic=topic, context=chunk.text))
        for chunk in chunks
    ]
    responses = await asyncio.gather(*tasks)

    for response, chunk in zip(responses, chunks):
        chunk.clean_text = response.content
    return chunks


async def ainvoke_zhipu_air(prompt: str):
    """Invoke the Zhipu "air" model, gated by the shared concurrency semaphore."""
    await semaphore_air.acquire()
    try:
        return await zhipu_llm_air.ainvoke(prompt)
    finally:
        # Equivalent to `async with semaphore_air:` — release on every path.
        semaphore_air.release()


async def clean_chunk(chunk: PaperChunk, topic: str):
    """Compress one chunk's text and prefix it with year/title metadata.

    Args:
        chunk: the paper chunk whose ``text`` is compressed in place
            (result stored on ``chunk.clean_text``).
        topic: survey topic used to focus the compression prompt.

    Returns:
        The same ``chunk``, with ``clean_text`` populated.

    Bug fixed: the original did not ``await`` ``zhipu_llm_air.ainvoke``, so a
    never-executed coroutine object was formatted into ``clean_text``. The
    response's ``.content`` is now used, consistent with ``clean_chunks``.
    """
    prompt = CLEAN_PAPER_PROMPT.format(topic=topic, context=chunk.text)
    rsp = await zhipu_llm_air.ainvoke(prompt)  # was missing `await`
    chunk.clean_text = (
        f"year: {chunk.year} \n title: {chunk.paper_title} \n text: {rsp.content}"
    )
    return chunk


async def zhipu_rerank(
    query, documents: List[str], top_n=0, semaphore=semaphore_rerank
):
    """Rerank *documents* against *query* using the Zhipu rerank API.

    Args:
        query: query string to rank against.
        documents: candidate documents to be reranked.
        top_n: number of results requested from the API (0 = API default).
        semaphore: optional asyncio.Semaphore gating concurrent requests;
            pass a falsy value to call the API without rate limiting.

    Returns:
        The parsed JSON response, e.g.::

            {
                "created": 1732083164,
                "id": "20241120141244890ab4ee4af84acf",
                "request_id": "1111111111",
                "results": [
                    {
                        "document": "Washington, D.C. ... is the capital of the United States.",
                        "index": 1,
                        "relevance_score": 0.99866986
                    },
                    {
                        "document": "Carson City is the capital city of the American state of Nevada.",
                        "index": 0,
                        "relevance_score": 0.001294368
                    }
                ],
                "usage": {"prompt_tokens": 72, "total_tokens": 72}
            }

    Raises:
        aiohttp.ClientResponseError: on a non-2xx HTTP status.
    """
    url = "https://open.bigmodel.cn/api/paas/v4/rerank"

    headers = {
        "Authorization": f"Bearer {config.zhipuai_api_key}",
        "Content-Type": "application/json",
    }

    data = {
        "request_id": "1111111111",
        "query": query,
        "documents": documents,
        "top_n": top_n,
        "return_documents": True,
        "return_raw_scores": True,
    }
    timeout = aiohttp.ClientTimeout(total=300)

    async def _post() -> str:
        # Read the raw body and parse JSON ourselves so a wrong
        # Content-Type header from the server cannot break decoding.
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(url, headers=headers, json=data) as response:
                response.raise_for_status()
                return await response.text()

    # The request logic was previously duplicated in both branches; it is
    # now a single helper, optionally guarded by the semaphore.
    if semaphore:
        async with semaphore:
            text = await _post()
    else:
        text = await _post()
    return json.loads(text)


async def extract_sup_tags(text: str) -> List[str]:
    """Extract the inner text of every ``<sup>`` tag in *text*.

    Args:
        text: markup (HTML/XML-like) that may contain ``<sup>`` tags.

    Returns:
        List[str]: the text content of each ``<sup>`` tag, in document order.

    Note: declared ``async`` for call-site symmetry; it performs no awaits.
    """
    parsed = BeautifulSoup(text, "html.parser")
    # tag.get_text() is the method form of the `.text` property.
    return [node.get_text() for node in parsed.find_all("sup")]


async def fix_tag(
    tag: str, all_paper_chunks: List[PaperChunk]
) -> tuple[str, int, float]:
    """Resolve a citation tag to its best-matching paper reference.

    Uses the Zhipu rerank model to score the tag against every candidate
    reference and picks the highest-scoring one.

    Args:
        tag: the citation tag content to resolve.
        all_paper_chunks: all candidate paper chunks.

    Returns:
        tuple:
            - str: the best-matching reference text.
            - int: that paper's 1-based index in the input list.
            - float: its relevance score.
    """
    candidates = [paper_chunk.reference for paper_chunk in all_paper_chunks]
    ranked = await zhipu_rerank(tag, candidates)
    # The rerank API returns results sorted by relevance, best first.
    best = ranked["results"][0]
    return best["document"], best["index"] + 1, best["relevance_score"]


if __name__ == "__main__":
    import asyncio

    query = "损失函数"
    documents = [
        "1在深度学习中，损失函数扮演着至关重要的角色。它不仅用于评估模型的表现，还通过反向传播来指导神经网络权重的更新。选择合适的损失函数对模型训练的成功至关重要。",
        "3损失函数（loss function）是用来度量模型的预测值f(x)与真实值Y的不一致程度，它是一个非负实值函数，通常使用L(Y, f(x))来表示，损失函数越小，模型的鲁棒性就越好。损失函数是经验风险函数的核心部分，也是结构风险函数的重要组成部分。",
        "2损失函数是机器学习中的一个重要概念，用于衡量模型预测与真实值之间的差异。它帮助我们量化模型的性能，并指导模型的优化过程。常见的损失函数包括均方误差、交叉熵等。",
    ]
    top_n = 3
    # Bug fixed: `api_key` was passed as a third positional argument, which
    # bound to the `top_n` parameter and then collided with the `top_n=`
    # keyword (TypeError: multiple values). `zhipu_rerank` reads the API key
    # from `config`, so no key is passed here.
    # SECURITY: the hard-coded API key previously embedded in this file has
    # been removed — credentials belong in configuration, never in source.
    res = asyncio.run(
        zhipu_rerank(
            query,
            documents,
            top_n=top_n,
        )
    )
    # Results come back sorted by relevance, highest score first.
    # (The loop variable no longer shadows the outer `res`.)
    for item in res["results"]:
        print(f"{item['relevance_score']}: {item['document']}")
