import asyncio
import functools
import time
from typing import List

import aiohttp

from data_dyne.log import logger
from data_dyne.models.survey_model import Paper, PaperChunk
from data_dyne.utils import zhipu_rerank


class TokenBucket:
    """Token-bucket rate limiter for asyncio code.

    Tokens accrue at ``rate`` per second up to ``capacity``; each call to
    :meth:`acquire` consumes one token, sleeping when the bucket is empty.
    """

    def __init__(self, rate: float, capacity: int = 1):
        """
        Initialize the token bucket.

        Args:
            rate: Tokens added per second (sustained requests/second).
            capacity: Maximum tokens the bucket can hold (burst size).
        """
        self.rate = rate
        self.capacity = capacity
        self.tokens = capacity  # start full so an initial burst is allowed
        self.last_time = time.time()
        # Serializes acquirers; held across the sleep so waiting callers
        # queue up one at a time instead of racing for the same token.
        self.lock = asyncio.Lock()

    async def acquire(self):
        """Consume one token, sleeping until one becomes available."""
        async with self.lock:
            current = time.time()
            elapsed = current - self.last_time
            self.last_time = current

            # Refill from elapsed wall-clock time, capped at capacity.
            self.tokens = min(self.capacity, self.tokens + elapsed * self.rate)

            if self.tokens < 1:
                # Sleep exactly long enough to regenerate the missing
                # fraction of a token, then consume it.
                wait_time = (1 - self.tokens) / self.rate
                await asyncio.sleep(wait_time)
                # BUGFIX: advance last_time past the sleep. Previously the
                # next acquire() measured elapsed time from *before* the
                # sleep and minted tokens for the sleep interval again,
                # double-counting regeneration and letting callers exceed
                # the configured rate by up to 2x under contention.
                self.last_time = time.time()
                self.tokens = 0
            else:
                self.tokens -= 1


def rate_limit(func):
    """Decorator that throttles an async method via its instance's token bucket.

    If the bound instance exposes a ``rate_limiter`` attribute, one token is
    acquired before the wrapped coroutine runs; otherwise the call passes
    straight through.
    """

    @functools.wraps(func)
    async def limited(self, *args, **kwargs):
        # Guard clause: instances without a limiter are not throttled.
        if not hasattr(self, "rate_limiter"):
            return await func(self, *args, **kwargs)
        await self.rate_limiter.acquire()
        return await func(self, *args, **kwargs)

    return limited


class KBAPI:
    """Async client for the paper knowledge-base HTTP API.

    Wraps the REST endpoints exposed at ``base_url`` and adds:
      * an ``asyncio.Semaphore`` bounding concurrent requests,
      * a token-bucket rate limiter applied via the ``@rate_limit`` decorator.

    Usable as an async context manager so the underlying
    ``aiohttp.ClientSession`` is closed automatically.
    """

    def __init__(
        self,
        base_url="http://180.184.65.98:38880/atomgit",
        semaphore: asyncio.Semaphore = None,
        rate_limit_per_second: float = 5.0,
    ):
        """
        Args:
            base_url: Root URL of the knowledge-base service.
            semaphore: Optional semaphore limiting concurrent requests.
                Defaults to a fresh ``Semaphore(40)`` per instance.
                (The previous signature used a module-level ``Semaphore(40)``
                default, which was created at import time and shared by every
                instance and every event loop — a mutable-default bug.)
            rate_limit_per_second: Requests/second allowed by the limiter.
        """
        self.base_url = base_url.rstrip("/")
        self.session = None  # lazily created aiohttp.ClientSession
        self.semaphore = semaphore if semaphore is not None else asyncio.Semaphore(40)
        # Token bucket: sustained rate as configured, bursts up to 10.
        self.rate_limiter = TokenBucket(rate=rate_limit_per_second, capacity=10)

    async def __aenter__(self):
        """Open the HTTP session when entering an ``async with`` block."""
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the HTTP session when leaving the ``async with`` block."""
        if self.session:
            await self.session.close()

    async def close(self):
        """Explicitly close the aiohttp session (alternative to ``async with``)."""
        if self.session:
            await self.session.close()

    def _ensure_session(self):
        """Create the lazily-initialized aiohttp session on first use."""
        if not self.session:
            self.session = aiohttp.ClientSession()

    async def _session_get(self, url, params=None):
        """Issue a GET request and return the decoded JSON body.

        Concurrency is bounded by ``self.semaphore`` when one is set.

        Args:
            url: Absolute URL to request.
            params: Optional query parameters. (Was a mutable ``{}`` default,
                a Python anti-pattern; ``None`` sends no parameters.)

        Returns:
            The JSON-decoded response payload.
        """
        self._ensure_session()

        # Generous timeout: some endpoints return whole-paper payloads.
        timeout = aiohttp.ClientTimeout(total=300)

        async def do_get():
            async with self.session.get(
                url, params=params, timeout=timeout
            ) as response:
                return await response.json()

        if self.semaphore:
            async with self.semaphore:
                return await do_get()
        return await do_get()

    @rate_limit
    async def get_metadata(self) -> dict:
        """Fetch metadata about the paper database."""
        return await self._session_get(f"{self.base_url}/metadata")

    def __flatten_dict_to_last_level(self, d):
        """Flatten an arbitrarily nested dict, keeping only leaf key/value pairs.

        NOTE: duplicate leaf keys at different nesting levels overwrite each
        other (last one wins).

        Args:
            d (dict): The nested dictionary to flatten.

        Returns:
            dict: A single-level dict containing only the leaf entries.
        """
        flat_dict = {}
        for key, value in d.items():
            if isinstance(value, dict):
                # Recurse into nested dicts and merge their leaves.
                flat_dict.update(self.__flatten_dict_to_last_level(value))
            else:
                flat_dict[key] = value
        return flat_dict

    async def search_paper_ids(
        self, query: str, top_k: int = 30, min_distance=0
    ) -> list[str]:
        """Search papers for ``query`` and return the matching paper IDs.

        IDs preserve the score ordering of :meth:`search_papers`; the list
        may contain duplicates when several chunks of one paper match.
        """
        papers = await self.search_papers(query, top_k, min_distance)
        return [paper.paper_id for paper in papers]

    @rate_limit
    async def search_papers(
        self, query: str, top_k: int = 30, min_distance=0
    ) -> list[PaperChunk]:
        """Search paper chunks matching ``query``, sorted by the server's score.

        Args:
            query: Free-text search query.
            top_k: Maximum number of chunks requested from the service.
            min_distance: Chunks with ``distance`` below this are dropped.
                The server returns results ordered by distance, so iteration
                stops at the first chunk under the threshold.

        Returns:
            Chunks with ``distance >= min_distance``, in server order.
        """
        url = f"{self.base_url}/search_papers"
        params = {"query": query, "top_k": top_k}

        res_data = await self._session_get(url, params)

        results: list[PaperChunk] = []
        for raw in res_data:
            chunk = PaperChunk(**self.__flatten_dict_to_last_level(raw))
            if chunk.distance < min_distance:
                # Results arrive sorted; everything after this is worse.
                break
            results.append(chunk)
        return results

    @rate_limit
    async def search_papers_abs_with_score(
        self,
        query: str,
        top_k: int = 10,
        min_score: float = 15.0,
        max_concurrent: int = 5,
    ) -> list[PaperChunk]:
        """Search papers and rerank their first chunk (usually the abstract).

        Steps:
            1. Search paper IDs relevant to ``query``.
            2. Fetch each paper and take its first chunk.
            3. Rerank those chunks against ``query`` for relevance scores.
            4. Sort by score; keep the top ``top_k`` with score >= ``min_score``.

        Args:
            query: Search query text.
            top_k: Maximum number of results. Defaults to 10.
            min_score: Minimum relevance-score threshold. Defaults to 15.0.
            max_concurrent: Maximum concurrent paper fetches. Defaults to 5.

        Returns:
            list[PaperChunk]: Abstract chunks sorted by relevance score.
        """
        paper_ids = await self.search_paper_ids(query)
        if not paper_ids:
            return []

        # Bound concurrent whole-paper fetches independently of the
        # session-level semaphore.
        semaphore = asyncio.Semaphore(max_concurrent)

        async def fetch(paper_id):
            async with semaphore:
                return await self.get_paper_by_paper_id(paper_id)

        papers = await asyncio.gather(*[fetch(pid) for pid in paper_ids])

        # First chunk of each paper is assumed to be the abstract.
        papers_abs = [paper.chunks[0] for paper in papers if paper and paper.chunks]

        if papers_abs:
            await self.add_score_to_papers(papers_abs, query)
            papers_abs.sort(key=lambda c: c.score, reverse=True)
            papers_abs = [c for c in papers_abs[:top_k] if c.score >= min_score]

        return papers_abs

    @rate_limit
    async def search_papers_with_score(
        self, query: str, top_k: int = 10, min_score: float = 15.0
    ) -> list[PaperChunk]:
        """Search paper chunks and rerank them against ``query``.

        Steps:
            1. Search chunks relevant to ``query``.
            2. Rerank them for relevance scores.
            3. Sort by score; keep the top ``top_k`` with score >= ``min_score``.

        Args:
            query: Search query text.
            top_k: Maximum number of results. Defaults to 10.
            min_score: Minimum relevance-score threshold. Defaults to 15.0.

        Returns:
            list[PaperChunk]: Chunks sorted by relevance score. Returns an
            empty list on any error (deliberate best-effort behavior).
        """
        try:
            paper_chunks = await self.search_papers(query)
            if not paper_chunks:
                return []

            await self.add_score_to_papers(paper_chunks, query)
            paper_chunks.sort(key=lambda c: c.score, reverse=True)

            return [c for c in paper_chunks[:top_k] if c.score >= min_score]

        except Exception as e:
            import traceback

            logger.error(f"Error in search_papers_with_score: {str(e)}")
            logger.error("详细堆栈信息:")
            logger.error(traceback.format_exc())
            # Swallow errors on purpose: callers get no results, not a crash.
            return []

    @rate_limit
    async def add_score_to_papers(
        self, papers: list[PaperChunk], query: str, topk=10
    ) -> list[PaperChunk]:
        """Rerank ``papers`` against ``query`` via the Zhipu API, in batches.

        Mutates each chunk's ``score`` in place and also returns the chunks
        sorted by descending score (callers may use either). ``topk`` is
        currently unused; it is kept for interface compatibility.
        """
        if not papers:
            return []

        # Adapt batch size to the workload: tiny inputs go in one call,
        # larger ones use bigger batches to reduce API round-trips.
        total_papers = len(papers)
        if total_papers <= 3:
            batch_size = total_papers
        elif total_papers <= 10:
            batch_size = 3
        else:
            batch_size = 5

        batches = [
            papers[i : i + batch_size] for i in range(0, len(papers), batch_size)
        ]

        # At most 3 rerank calls in flight at once.
        semaphore = asyncio.Semaphore(3)

        async def rerank(batch):
            async with semaphore:
                return await self._rerank_step(query, batch)

        rerank_results = await asyncio.gather(*[rerank(b) for b in batches])

        scored = [chunk for result in rerank_results for chunk in result]
        return sorted(scored, key=lambda c: c.score, reverse=True)

    async def _rerank_step(self, query: str, batch: list[PaperChunk], max_retries=3):
        """Call the Zhipu rerank API for one batch, with exponential backoff.

        On success every chunk in ``batch`` gets a ``score`` and the scored
        chunks are returned. After ``max_retries`` failures the original
        batch is returned with every ``score`` set to 0.
        """
        # Truncate each document to 4095 chars (presumably the rerank API's
        # input limit — TODO confirm against the Zhipu documentation).
        documents = [paper.text[:4095] for paper in batch]
        base_delay = 1.0  # seconds; doubles on each retry

        for attempt in range(1, max_retries + 1):
            try:
                results = await zhipu_rerank(query, documents)

                # Map the (truncated) document text back to its chunk.
                # NOTE(review): identical truncated texts collide here — the
                # last chunk wins the map slot.
                paper_map = {paper.text[:4095]: paper for paper in batch}
                scored = []
                for result in results["results"]:
                    paper = paper_map[result["document"]]
                    paper.score = result["relevance_score"]
                    scored.append(paper)
                return scored

            except Exception as e:
                delay = base_delay * (2 ** (attempt - 1))
                # Log via the module logger instead of print() so failures
                # reach the application logs (consistent with the rest of
                # this module).
                logger.warning(
                    f"Error processing batch (attempt {attempt}/{max_retries}): {e}"
                )

                if attempt >= max_retries:
                    logger.error("Max retries reached. Giving up on this batch.")
                    # Fall back to a neutral score so sorting still works.
                    for paper in batch:
                        paper.score = 0
                    return batch

                logger.warning(f"Retrying in {delay} seconds...")
                await asyncio.sleep(delay)

        # Unreachable: the loop always returns; kept for safety.
        return []

    @rate_limit
    async def query_chunk_by_paper_id(
        self, paper_id: str, top_k: int = 30
    ) -> List[PaperChunk]:
        """Fetch up to ``top_k`` chunks of the paper with ``paper_id``."""
        url = f"{self.base_url}/query_by_paper_id"
        params = {"paper_id": paper_id, "top_k": top_k}

        res_data = await self._session_get(url, params)
        return [PaperChunk(**chunk) for chunk in res_data]

    async def query_chunk_by_title(
        self, title: str, top_k: int = 30
    ) -> List[PaperChunk]:
        """Fetch chunks of the paper whose title matches ``title`` exactly."""
        url = f"{self.base_url}/query_by_title"
        params = {"title": title, "top_k": top_k}

        res_data = await self._session_get(url, params)
        return [PaperChunk(**chunk) for chunk in res_data]

    async def query_chunk_by_title_contain(
        self, title: str, top_k: int = 30
    ) -> List[PaperChunk]:
        """Search chunks whose paper title contains ``title``. No ranking.

        Matching is case-sensitive, and the backend tokenizes aggressively:
        single letters recall very little, while partial tokens can match
        unexpectedly (e.g. "fficient" matches "efficient"-style titles).

        Args:
            title: Text the paper title must contain.
            top_k: Maximum number of results.

        Returns:
            Matching chunks in backend order.
        """
        url = f"{self.base_url}/query_by_title_contain"
        params = {"title": title, "top_k": top_k}

        res_data = await self._session_get(url, params)
        # (A leftover debug loop that printed per-title match counts to
        # stdout was removed here.)
        return [PaperChunk(**paper) for paper in res_data]

    async def query_chunk_by_chunk_contain(
        self, chunk: str = "machine learning", top_k: int = 30
    ) -> List[PaperChunk]:
        """Search chunks whose text contains ``chunk`` (plain substring match).

        Results are completely unranked (ordered by ID, not by how often the
        text contains ``chunk``); rerank afterwards if ordering matters.

        Args:
            chunk: Text the chunk body must contain.
            top_k: Maximum number of results.

        Returns:
            Matching chunks in backend order.
        """
        url = f"{self.base_url}/query_by_chunk_contain"
        params = {"chunk": chunk, "top_k": top_k}

        res_data = await self._session_get(url, params)
        return [PaperChunk(**paper) for paper in res_data]

    async def get_paper_by_paper_id(self, paper_id: str) -> Paper:
        """Fetch all chunks of ``paper_id`` and wrap them in a ``Paper``."""
        paper_chunks = await self.query_chunk_by_paper_id(paper_id)
        return Paper(chunks=paper_chunks)

    async def query_by_title_like(
        self, title: str, top_k: int = 30
    ) -> List[PaperChunk]:
        """Find papers whose titles are similar to ``title``.

        Args:
            title (str): Title to match approximately.
            top_k (int, optional): Maximum number of results. Defaults to 30.

        Returns:
            List[PaperChunk]: Similar-paper chunks.
        """
        url = f"{self.base_url}/query_by_title_like"
        params = {"title": title, "top_k": top_k}

        res_data = await self._session_get(url, params)
        return [PaperChunk(**paper) for paper in res_data]

    async def query_by_keyword(self, keyword: str) -> List[dict]:
        """Look up paper IDs and titles by keyword.

        Args:
            keyword (str): Keyword to query.

        Returns:
            List[dict]: Dicts containing paper IDs and titles.
        """
        url = f"{self.base_url}/query_by_keyword"
        return await self._session_get(url, {"keyword": keyword})

    async def query_whole_text_by_id(self, paper_id: str) -> str:
        """Fetch the full text of the paper with ``paper_id``.

        Returns:
            str: The full text, or None if the paper is not found.
        """
        url = f"{self.base_url}/query_whole_text_by_id"
        return await self._session_get(url, {"paper_id": paper_id})

    async def query_whole_text_by_title(self, title: str) -> str:
        """Fetch the full text of the paper titled ``title``.

        Returns:
            str: The full text, or None if the paper is not found.
        """
        url = f"{self.base_url}/query_whole_text_by_title"
        return await self._session_get(url, {"title": title})

    async def query_keywords_by_id(self, paper_id: str) -> List[str]:
        """Fetch the keywords of the paper with ``paper_id``.

        Returns:
            List[str]: Keywords, or None if the paper is not found.
        """
        url = f"{self.base_url}/query_keywords_by_id"
        return await self._session_get(url, {"paper_id": paper_id})

    async def query_keywords_by_title(self, title: str) -> List[str]:
        """Fetch the keywords of the paper titled ``title``.

        Returns:
            List[str]: Keywords, or None if the paper is not found.
        """
        url = f"{self.base_url}/query_keywords_by_title"
        return await self._session_get(url, {"title": title})

    async def keywords_metadata(self) -> dict:
        """Fetch keyword statistics.

        Returns:
            dict: Mapping of keyword to occurrence count.
        """
        url = f"{self.base_url}/keywords_metadata"
        return await self._session_get(url)


if __name__ == "__main__":
    import asyncio

    # Demo: run a scored search against the knowledge base and print the
    # reranked chunks for a sample query.
    async def main():
        async with KBAPI() as kb_api:
            papers = await kb_api.search_papers_with_score("损失函数")
            print(papers)

    asyncio.run(main())
