import asyncio
from typing import List

import aiohttp
from pydantic import BaseModel, Field


class PaperChunk(BaseModel):
    """One text chunk of a paper as stored in the knowledge base.

    The ``description`` strings below are runtime schema metadata (in
    Chinese) and are kept verbatim; the ``#`` comments summarize them in
    English.
    """

    # Unique row id of this chunk across the whole chunk database.
    id: int = Field(..., description="论文片段的唯一标识符")
    # Unique identifier of the paper this chunk belongs to.
    paper_id: str = Field(..., description="论文的唯一标识符")
    # Title of the paper.
    paper_title: str = Field(..., description="论文的标题")
    # 0-based position of this chunk within its paper; lets a generated
    # passage be traced back to its exact location in the source paper.
    chunk_id: int = Field(
        ...,
        description="该论文片段在论文中的位置id， 从0开始，可以在回溯的时候使用，比如某一段是根据某个论文的某个chunk_id生成的，那么就可以回溯到原始的论文",
    )
    # Chunk body text; mostly English, occasionally Chinese.
    chunk_text: str = Field(
        ...,
        description="论文片段的文本内容，可能是中文也可能是英文, 中文内容特别少，主要还是英文论文",
    )
    # Source database filename, which encodes the conference and year,
    # e.g. "...NeurIPS_2022..." means NeurIPS 2022 papers.
    original_filename: str = Field(
        ...,
        description="会议名称,Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db， 意思是NeurIPS的2022年的论文",
    )


class PaperChunkWithDistance(PaperChunk):
    """A :class:`PaperChunk` plus the similarity score from a search query."""

    # Similarity score returned by the search backend.
    distance: float = Field(..., description="论文片段的相似度分数")


class SimpleRelevantPaper(BaseModel):
    """A paper matched by a keyword search: its abstract plus the matching chunks.

    The ``description`` strings are runtime schema metadata (in Chinese)
    and are kept verbatim; the ``#`` comments summarize them in English.
    """

    # Unique identifier of the paper.
    paper_id: str = Field(..., description="论文的唯一标识符")
    # Title of the paper.
    paper_title: str = Field(..., description="论文的标题")
    # Source database filename encoding conference and year,
    # e.g. "...NeurIPS_2022..." means NeurIPS 2022 papers.
    original_filename: str = Field(
        ...,
        description="会议名称,Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db， 意思是NeurIPS的2022年的论文",
    )
    # The paper's abstract chunk; mostly English, occasionally Chinese.
    abstract_chunk: PaperChunk|PaperChunkWithDistance = Field(
        ...,
        description="论文的摘要片段，可能是中文也可能是英文, 中文内容特别少，主要还是英文论文",
    )
    # Chunks of this paper that matched the query.
    relevant_chunks: List[PaperChunk|PaperChunkWithDistance] = Field(
        ...,
        description="论文的相关片段，可能是中文也可能是英文, 中文内容特别少，主要还是英文论文",
    )
    # Optional summary generated from the abstract + relevant chunks;
    # left as None by the query helpers in this module.
    summary: str | None = Field(default=None, description="论文的摘要，基于相关段落和abstract生成的")

class FullPaper(BaseModel):
    """A complete paper: its identifying metadata and all of its chunks.

    The ``description`` strings are runtime schema metadata (in Chinese)
    and are kept verbatim; the ``#`` comments summarize them in English.
    """

    # Unique row id (schema description reuses the chunk-id wording).
    id: int = Field(..., description="论文片段的唯一标识符")
    # Unique identifier of the paper.
    paper_id: str = Field(..., description="论文的唯一标识符")
    # Title of the paper.
    paper_title: str = Field(..., description="论文的标题")
    # Source database filename encoding conference and year,
    # e.g. "...NeurIPS_2022..." means NeurIPS 2022 papers.
    original_filename: str = Field(
        ...,
        description="会议名称,Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db， 意思是NeurIPS的2022年的论文",
    )
    # Every chunk of the paper; mostly English, occasionally Chinese.
    all_chunks: List[PaperChunk|PaperChunkWithDistance] = Field(
        ...,
        description="论文的所有片段，可能是中文也可能是英文, 中文内容特别少，主要还是英文论文",
    )


class KnowledgeBaseAPI:
    """Async client for the paper knowledge-base HTTP service.

    Wraps the remote search/query endpoints and converts their JSON
    responses into the pydantic models defined in this module.  Use it
    as an async context manager (``async with KnowledgeBaseAPI() as kb``)
    or standalone — in the latter case call :meth:`close` when done.
    """

    def __init__(self, base_url: str = "http://180.184.65.98:38880/atomgit"):
        # Strip any trailing slash so endpoint paths can be joined with "/".
        self.base_url = base_url.rstrip("/")
        # aiohttp session; created lazily on first request or by __aenter__.
        self.session = None

    async def __aenter__(self):
        self.session = aiohttp.ClientSession()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def close(self):
        """Close the underlying aiohttp session, if one was opened."""
        if self.session:
            await self.session.close()

    async def _get_json(self, endpoint: str, params: dict | None = None):
        """GET ``{base_url}/{endpoint}`` and return the decoded JSON body.

        Creates the session lazily, so every public method also works
        outside an ``async with`` block.
        """
        if not self.session:
            self.session = aiohttp.ClientSession()
        url = f"{self.base_url}/{endpoint}"
        async with self.session.get(url, params=params) as response:
            return await response.json()

    async def get_metadata(self) -> dict:
        """Fetch metadata about the paper database."""
        return await self._get_json("metadata")

    def __flatten_dict_to_last_level(self, d):
        """Flatten a nested dict, keeping only the leaf key/value pairs.

        Args:
            d: The (possibly nested) dict to flatten.

        Returns:
            A single-level dict containing every non-dict value of ``d``;
            duplicate leaf keys are overwritten by later occurrences.
        """
        flat_dict = {}
        for key, value in d.items():
            if isinstance(value, dict):
                # Recurse into nested dicts and merge their leaves.
                flat_dict.update(self.__flatten_dict_to_last_level(value))
            else:
                flat_dict[key] = value
        return flat_dict

    async def search_papers(
        self, query: str, top_k: int = 30
    ) -> List[PaperChunkWithDistance]:
        """Semantic search for paper chunks matching ``query``.

        Each result carries a ``distance`` similarity score from the backend.
        """
        res_data = await self._get_json(
            "search_papers", {"query": query, "top_k": top_k}
        )
        return [
            PaperChunkWithDistance(**self.__flatten_dict_to_last_level(paper))
            for paper in res_data
        ]

    async def query_by_paper_id(
        self, paper_id: str, top_k: int = 1000
    ) -> List[PaperChunk]:
        """Return the chunks of a single paper identified by ``paper_id``.

        NOTE(review): chunk index 0 is presumed to be the abstract — this
        is relied on by query_simple_related_papers_by_keyword; confirm
        against the backend.
        """
        res_data = await self._get_json(
            "query_by_paper_id", {"paper_id": paper_id, "top_k": top_k}
        )
        return [
            PaperChunk(**self.__flatten_dict_to_last_level(paper))
            for paper in res_data
        ]

    async def query_by_title(self, title: str, top_k: int = 1000) -> List[PaperChunk]:
        """Return chunks of papers whose title matches ``title`` exactly."""
        res_data = await self._get_json(
            "query_by_title", {"title": title, "top_k": top_k}
        )
        return [
            PaperChunk(**self.__flatten_dict_to_last_level(paper))
            for paper in res_data
        ]

    async def query_by_title_contain(
        self, title: str, top_k: int = 1000
    ) -> List[PaperChunk]:
        """Return chunks of papers whose title contains ``title``.

        Backend quirks (per the original author's notes):
          * case sensitive, and no ranking of any kind is applied;
          * matching is substring-ish after aggressive tokenization —
            single letters such as "a" or "g" recall only a couple of
            papers, while "efficient" also matches e.g. the
            "Decision-Estimation Coefficient" paper, and "fficient"
            matches as well.

        Args:
            title: Text the paper title must contain.
            top_k: Maximum number of results, default 1000.

        Returns:
            List of matching paper chunks.
        """
        res_data = await self._get_json(
            "query_by_title_contain", {"title": title, "top_k": top_k}
        )
        # Flattening is a no-op on already-flat response dicts; used here
        # for consistency with the other query methods.
        return [
            PaperChunk(**self.__flatten_dict_to_last_level(paper))
            for paper in res_data
        ]

    async def query_by_chunk_contain(
        self, chunk: str = "machine learning", top_k: int = 1000
    ) -> List[PaperChunk]:
        """Return chunks whose body text contains ``chunk``.

        The backend does a literal ``key in chunk_text`` style match and
        returns results ordered by id only — it does NOT rank by relevance
        (e.g. by number of occurrences), so re-rank client-side if ordering
        matters; a rerank step could be added later.

        Args:
            chunk: Text the chunk body must contain.
            top_k: Maximum number of results, default 1000.

        Returns:
            List of matching paper chunks.
        """
        res_data = await self._get_json(
            "query_by_chunk_contain", {"chunk": chunk, "top_k": top_k}
        )
        return [
            PaperChunk(**self.__flatten_dict_to_last_level(paper))
            for paper in res_data
        ]

    async def query_simple_related_papers_by_keyword(
        self, keyword: str, top_k: int = 20
    ) -> List[SimpleRelevantPaper]:
        """Semantic-search ``keyword``, group hits by paper, attach abstracts.

        Over-fetches ``top_k * 10`` chunks so that, after grouping by
        ``paper_id``, up to ``top_k`` distinct papers remain (in best-match
        order), then fetches each paper's chunk 0 as its abstract.

        Args:
            keyword: Free-text query.
            top_k: Maximum number of distinct papers to return, default 20.

        Returns:
            Up to ``top_k`` SimpleRelevantPaper objects (``summary`` unset).
        """
        hits = await self.search_papers(keyword, top_k=top_k * 10)

        # Group matching chunks by paper, preserving discovery order
        # (dicts keep insertion order).
        chunks_by_paper = {}
        for hit in hits:
            chunks_by_paper.setdefault(hit.paper_id, []).append(hit)

        paper_ids = list(chunks_by_paper)[:top_k]

        # Fetch every candidate paper's chunks concurrently.
        full_chunk_lists = await asyncio.gather(
            *[self.query_by_paper_id(pid) for pid in paper_ids]
        )

        relevant_papers = []
        for pid, full_chunks in zip(paper_ids, full_chunk_lists):
            if not full_chunks:
                # Defensive: skip papers whose chunk lookup came back empty
                # (the previous implementation raised IndexError here).
                continue
            abstract_chunk = full_chunks[0]  # chunk 0 presumed the abstract
            relevant_papers.append(
                SimpleRelevantPaper(
                    paper_id=abstract_chunk.paper_id,
                    paper_title=abstract_chunk.paper_title,
                    original_filename=abstract_chunk.original_filename,
                    abstract_chunk=abstract_chunk,
                    relevant_chunks=chunks_by_paper[pid],
                )
            )
        return relevant_papers





if __name__ == "__main__":
    # Demo/smoke test: issues a few keyword queries against the live service.
    # (asyncio is already imported at module level; the redundant local
    # import and the unused `query` variable were removed.)

    async def main():
        async with KnowledgeBaseAPI() as kb_api:
            papers1 = await kb_api.query_simple_related_papers_by_keyword(
                "Dataset Distillation", 4
            )
            papers2 = await kb_api.query_simple_related_papers_by_keyword(
                "What is Dataset Distillation", 4
            )
            papers3 = await kb_api.query_simple_related_papers_by_keyword(
                "Dataset Distillation Related works", 4
            )

        print(papers1)

    asyncio.run(main())
