"""

# 就是我输入一个主题名称，生成大纲


1，对我的topic进行重新， 比如重新出三个查询
2. 去把我查询到的论文，取出他们这些论文的 abs和titile
3. 分批次比如3片论文去生成一套 section
4. 最后输入给大模型让它融合成一套 section

----------生成 subsection -------------

1. 用每一个section的describ去知识库召回论文
2. 用这些论文的chunk去生成subsection
3. 最后输入给大模型让它融合成一套 section

"""

from data_dyne.models.survey_model import (
    PaperChunk,
    Section,
    Subsection,
    Survey,
)
from data_dyne.prompt.content_writer import (
    SECTION_WRITING_PROMPT,
    SUBSECTION_WRITING_PROMPT,
)
from data_dyne.rag.kb_service import KBService
from data_dyne.utils import clean_papers
from metagpt.actions import Action


class ContentWriter(Action):
    """Write the prose content for every section/subsection of a survey.

    For each section after the first (the first section is skipped,
    presumably the introduction — TODO confirm) and for each of its
    subsections the action:
      1. retrieves paper chunks from the knowledge base using the
         section/subsection description as the query,
      2. compresses the retrieved chunks with the LLM,
      3. generates the content from the compressed chunks via a prompt.
    All retrieved chunks are stored on the survey objects so they can
    later be converted into citations.
    """

    SECTION_WRITING_PROMPT: str = SECTION_WRITING_PROMPT
    SUBSECTION_WRITING_PROMPT: str = SUBSECTION_WRITING_PROMPT

    # Knowledge-base retrieval service.
    kb: KBService = KBService()

    async def __clean_papers(
        self, papers: list[PaperChunk], topic: str
    ) -> list[PaperChunk]:
        """Compress paper chunks down to the information relevant to *topic*.

        Follows the contextual-compression idea described at:
        https://python.langchain.com/docs/how_to/contextual_compression/#adding-contextual-compression-with-an-llmchainextractor
        """
        return await clean_papers(self.llm, papers, topic)

    @staticmethod
    def _strip_wrapper_lines(rsp: str) -> str:
        """Drop the first and last lines of an LLM reply.

        The model is expected to wrap its answer in marker lines (e.g. a
        markdown code fence). NOTE(review): a reply with fewer than three
        lines collapses to "" — confirm the prompt guarantees the wrapper.
        """
        lines = rsp.split("\n")
        return "\n".join(lines[1:-1])

    async def _generate_section_content(
        self, papers_chunks: list[PaperChunk], section: Section, survey: Survey
    ) -> str:
        """Generate the body text of a top-level *section* from paper chunks."""
        prompt = self.SECTION_WRITING_PROMPT.format(
            topic=survey.title,
            overall_outline=survey.outline,
            paper_list="\n---------------\n".join(
                [paper.text for paper in papers_chunks]
            ),
            section_name=section.title,
            description=section.description,
            word_num=200,
        )
        rsp = await self._aask(prompt)
        return self._strip_wrapper_lines(rsp)

    async def _generate_subsection_content(
        self,
        papers_chunks: list[PaperChunk],
        section: Section,
        subsection: Subsection,
        survey: Survey,
    ) -> str:
        """Generate the body text of a *subsection* from paper chunks."""
        prompt = self.SUBSECTION_WRITING_PROMPT.format(
            topic=survey.title,
            overall_outline=survey.outline,
            paper_list="\n---------------\n".join(
                [paper.text for paper in papers_chunks]
            ),
            section_name=section.title,
            subsection_name=subsection.title,
            description=subsection.description,
            word_num=200,
        )
        rsp = await self._aask(prompt)
        return self._strip_wrapper_lines(rsp)

    async def run(self, survey: Survey) -> Survey:
        """Fill in content for every section/subsection of *survey* in place.

        Returns the same *survey* object with `content` and `paper_chunks`
        populated on each processed section and subsection.
        """
        # Pool of every paper chunk used anywhere, for later citation conversion.
        all_paper_chunks: set[PaperChunk] = set()
        for section in survey.sections[1:]:
            for subsection in section.subsections:
                # Retrieve chunks relevant to this subsection.
                papers_chunks: list[PaperChunk] = await self.kb.get_papers(
                    subsection.description, include_normal_papers=False
                )
                # Compress the retrieved information.
                papers_chunks = await self.__clean_papers(
                    papers_chunks, subsection.description
                )
                subsection.paper_chunks = papers_chunks
                # Generate the subsection text.
                subsection.content = await self._generate_subsection_content(
                    papers_chunks, section, subsection, survey
                )
                # Keep the chunks in the global pool for citation conversion.
                all_paper_chunks.update(papers_chunks)
            # Retrieve chunks for the section itself.
            papers_chunks = await self.kb.get_papers(section.description)
            # NOTE(review): only the first retrieved chunk is compressed/used
            # here ([:1]) — looks like a leftover debug cap; confirm intent.
            papers_chunks = await self.__clean_papers(
                papers_chunks[:1], section.description
            )
            section.paper_chunks = papers_chunks
            all_paper_chunks.update(papers_chunks)
            # Generate the section text.
            section.content = await self._generate_section_content(
                papers_chunks, section, survey
            )
        return survey


if __name__ == "__main__":
    import asyncio
    import pickle

    with open("data/cache/survey_with_subsections.pkl", "rb") as file:
        survey = pickle.load(file)
    survey = asyncio.run(ContentWriter().run(survey))
    with open("data/cache/survey_with_subsections_content.pkl", "wb") as file:
        pickle.dump(survey, file)

    # with open("data/cache/survey_with_subsections_content.pkl", "rb") as file:
    # survey = pickle.load(file)
    # md_path = "data/cache/survey_with_subsections_content.md"
    # with open(md_path, "w") as file:
    # file.write(survey.text)
    pass
