"""

# 就是我输入一个主题名称，生成大纲


1，对我的topic进行重新， 比如重新出三个查询
2. 去把我查询到的论文，取出他们这些论文的 abs和titile
3. 分批次比如3片论文去生成一套 section
4. 最后输入给大模型让它融合成一套 section

----------生成 subsection -------------

1. 用每一个section的describ去知识库召回论文
2. 用这些论文的chunk去生成subsection
3. 最后输入给大模型让它融合成一套 section

"""

import pickle

from data_dyne.models.survey_model import (
    PaperChunk,
    Section,
    Subsection,
    Survey,
)
from data_dyne.prompt.outline_writer import (
    MERGING_OUTLINE_PROMPT,
    ROUGH_OUTLINE_PROMPT,
    ROUGH_OUTLINE_PROMPT_V0,
    SUBSECTION_OUTLINE_PROMPT,
    TRAN_TOPIC_PROMPT,
)
from data_dyne.rag.kb_service import KBService
from data_dyne.utils import clean_papers
from metagpt.actions import Action
from metagpt.logs import logger


class SectionWriter(Action):
    """Generate a rough survey outline (top-level sections) for a topic.

    Workflow (see module docstring):
    1. Optionally rewrite the topic into an English survey title.
    2. Retrieve paper chunks from the knowledge base and compress them
       with respect to the topic.
    3. Generate one candidate outline per batch of 3 papers, plus one
       outline from the LLM's own knowledge (no papers).
    4. Merge all candidate outlines into a single survey outline.
    """

    ROUGH_OUTLINE_PROMPT: str = ROUGH_OUTLINE_PROMPT
    ROUGH_OUTLINE_PROMPT_V0: str = ROUGH_OUTLINE_PROMPT_V0

    kb: KBService = KBService()

    async def _trans_topic(self, topic: str) -> str:
        """Rewrite a (possibly non-English) topic into an English survey title.

        Falls back to the original topic when the LLM response contains
        no "Title:" line.
        """
        prompt = TRAN_TOPIC_PROMPT.format(topic=topic)
        rsp = await self._aask(prompt)
        for line in rsp.split("\n"):
            if line.startswith("Title:"):
                # split(":", 1) keeps colons that are part of the title itself
                return line.split(":", 1)[1].strip()
        return topic

    async def __clean_papers(
        self, papers: list[PaperChunk], topic: str
    ) -> list[PaperChunk]:
        """Compress paper chunks with respect to the topic (contextual compression).

        https://python.langchain.com/docs/how_to/contextual_compression/#adding-contextual-compression-with-an-llmchainextractor
        """
        return await clean_papers(self.llm, papers, topic)

    async def _format_sections(self, sections: list[str]) -> tuple[list[Section], str]:
        """Parse LLM output lines into Section objects plus a survey title.

        Expected line format: a "Title: ..." line, and pairs of
        "Section N: <name>" followed by a "Description ...: <text>" line.
        Unrecognized lines are skipped.
        """
        sections_obj: list[Section] = []
        title = ""
        i = 0
        while i < len(sections):
            line = sections[i]
            # guard i + 1 so a truncated response cannot raise IndexError
            if line.startswith("Section") and i + 1 < len(sections):
                # split(":", 1) preserves colons inside the name/description
                name = line.split(":", 1)[-1].strip()
                desc = sections[i + 1].split(":", 1)[-1].strip()
                sections_obj.append(Section(title=name, description=desc))
                i += 2
            elif line.startswith("Title"):
                title = line.split(":", 1)[-1].strip()
                i += 1
            else:
                i += 1
        return sections_obj, title

    async def _generate_section(self, topic, papers_chunks: list[PaperChunk]) -> Survey:
        """Generate one candidate outline from a batch of paper chunks."""
        parts = []
        for i, paper in enumerate(papers_chunks):
            # prefer the compressed text when available
            text = paper.clean_text if paper.clean_text else paper.text
            parts.append(f"paper {i}: {text}\n---------------\n")
        papers_str = "".join(parts)

        prompt = self.ROUGH_OUTLINE_PROMPT.format(topic=topic, papers=papers_str)
        logger.info(prompt)
        rsp = await self._aask(prompt)
        sections_obj, title = await self._format_sections(rsp.split("\n"))
        return Survey(title=title, sections=sections_obj)

    async def _generate_section_v0(self, topic) -> Survey:
        """Generate a candidate outline from the LLM's own knowledge (no papers)."""
        prompt = self.ROUGH_OUTLINE_PROMPT_V0.format(topic=topic, papers="")
        logger.info(prompt)
        rsp = await self._aask(prompt)
        sections_obj, title = await self._format_sections(rsp.split("\n"))
        return Survey(title=title, sections=sections_obj)

    async def _merge_surveys(self, topic: str, all_surveys: list[Survey]) -> Survey:
        """Ask the LLM to fuse the candidate outlines into one final outline."""
        all_surveys_str = "\n---------------\n".join(
            survey.outline for survey in all_surveys
        )
        prompt = MERGING_OUTLINE_PROMPT.format(topic=topic, outlines=all_surveys_str)
        rsp = await self._aask(prompt)
        sections_obj, title = await self._format_sections(rsp.split("\n"))
        return Survey(title=title, sections=sections_obj)

    async def run(self, topic):
        """Produce the merged survey outline for *topic*."""
        # topic = await self._trans_topic(topic)
        # One outline from the model's pre-existing knowledge, without papers.
        survey_v0 = await self._generate_section_v0(topic)
        papers_chunks = await self.kb.get_papers(topic)
        # Compress the retrieved chunks with respect to the topic.
        papers_chunks = await self.__clean_papers(papers_chunks, topic)
        # Split papers into groups of 3; one candidate outline per group.
        paper_groups = [
            papers_chunks[i : i + 3] for i in range(0, len(papers_chunks), 3)
        ]

        all_surveys: list[Survey] = [survey_v0]
        for group in paper_groups:
            all_surveys.append(await self._generate_section(topic, group))

        # Let the LLM merge the candidates and pick the best title.
        return await self._merge_surveys(topic, all_surveys)


class SubsectionWriter(Action):
    """Fill each section of a survey outline with generated subsections.

    For every section except the introduction: recall papers from the
    knowledge base using the section description, compress them, and ask
    the LLM to produce subsection name/description pairs.
    """

    SUBSECTION_OUTLINE_PROMPT: str = SUBSECTION_OUTLINE_PROMPT
    kb: KBService = KBService()

    async def _generate_subsection(
        self, survey_title: str, section: Section, outline: str
    ) -> list[Subsection]:
        """Generate subsections for one section.

        1. Recall paper chunks from the KB using the section description.
        2. Compress the chunks with respect to that description.
        3. Parse "Subsection N: <name>" / "Description ...: <text>" pairs
           from the LLM response.
        """
        papers_chunks = await self.kb.get_papers(
            section.description, include_abstract_papers=False
        )
        papers_chunks = await clean_papers(self.llm, papers_chunks, section.description)
        papers_str = "\n---------------\n".join(p.text for p in papers_chunks)

        prompt = self.SUBSECTION_OUTLINE_PROMPT.format(
            topic=survey_title,
            outline=outline,
            section_name=section.title,
            section_description=section.description,
            papers=papers_str,
        )
        logger.info(prompt)
        rsp = await self._aask(prompt)

        lines = rsp.split("\n")
        subsections_obj: list[Subsection] = []
        i = 0
        while i < len(lines):
            # guard i + 1 so a truncated response cannot raise IndexError
            if lines[i].startswith("Subsection") and i + 1 < len(lines):
                # split(":", 1) preserves colons inside the name/description
                name = lines[i].split(":", 1)[-1].strip()
                desc = lines[i + 1].split(":", 1)[-1].strip()
                subsections_obj.append(Subsection(title=name, description=desc))
                i += 2
            else:
                i += 1
        return subsections_obj

    async def run(self, survey: Survey) -> Survey:
        """Populate ``section.subsections`` in place for every non-introduction section."""
        for section in survey.sections:
            if "introduction" in section.title.lower():  # skip the introduction
                continue
            section.subsections = await self._generate_subsection(
                survey.title, section, survey.outline
            )
        return survey


if __name__ == "__main__":
    import asyncio
    import pickle

    # survey = asyncio.run(SectionWriter().run("损失函数"))
    # with open("data/cache/sections.pkl", "wb") as file:
        # pickle.dump(survey, file)
    with open("data/cache/sections.pkl", "rb") as file:
        survey = pickle.load(file)
    survey: Survey = asyncio.run(SubsectionWriter().run(survey))
    with open("data/cache/survey_with_subsections.pkl", "wb") as file:
        pickle.dump(survey, file)
    with open("tmp.md", "w") as file:
        file.write(survey.outline)
    # print(survey.outline)

