# Outline generation

# from asyncio import Semaphore
import asyncio
import os
import time
from functools import wraps
from typing import Dict, List

from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field

from data_dyne.llm import llm
from data_dyne.llm.zhipu import semaphore_plus, semaphore_rag
from data_dyne.log import logger
from data_dyne.models.survey_model import PaperChunk, Section, Subsection, Survey
from data_dyne.prompt.outline_prompt import (
    MERGING_OUTLINE_PROMPT,
    ROUGH_OUTLINE_PROMPT,
    ROUGH_OUTLINE_PROMPT_V0,
    SUBSECTION_WRITING_PROMPT,
    TRAN_TOPIC_PROMPT,
)
from data_dyne.prompt.refine_prompt import LCE_PROMPT
from data_dyne.rag.kb_service import KBService
from data_dyne.utils import clean_chunks, extract_sup_tags, fix_tag

# kb_service = KBService(semaphore=semaphore_rag)


def limmiter(semaphore):
    """Decorator factory that caps the concurrency of an async function.

    Every call to the decorated coroutine acquires ``semaphore`` before
    running, so at most ``semaphore``'s initial value of calls execute at
    the same time.
    """

    def decorator(func):
        @wraps(func)
        async def limited(*args, **kwargs):
            async with semaphore:
                return await func(*args, **kwargs)

        return limited

    return decorator


def timer(func):
    """Decorator that logs the wall-clock duration of an async call.

    When the first positional argument has a ``__class__`` (i.e. the call
    looks like a bound method), the log line is prefixed with the class name.
    """

    @wraps(func)
    async def timed(*args, **kwargs):
        started = time.time()
        result = await func(*args, **kwargs)
        elapsed_time = time.time() - started

        # Prefix with the class name when this wraps a method call.
        if args and hasattr(args[0], "__class__"):
            class_name = args[0].__class__.__name__
            logger.info(
                f"{class_name}.{func.__name__} took {elapsed_time:.2f} seconds to execute"
            )
        else:
            logger.info(f"{func.__name__} took {elapsed_time:.2f} seconds to execute")

        return result

    return timed


async def gen_title(topic: str):
    """Turn a free-form topic into a properly phrased survey title via the LLM."""

    class Title(BaseModel):
        title: str = Field(
            ...,
            description="a survey title",
        )

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", TRAN_TOPIC_PROMPT),
            ("human", "{topic}"),
        ]
    )
    # Structured output guarantees the response parses into Title.
    chain = prompt | llm.with_structured_output(Title)
    result = await chain.ainvoke({"topic": topic})
    return result.title


class GenOutline:
    """Builds a survey outline.

    Pipeline: retrieve paper chunks from the knowledge base, draft several
    partial outlines concurrently (one knowledge-free draft plus one per
    group of papers), then merge all drafts into a single outline.
    """

    def __init__(self, kb_service: "KBService | None" = None):
        # Create the KB client lazily: the previous `= KBService()` default
        # was evaluated once at class-definition time and shared by every
        # instance (mutable-default pitfall, plus an import-time side effect).
        self.kb_service = kb_service if kb_service is not None else KBService()

    async def _format_survey(self, survey_str: str) -> Survey:
        """Parse the LLM's plain-text outline into a Survey object.

        Expected line prefixes: "Title:", "Section ...:" followed by a
        description line, "Subsection ...:" followed by a description line.
        Unrecognized lines are skipped.
        """
        survey = Survey()
        lines = survey_str.split("\n")
        n = len(lines)
        sections_obj = []
        title = ""
        i = 0
        while i < n:
            line = lines[i]
            if line.startswith("Title"):
                # split(":", 1) keeps everything after the FIRST colon so a
                # title containing ":" is not truncated (split(":")[-1] was).
                title = line.split(":", 1)[-1]
                i += 1
            elif line.startswith("Section") and i + 1 < n:
                section_obj = Section(
                    title=line.split(":", 1)[-1],
                    description=lines[i + 1].split(":", 1)[-1],
                )
                i += 2
                subsections = []
                # Consume subsection pairs until the next "Section" line.
                while i < n and not lines[i].startswith("Section"):
                    if lines[i].startswith("Subsection") and i + 1 < n:
                        subsections.append(
                            Subsection(
                                title=lines[i].split(":", 1)[-1],
                                description=lines[i + 1].split(":", 1)[-1],
                            )
                        )
                        i += 2
                    else:
                        i += 1
                section_obj.subsections = subsections
                sections_obj.append(section_obj)
            else:
                i += 1
        survey.sections = sections_obj
        survey.title = title
        return survey

    @timer
    @limmiter(semaphore_plus)
    async def generate_outline_v0(self, topic: str) -> Survey:
        """Draft an outline from the topic alone, without retrieved papers."""
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", ROUGH_OUTLINE_PROMPT_V0),
                ("human", "{topic}"),
            ]
        )
        chain = prompt | llm
        res = await chain.ainvoke(
            {
                "topic": topic,
                "sections_num_range": "6-10",
                "subsections_num_range": "2-6",
            }
        )
        return await self._format_survey(res.content)

    @timer
    @limmiter(semaphore_plus)
    async def generate_outline(
        self, topic: str, paper_chunks: list[PaperChunk]
    ) -> Survey:
        """Draft an outline grounded in a group of retrieved paper chunks."""
        # Serialize the chunks into one prompt string; prefer the cleaned
        # text when available, falling back to the raw text.
        parts = []
        for paper in paper_chunks:
            body = paper.clean_text or paper.text
            parts.append(
                f"""paper title : {paper.paper_title} 
                paper_content: {body}\n
                ---------------------\n
                """
            )
        papers_str = "".join(parts)

        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", ROUGH_OUTLINE_PROMPT),
                ("human", "{topic}"),
            ]
        )
        chain = prompt | llm
        res = await chain.ainvoke(
            {
                "topic": topic,
                "papers": papers_str,
                "sections_num_range": "6-10",
                "subsections_num_range": "2-6",
            }
        )
        return await self._format_survey(res.content)

    @timer
    @limmiter(semaphore_plus)
    async def _merge_surveys(self, topic: str, all_surveys: list[Survey]) -> Survey:
        """Merge several draft outlines into one final outline via the LLM."""
        all_surveys_str = "\n---------------\n".join(
            [survey.outline for survey in all_surveys]
        )
        prompt = MERGING_OUTLINE_PROMPT.format(
            topic=topic,
            outlines=all_surveys_str,
            sections_num_range="6-10",
            subsections_num_range="2-6",
        )
        rsp = await llm.ainvoke(prompt)
        return await self._format_survey(rsp.content)

    @timer
    async def _get_paper_and_clean(self, topic: str):
        """Fetch candidate paper chunks from the KB and clean them.

        NOTE(review): retrieval and cleaning might be mergeable into one
        concurrent task — left sequential for now.
        """
        papers_chunks = await self.kb_service.get_papers(
            topic,
            include_abstract_papers=False,
            include_normal_papers=False,
            top_k=20,  # top 20 seems to work a bit better
        )
        papers_chunks = await clean_chunks(papers_chunks, topic)
        logger.info(f"Found {len(papers_chunks)} unique paper chunks")
        return papers_chunks

    @timer
    async def run(self, topic: str, group_size=4) -> Survey:
        """Full outline pipeline: retrieve, draft concurrently, merge.

        Args:
            topic: the survey topic.
            group_size: number of paper chunks fed into each draft outline.
        """
        papers_chunks: List[PaperChunk] = await self._get_paper_and_clean(topic)

        # Split the chunks into groups of `group_size`.
        papers_chunks_groups = [
            papers_chunks[i : i + group_size]
            for i in range(0, len(papers_chunks), group_size)
        ]
        # Draft concurrently: one knowledge-free outline plus one per group.
        tasks = [self.generate_outline_v0(topic)]
        tasks.extend(
            self.generate_outline(topic, group) for group in papers_chunks_groups
        )
        all_surveys = await asyncio.gather(*tasks)
        # Merge the drafts into the final outline.
        return await self._merge_surveys(topic, all_surveys)


class GenContent:
    """Generates the body text for every subsection of a survey and resolves
    citation tags against the retrieved paper chunks."""

    def __init__(self, kb_service: "KBService | None" = None):
        # Create the KB client lazily: the previous `= KBService()` default
        # was evaluated once at class-definition time and shared by every
        # instance (mutable-default pitfall).
        self.kb_service = kb_service if kb_service is not None else KBService()

    @timer
    # @limmiter(semaphore_plus)
    async def _generate_subsection_content(
        self,
        survey: Survey,
        section_num: int,
        subsection_num: int,
    ):
        """Generate the content of one subsection.

        Queries the knowledge base with the subsection description, builds a
        writing prompt (with or without paper excerpts), invokes the LLM, and
        repairs the <sup>...</sup> citation tags in the response. Mutates
        ``survey`` in place.
        """
        section: Section = survey.sections[section_num]
        subsection: Subsection = section.subsections[subsection_num]

        logger.info(
            f"Processing section: {section.title}, Generating content for subsection: {subsection.title}"
        )
        # Prepare inputs: retrieve supporting paper chunks then clean them.
        subsection_des = subsection.description
        papers_chunks: List[PaperChunk] = await self.kb_service.get_papers(
            subsection_des, merge_papers=False, include_abstract_papers=False, top_k=5
        )
        section_name = section.title
        subsection_name = subsection.title
        papers_chunks = await clean_chunks(papers_chunks, subsection_des)

        outline_str = survey.outline

        # Build the prompt fragments differently depending on whether any
        # paper chunks were retrieved.
        if papers_chunks:
            logger.info(
                f"Found {len(papers_chunks)} unique paper chunks in subsection: {subsection_name}"
            )
            paper_chunks_str = ""
            for paper_chunk in papers_chunks:
                paper_chunks_str += f"""
                ---------------------------------
                paper_info: {paper_chunk.reference}
                paper_content: {paper_chunk.clean_text}
                ---------------------------------
                """

            paper_chunks_section = (
                f"For reference, here are relevant paper excerpts related to your topic:\n\
<paper_chunks>\n{paper_chunks_str}\n</paper_chunks>"
            )

            citation_requirements = """Properly cite references using the following format:
               - Use parentheses for citations<sup>paper_info</sup>
               - Multiple papers can be cited together: <sup>paper_info1</sup>, <sup>paper_info2</sup>,... <sup>paper_infok</sup>
               - Only cite papers from the provided paper chunks
               - Citations must support your claims directly"""
        else:
            logger.warning(
                f"No specific paper references are available for this subsection {subsection_name}."
            )
            paper_chunks_str = ""
            paper_chunks_section = (
                "No specific paper references are available for this subsection."
            )
            citation_requirements = "Write based on general knowledge of the field without specific citations."

        # Generate the subsection text (concurrency capped via _ainvoke).
        prompt = SUBSECTION_WRITING_PROMPT.format(
            survey_title=survey.title,
            outline=outline_str,
            paper_chunks_section=paper_chunks_section,
            section_name=section_name,
            subsection_name=subsection_name,
            subsection_description=subsection_des,
            min_words=200,
            citation_requirements=citation_requirements,
        )
        rsp = await self._ainvoke(prompt)
        content: str = rsp.content  # type: ignore

        # Citation tags are only repaired when reference chunks exist.
        if papers_chunks:
            all_tags = await extract_sup_tags(content)
            order_chunks: List[PaperChunk] = []
            seen = set()
            for tag in all_tags:
                if tag in seen:
                    continue
                # fix_tag matches the tag to the closest retrieved chunk;
                # chunk_index appears to be 1-based — TODO confirm against
                # fix_tag's contract.
                paper_info, chunk_index, score = await fix_tag(tag, papers_chunks)
                logger.info(f"Fix tag {tag} to {paper_info}, in {subsection.title}")
                content = content.replace(tag, paper_info)
                order_chunks.append(papers_chunks[chunk_index - 1])
                seen.add(tag)
            # Record which chunks this subsection actually cited, in order.
            subsection.paper_chunks = order_chunks
        else:
            subsection.paper_chunks = []

        # Drop a leading markdown heading; the outline already supplies it.
        if content.startswith("#"):
            subsection.content = "\n".join(content.split("\n")[1:])
        else:
            subsection.content = content

    @limmiter(semaphore_plus)
    async def _ainvoke(self, prompt):
        """LLM call with concurrency capped by semaphore_plus."""
        rsp = await llm.ainvoke(prompt)
        return rsp

    @timer
    async def post_inference(self, survey: Survey) -> Survey:
        """Post-processing: deduplicate paper chunks across the survey and
        rewrite per-subsection citation tags as global reference numbers."""
        logger.info("Starting post-inference processing")
        # Deduplicate chunks while preserving first-seen order.
        seen = set()
        all_paper_chunks = []
        all_paper_chunks_index_map = {}  # reference -> 1-based global index
        for section in survey.sections:
            for subsection in section.subsections:
                for chunk in subsection.paper_chunks:
                    if chunk.reference not in seen:
                        seen.add(chunk.reference)
                        all_paper_chunks.append(chunk)
                        all_paper_chunks_index_map[chunk.reference] = len(
                            all_paper_chunks
                        )
        logger.info(f"Found {len(all_paper_chunks)} unique paper chunks")

        # Renumber the citations of every subsection concurrently.
        tasks = [
            self._subsection_post(
                subsection, all_paper_chunks, all_paper_chunks_index_map
            )
            for section in survey.sections
            for subsection in section.subsections
        ]
        await asyncio.gather(*tasks)

        survey.chunks = all_paper_chunks
        logger.info("Post-inference processing completed")
        return survey

    async def _subsection_post(
        self,
        subsection: Subsection,
        all_paper_chunks,
        all_paper_chunks_index_map: Dict[str, int],
    ):
        """Replace each citation tag in the subsection content with its
        global reference number from ``all_paper_chunks_index_map``."""
        all_tags = await extract_sup_tags(subsection.content)
        logger.info(
            f"Processing subsection '{subsection.title}' with {len(all_tags)} tags"
        )

        for tag in set(all_tags):
            chunk_index = all_paper_chunks_index_map.get(tag)
            if chunk_index is None:
                # Unknown tag: fall back to the closest chunk via fix_tag
                # (rerank model) and log the miss.
                paper_info, chunk_index, score = await fix_tag(tag, all_paper_chunks)
                logger.error(f"Chunk index not found for tag: {tag}")
            logger.info(f"Replacing tag {tag} with chunk index {chunk_index}")
            subsection.content = subsection.content.replace(tag, str(chunk_index))

    @timer
    async def run(self, survey: Survey) -> Survey:
        """Generate all subsection contents concurrently, cache the raw
        result, then run citation post-processing."""
        logger.info(f"Starting content generation for survey: {survey.title}")

        tasks = [
            self._generate_subsection_content(survey, section_num, subsection_num)
            for section_num, section in enumerate(survey.sections)
            for subsection_num, _ in enumerate(section.subsections)
        ]
        await asyncio.gather(*tasks)
        # Ensure the cache directory exists before writing (previously
        # crashed on a fresh checkout where data/cache was absent).
        os.makedirs("data/cache", exist_ok=True)
        with open("data/cache/survey_with_content.md", "w", encoding="utf-8") as f:
            f.write(survey.text)
        survey = await self.post_inference(survey)
        with open("data/cache/survey_with_content_post.md", "w", encoding="utf-8") as f:
            f.write(survey.text)
        logger.info("Content generation completed")
        return survey


class RefineSurvey:
    """Improves the coherence of a completed survey.

    Each subsection is rewritten by the LLM together with the content of its
    neighbouring subsections (local coherence enhancement, "LCE"), so that
    transitions between adjacent subsections read naturally.

    Scheduling: subsections are numbered sequentially and processed in two
    concurrent waves — all even-numbered ones first, then all odd-numbered
    ones. Two adjacent subsections are therefore never rewritten at the same
    time, which avoids each wave reading the other's half-updated text.
    """

    def __init__(self, kb_service: "KBService | None" = None):
        # Create the KB client lazily: the previous `= KBService()` default
        # was evaluated once at class-definition time and shared by every
        # instance (mutable-default pitfall).
        self.kb_service = kb_service if kb_service is not None else KBService()

    @timer
    @limmiter(semaphore_plus)
    async def lce(self, survey: Survey, i, j):
        """Rewrite subsection (i, j) for coherence with its neighbours.

        Builds an LCE prompt from the previous, current, and next subsection
        contents, invokes the LLM, and stores the rewritten text back on the
        subsection (mutates ``survey`` in place).

        Args:
            survey (Survey): the survey being refined.
            i (int): section index.
            j (int): subsection index.
        """
        subsection = survey.sections[i].subsections[j]

        prompt = LCE_PROMPT.format(
            topic=survey.title,
            prev_subsection=self._get_prev_content(survey, i, j),
            next_subsection=self._get_next_content(survey, i, j),
            subsection=subsection.content,
        )
        rsp = await llm.ainvoke(prompt)
        subsection.content = rsp.content

    def _get_prev_content(self, survey: Survey, i: int, j: int) -> str:
        """Return the content of the subsection immediately before (i, j).

        Returns "" for the very first subsection; crosses the section
        boundary (last subsection of the previous section) when j == 0.
        """
        if j > 0:
            return survey.sections[i].subsections[j - 1].content
        if i == 0:
            return ""
        prev_subsections = survey.sections[i - 1].subsections
        # Guard: a section with no subsections used to raise IndexError here.
        return prev_subsections[-1].content if prev_subsections else ""

    def _get_next_content(self, survey: Survey, i: int, j: int) -> str:
        """Return the content of the subsection immediately after (i, j).

        Returns "" for the very last subsection; crosses the section
        boundary (first subsection of the next section) at a section's end.
        """
        subsections = survey.sections[i].subsections
        if j < len(subsections) - 1:
            return subsections[j + 1].content
        if i == len(survey.sections) - 1:
            return ""
        next_subsections = survey.sections[i + 1].subsections
        # Guard: a section with no subsections used to raise IndexError here.
        return next_subsections[0].content if next_subsections else ""

    @timer
    async def run(self, survey: Survey) -> Survey:
        """Refine every subsection of the survey.

        Collects all (section, subsection) index pairs, alternately assigns
        them to an even and an odd group, then processes each group as one
        concurrent wave (even first, odd second) so that neighbouring
        subsections are never rewritten simultaneously.
        """
        even_pairs = []  # subsections with an even sequence number
        odd_pairs = []  # subsections with an odd sequence number
        k = 0
        for i, section in enumerate(survey.sections):
            for j in range(len(section.subsections)):
                (even_pairs if k % 2 == 0 else odd_pairs).append((i, j))
                k += 1

        # Wave 1: even-numbered subsections, concurrently.
        await asyncio.gather(*(self.lce(survey, i, j) for i, j in even_pairs))

        # Wave 2: odd-numbered subsections, concurrently.
        await asyncio.gather(*(self.lce(survey, i, j) for i, j in odd_pairs))

        return survey

def save_cache(survey: "Survey", file_path: str):
    """Persist a survey twice: rendered markdown at ``file_path`` and a
    pickle of the full object at ``file_path + ".pkl"``.

    Creates the parent directory if needed. The previous unconditional
    ``os.makedirs(os.path.dirname(file_path))`` raised FileNotFoundError
    when ``file_path`` had no directory component (dirname == "").
    """
    import pickle

    directory = os.path.dirname(file_path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(survey.text)

    with open(file_path + ".pkl", "wb") as f:
        pickle.dump(survey, f)


@timer
async def gen_survey(topic: str, final_file_path: str):
    """End-to-end survey pipeline: outline -> content -> coherence refinement.

    Each stage's result is cached under data/cache/ (markdown + pickle) so a
    partial run can be resumed from the pickles.

    Args:
        topic: the survey topic.
        final_file_path: where to write the final rendered markdown.

    Returns:
        The fully generated Survey object.
    """
    kb_service = KBService(semaphore=semaphore_rag)
    logger.info(f"Starting survey generation for topic: {topic}")
    # Stage 1: outline.
    gen_outline = GenOutline(kb_service)
    survey = await gen_outline.run(topic)  # ~100 s
    save_cache(survey, "data/cache/survey_with_outline.md")
    # Stage 2: content.
    gen_content = GenContent(kb_service)
    survey = await gen_content.run(survey)  # ~40 s
    save_cache(survey, "data/cache/survey_with_content.md")
    # Stage 3: refinement. Share the same kb_service as the other stages
    # (previously RefineSurvey built its own KBService without the semaphore).
    refine_survey = RefineSurvey(kb_service)
    survey = await refine_survey.run(survey)  # ~36 s
    save_cache(survey, "data/cache/survey_final.md")
    logger.info(f"Writing final survey to: {final_file_path}")
    with open(final_file_path, "w", encoding="utf-8") as file:
        file.write(survey.text)
    logger.info("Survey generation completed successfully")
    return survey


async def gen_test():
    """Smoke-test the full pipeline against a few sample topics."""
    topics = [
        "Moe",
        "What methods are there to enhance the planning capabilities of large models",
        # "What is the technological development roadmap for multimodal large models",
        # "Explainable AI",
    ]
    for idx, topic in enumerate(topics):
        await gen_survey(topic, f"data/cache/survey_final_{idx}.md")


if __name__ == "__main__":
    # asyncio is already imported at the top of the module; the duplicate
    # local import (and the pickle import used only by dead code) is gone.
    # To re-run only the refinement stage on a cached survey, unpickle
    # data/cache/survey.pkl and call RefineSurvey().run() on it.
    asyncio.run(gen_test())
