from typing import List

from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_core.documents.base import Document
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.runnables import Runnable
from pydantic import BaseModel, Field

from langchain_workflow.llm import ollama_llm
from logger import logger


class ContextInfoInput(BaseModel):
    """Input payload for context generation: the target chunk plus its neighbors."""

    # The chunk we want a context "fingerprint" for.
    chunk_content: str = Field(description="Content of the file fragment")
    # Concatenated text of surrounding chunks, used as grounding material.
    nearby_chunks: str = Field(description="Content near the file fragment")


class ChunkWithContextInfoResult(BaseModel):
    """Structured LLM output: a short context description for one chunk.

    The ``description`` below is fed to the LLM via the parser's format
    instructions, so it doubles as part of the prompt contract.
    """

    context: str = Field(
        description="Context information for locating the text fragment to be retrieved, concise yet informative, not exceeding 50 characters"
    )


class ContextSpliter:
    """Enrich article chunks with LLM-generated context metadata.

    For every chunk in *full_document*, an LLM is asked to produce a short,
    distinctive description ("fingerprint") of where the chunk sits in the
    article. The result is stored in ``chunk.metadata["context"]`` so that
    downstream retrieval can disambiguate similar-looking chunks.

    NOTE(review): class name keeps the original spelling ("Spliter") because
    external callers reference it; renaming would break the interface.
    """

    def __init__(
        self,
        full_document,
        nearby_size: int = 5,
        max_retries: int = 3,
    ):
        """
        Args:
            full_document: Pre-split chunks (``List[Document]``) of one article.
            nearby_size: Number of neighboring chunks on each side supplied as
                surrounding context (default 5, matching the original behavior).
            max_retries: LLM invocation attempts per chunk before giving up
                and falling back to an empty context (default 3).
        """
        self.full_document: List[Document] = full_document
        self.full_text = "\n".join(doc.page_content for doc in self.full_document)
        self.nearby_size = nearby_size
        self.max_retries = max_retries
        self.llm_chain = self._create_llm_chain()
        logger.debug(f"成功加载文档，总长度: {len(self.full_text)} 字符")

    def _create_llm_chain(self) -> Runnable:
        """Build the ``prompt | llm | parser`` pipeline.

        Returns:
            A LangChain ``Runnable`` sequence. (The original annotation said
            ``LLMChain``, but ``|`` composition produces a Runnable, not an
            ``LLMChain`` instance.)
        """
        prompt_template = """
        -- Task Description --
        Generate concise context information for a given text chunk to facilitate accurate retrieval within an article. Use the provided chunk, article introduction, and surrounding content.

        -- Input --
        <Target Chunk>:
        ```
        {chunk_content}
        ```
        ---
        <Surrounding Content>:
        ```
        {nearby_chunks}
        ```

        -- Output Requirements --
        1. Provide concise yet comprehensive context information in 50 words or less.
        2. Focus on key topics, themes, or unique identifiers that distinguish this chunk.
        3. Include relevant section headings or structural information if available.
        4. Avoid repeating exact phrases from the chunk unless they are crucial identifiers.
        5. Use the following format for your output:
        {format_instructions}

        Remember: The goal is to create a unique "fingerprint" for this chunk that aids in its retrieval.
        """
        parser = PydanticOutputParser(pydantic_object=ChunkWithContextInfoResult)
        prompt = PromptTemplate(
            # BUG FIX: the template defines only {chunk_content} and
            # {nearby_chunks}; the previously declared "header_content"
            # variable had no matching placeholder.
            input_variables=["chunk_content", "nearby_chunks"],
            template=prompt_template,
            partial_variables={
                "format_instructions": parser.get_format_instructions()
            },
        )
        return prompt | ollama_llm | parser

    def get_context(self, input_data: ContextInfoInput) -> ChunkWithContextInfoResult:
        """Generate context info for a single chunk, retrying on failure.

        Args:
            input_data: The chunk and its surrounding text.

        Returns:
            The parsed LLM result, or a result with an empty ``context``
            string if all retries fail (the caller still gets a valid object).
        """
        for attempt in range(self.max_retries):
            try:
                chunk_with_context: ChunkWithContextInfoResult = self.llm_chain.invoke(
                    input_data.model_dump()
                )
                return chunk_with_context
            except Exception as e:
                logger.warning(f"尝试 {attempt + 1}/{self.max_retries} 失败: {str(e)}")
                if attempt == self.max_retries - 1:
                    logger.error(f"所有重试都失败，无法为片段生成上下文信息: {str(e)}")
        # Fallback: never raise to the caller; an empty context marks failure.
        return ChunkWithContextInfoResult(context="")

    def run(self) -> List[Document]:
        """Attach context metadata to every chunk of the document.

        Iterates over all chunks, builds a sliding window of up to
        ``nearby_size`` chunks on each side (including the chunk itself),
        asks the LLM for a context description, and writes it into
        ``metadata["context"]``.

        Returns:
            List[Document]: The same chunk list, mutated in place with the
            added ``context`` metadata.
        """
        logger.info("开始为文档片段添加上下文信息")

        # Local import keeps tqdm optional for callers that never run() this.
        from tqdm import tqdm

        total_paragraphs = len(self.full_document)
        for i, paragraph in tqdm(
            enumerate(self.full_document),
            total=total_paragraphs,
            desc="处理段落",
            ncols=100,
        ):
            # Window of neighboring chunks, clamped to document bounds.
            start = max(0, i - self.nearby_size)
            end = min(total_paragraphs, i + self.nearby_size + 1)
            nearby_chunks = " ".join(
                doc.page_content for doc in self.full_document[start:end]
            )

            result = self.get_context(
                ContextInfoInput(
                    chunk_content=paragraph.page_content,
                    nearby_chunks=nearby_chunks,
                )
            )

            paragraph.metadata["context"] = result.context
            # BUG FIX: the original debug message was truncated mid-sentence.
            logger.debug(f"为片段 {i} 生成上下文: {result.context}")
        logger.info(f"完成上下文信息添加，共处理 {total_paragraphs} 个段落")
        return self.full_document


if __name__ == "__main__":
    from langchain_workflow.splitter import get_all_documents

    # Smoke run: enrich all chunks and show the first one.
    splitter = ContextSpliter(get_all_documents())
    enriched = splitter.run()
    print(enriched[0])
