import asyncio
from datetime import datetime
import logging
from pathlib import Path
import re
from typing import List, Dict, Any, Tuple
import nest_asyncio
from nltk.tokenize import sent_tokenize
from research_agent.core.find_statement_citation import FindStatementCitation
from research_agent.core.query import Query
from research_agent.core.reference_processor import ReferenceProcessor
from functools import partial
# Module-level logger configuration.

logger = logging.getLogger(__name__)

# Apply nest_asyncio so coroutines can run inside an already-running
# event loop (e.g. when this pipeline is driven from a Jupyter notebook).
nest_asyncio.apply()


class CitationPipeline:
    """Literature-citation processing pipeline.

    Splits a markdown document into header-delimited sections, merges them
    into length-bounded chunks, finds statements that need citations,
    retrieves candidate papers for each statement, scores them, and
    rewrites the document with numbered references.
    """

    def __init__(self, topic: str, content: str, min_length: int = 2000,
                 max_length: int = 2100, draft_iteration_output_dir: Path = None):
        """Initialize the citation pipeline.

        Args:
            topic (str): Document topic.
            content (str): Full document text (markdown with ## headers).
            min_length (int): Minimum merged-section length, in characters.
            max_length (int): Maximum merged-section length, in characters.
            draft_iteration_output_dir (Path): Base directory for run output;
                a timestamped subdirectory is created inside it. Falls back
                to the current directory when omitted.
        """
        self.topic = topic
        self.content = content
        self.min_length = min_length
        self.max_length = max_length

        # Collaborating components.
        self.find_statement_citation = FindStatementCitation()
        self.query = Query()
        self.processor = ReferenceProcessor()

        # Concurrency limiter intended for query_by_content retrieval.
        # NOTE(review): not referenced by any method in this class.
        self.semaphore = asyncio.Semaphore(30)

        # Intermediate processing state.
        self.sections: List[str] = []
        self.merged_sections: List[str] = []
        self.statement_list: List[Dict] = []
        self.parsed_draft_infos: List[Dict] = []

        # Create a timestamped output directory. Fixes two defects of the
        # original code: a TypeError when the default None was passed
        # (Path(None / timestamp)), and a FileNotFoundError when the base
        # directory did not exist yet (mkdir lacked parents=True).
        base_dir = Path(draft_iteration_output_dir) if draft_iteration_output_dir is not None else Path(".")
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_dir = base_dir / timestamp
        output_dir.mkdir(parents=True, exist_ok=True)
        self.draft_iteration_output_dir = output_dir

    def split_by_primary_headers(self) -> List[str]:
        """Split the document on markdown headers (##, ###, ...).

        Returns:
            List[str]: One string per header-led section, each beginning
            with a newline followed by its header marker. Any preamble
            before the first header is dropped.
        """
        # The capturing group keeps the header markers in the split result:
        # headers == [preamble, marker1, body1, marker2, body2, ...]
        headers = re.split(r'(?m)^(##+)\s+', self.content.strip())
        sections = []

        # Walk the (marker, body) pairs starting after the preamble.
        for i in range(1, len(headers), 2):
            header = headers[i]
            content = headers[i + 1].strip() if i + 1 < len(headers) else ""
            sections.append(f"\n{header} {content}")

        self.sections = sections
        return sections

    def merge_sections(self) -> List[str]:
        """Merge split sections into chunks within [min_length, max_length].

        Sections are concatenated greedily; a chunk is flushed once it can
        no longer absorb the next section. A single oversized section may
        produce a chunk longer than max_length, which is emitted as-is.

        Returns:
            List[str]: Merged, non-empty section chunks.
        """
        merged_sections = []
        current_section = ""

        for section in self.sections:
            if len(current_section) + len(section) < self.min_length:
                # Still below the minimum: keep absorbing sections.
                current_section += section
            else:
                if self.min_length <= len(current_section) <= self.max_length:
                    # Current chunk is already in range: flush it.
                    merged_sections.append(current_section)
                    current_section = section
                else:
                    if len(current_section) > self.max_length:
                        # Oversized chunk: emit as-is.
                        merged_sections.append(current_section)
                        current_section = section
                    else:
                        # Chunk is short; try absorbing one more section
                        # without overshooting max_length.
                        combined_section = current_section + section
                        if len(combined_section) <= self.max_length:
                            current_section = combined_section
                        else:
                            merged_sections.append(current_section)
                            current_section = section

        if current_section:
            merged_sections.append(current_section)

        self.merged_sections = [m for m in merged_sections if m != ""]
        return self.merged_sections

    def update_sections(self, merged_sections: List[str], results: List[List[Tuple[int, str]]]) -> str:
        """Replace sentences inside each merged section with updated text.

        Args:
            merged_sections (List[str]): The merged section chunks.
            results (List[List[Tuple[int, str]]]): Per-section replacement
                lists of (sentence_index, replacement_text) pairs; indices
                refer to the NLTK sentence tokenization of that section.

        Returns:
            str: All updated sections concatenated into one string.
        """
        new_section = []

        for i, section in enumerate(merged_sections):
            update_section = section
            # Sentence indices in results[i] refer to this tokenization.
            sentences = sent_tokenize(update_section)

            for result in results[i]:
                # re.escape treats the sentence as a literal pattern; the
                # lambda keeps backslashes in the replacement text literal
                # (a plain string repl would interpret them as group refs).
                update_section = re.sub(
                    re.escape(sentences[result[0]]),
                    lambda m: result[1],
                    update_section)

            new_section.append(update_section)

        # Return the fully updated document text.
        return "".join(new_section)

    def replace_citations_with_num(self, content: str) -> Tuple[str, List[str]]:
        """Number the <sup>...</sup> citations and build a reference list.

        Args:
            content (str): Text containing <sup>citation</sup> markers.

        Returns:
            Tuple[str, List[str]]: The text with each citation replaced by
            its 1-based number, and the matching "[n] citation" list.
        """
        # Pattern matching one inline citation.
        citation_pattern = r"<sup>(.*?)</sup>"

        # Extract every citation occurrence in document order.
        citations = re.findall(citation_pattern, content)

        # Order-preserving de-duplication, then build the reference list.
        unique_citations = list(dict.fromkeys(citations))
        reference_list = [f"[{i + 1}] {citation}" for i,
                          citation in enumerate(unique_citations)]

        # Replace every occurrence of each citation with its number.
        for i, citation in enumerate(unique_citations):
            content = content.replace(
                f"<sup>{citation}</sup>", f"<sup>{i + 1}</sup>")

        return content, reference_list

    async def find_statement_citations(self) -> List[Dict]:
        """Find statements needing citations in the merged sections.

        Returns:
            List[Dict]: Citation-finder results, one list per section.

        Raises:
            Exception: Re-raised from the citation finder after logging.
        """
        try:
            self.statement_list = await self.find_statement_citation.process_all_sections(
                sections=self.merged_sections,
                topic=self.topic
            )
            return self.statement_list
        except Exception as e:
            logger.error(f"查找引用时发生错误: {str(e)}")
            raise

    async def prepare_draft_info(self) -> List[Dict]:
        """Retrieve candidate papers for every statement in every section.

        Returns:
            List[Dict]: One info dict per merged section holding the section
            text, hyde statements, evidence spans, retrieved papers,
            original statements, and keywords.

        Raises:
            Exception: Re-raised after logging when preparation fails.
        """
        try:
            # Pre-compute per-section inputs from the citation-finder output.
            section_data = []
            for index, citation_finder_result in enumerate(self.statement_list):
                sentences = sent_tokenize(self.merged_sections[index])
                section_data.append({
                    'hydes': [r["statement_hyde"] for r in citation_finder_result],
                    'evidence_spans': [r["evidence_spans"] for r in citation_finder_result],
                    'keywords': [r["keywords"] for r in citation_finder_result],
                    'original_statements': [
                        # Join the sentence span covered by the evidence indices.
                        "\n".join(
                            sentences[min(r["evidence_spans"]):max(r["evidence_spans"]) + 1])
                        for r in citation_finder_result
                    ]
                })

            async def process_section(index, data):
                # Build the retrieval tasks in batches. NOTE(review):
                # batching only groups task *creation*; all tasks are still
                # awaited together below, so it does not bound concurrency.
                batch_size = 10  # number of queries per batch
                tasks = []
                for i in range(0, len(data['hydes']), batch_size):
                    batch_hydes = data['hydes'][i:i + batch_size]
                    batch_original_statements = data['original_statements'][i:i + batch_size]
                    batch_tasks = [
                        self.query.query_by_content(h + "\n" + o, top_k=20)
                        for h, o in zip(batch_hydes, batch_original_statements)
                    ]
                    tasks.extend(batch_tasks)

                # return_exceptions=True keeps one failed query from
                # cancelling the whole section.
                retrieved_papers = await asyncio.gather(*tasks, return_exceptions=True)
                return {
                    'section': self.merged_sections[index],
                    'hyde': data['hydes'],
                    'evidence_spans': data['evidence_spans'],
                    'retrieved_papers': retrieved_papers,
                    'original_statement': data['original_statements'],
                    'keywords': data['keywords']
                }

            async def process_all_sections(section_data):
                # Fan out all sections concurrently and collect the results.
                tasks = [process_section(idx, data)
                         for idx, data in enumerate(section_data)]
                return await asyncio.gather(*tasks, return_exceptions=True)

            self.parsed_draft_infos = await process_all_sections(section_data)

            return self.parsed_draft_infos

        except Exception as e:
            logger.error(f"准备草稿信息时发生错误: {str(e)}")
            raise

    async def process_drafts(self) -> Any:
        """Run the reference processor over all prepared draft infos.

        Returns:
            Any: Whatever the reference processor produces.

        Raises:
            Exception: Re-raised from the processor after logging.
        """
        try:
            return await self.processor.process_drafts(self.parsed_draft_infos)
        except Exception as e:
            logger.error(f"处理草稿时发生错误: {str(e)}")
            raise

    async def process_single_hyde(
        self,
        hyde: str,
        evidence_index: List[int],
        retrieved_papers: List[Dict],
        keywords: str,
        merge_section: str
    ) -> Any:
        """Process a single hyde statement through the reference processor.

        Args:
            hyde (str): The hyde statement.
            evidence_index (List[int]): Sentence indices of the evidence.
            retrieved_papers (List[Dict]): Retrieved candidate papers.
            keywords (str): Associated keywords.
            merge_section (str): The merged section containing the statement.

        Returns:
            Any: The processor's result for this hyde.

        Raises:
            Exception: Re-raised from the processor after logging.
        """
        try:
            return await self.processor.process_hyde(
                hyde=hyde,
                evidence_index=evidence_index,
                retrieved_papers=retrieved_papers,
                keywords=keywords,
                merge_section=merge_section
            )
        except Exception as e:
            logger.error(f"处理单个hyde时发生错误: {str(e)}")
            raise

    def find_citation_max_score(self, final_survey):
        """Return the highest <ss> score seen for each citation index.

        Args:
            final_survey (str): Text containing <sup>i</sup><ss>score</ss>
                pairs.

        Returns:
            Dict[str, float]: Map of citation index (as a string) to its
            maximum score.
        """
        citations = re.findall(r"<sup>(.*?)</sup><ss>(.*?)</ss>", final_survey)
        max_scores = {}
        for index, score in citations:
            score = float(score)
            if index not in max_scores or score > max_scores[index]:
                max_scores[index] = score
        return max_scores

    def reduce_citation(self, match, max_scores):
        """Regex replacement callback: keep only high-value citations.

        Keeps a <sup>i</sup><ss>s</ss> occurrence when its score is the
        maximum for that index, or when it is at least 20; every other
        occurrence is deleted.

        Args:
            match: A regex match for <sup>(index)</sup><ss>(score)</ss>.
            max_scores (Dict[str, float]): Per-index maximum scores.

        Returns:
            str: The original matched text, or "" to drop the citation.
        """
        index = match.group(1)
        score = float(match.group(2))
        if score == max_scores[index] or score >= 20:
            return match.group(0)  # keep this occurrence
        else:
            return ""  # drop low-scoring duplicate

    def process_final_survey(self, final_survey):
        """Prune duplicate low-score citations and strip <ss> score tags.

        Args:
            final_survey (str): Survey text with scored citations.

        Returns:
            str: Cleaned survey text with only winning citations left.
        """
        max_scores = self.find_citation_max_score(final_survey)
        reduce_citation_with_scores = partial(
            self.reduce_citation, max_scores=max_scores)
        final_survey = re.sub(r"<sup>(.*?)</sup><ss>(.*?)</ss>",
                              reduce_citation_with_scores, final_survey)
        # Remove any remaining score tags.
        final_survey = re.sub(r"<ss>.*?</ss>", "", final_survey)
        return final_survey

    async def pipeline_reference(self):
        """Run the full citation pipeline end to end.

        Returns:
            str: The final survey text with a numbered References section.
        """
        logger.info("开始处理引用pipeline")
        self.split_by_primary_headers()
        self.merge_sections()
        logger.info("已合并段落")
        # =================== find citations ===================
        await self.find_statement_citations()
        logger.info("已完成find_statement")
        # =================== retrieve papers ===================
        await self.prepare_draft_info()
        logger.info("已完成prepare_draft_info")
        # =================== vector scoring ===================
        results = await self.process_drafts()
        logger.info("已完成process_drafts")

        # =================== update sections ===================
        new_content = self.update_sections(self.merged_sections, results)
        logger.info("已完成update_sections")

        # Persist the raw (un-numbered) result for inspection.
        with open(self.draft_iteration_output_dir / "raw_reference_survey.md", "w", encoding="utf-8") as file:
            file.write(new_content)
        logger.info("已保存结果到raw_reference_survey.md")

        new_content, reference_list = self.replace_citations_with_num(
            new_content)
        logger.info("已完成replace_citations_with_num")
        final_survey = new_content + "\n\n" + \
            "# References\n\n" + "\n".join(reference_list)
        final_survey = self.process_final_survey(final_survey)
        logger.info("已完成process_final_survey")

        return final_survey


async def main():
    """Demo entry point: run the citation pipeline over ``test.md``.

    Returns:
        str: The final survey text with a numbered References section.
    """
    # Read the sample document.
    with open("test.md", "r", encoding="utf-8") as file:
        content = file.read()

    # An explicit output directory must be supplied: the constructor builds
    # a timestamped subdirectory under it. The original call omitted it and
    # crashed with a TypeError on Path(None / timestamp).
    pipeline = CitationPipeline(
        topic="Technological Roadmap of Multi-Modal Large Models: Evolution, Methodologies, and Future Directions",
        content=content,
        draft_iteration_output_dir=Path(".")
    )

    # Run the same stages as pipeline_reference, step by step.
    pipeline.split_by_primary_headers()
    pipeline.merge_sections()
    await pipeline.find_statement_citations()
    await pipeline.prepare_draft_info()
    results = await pipeline.process_drafts()
    new_content = pipeline.update_sections(pipeline.merged_sections, results)
    new_content, reference_list = pipeline.replace_citations_with_num(
        new_content)
    final_survey = new_content + "\n\n" + \
        "# References\n\n" + "\n".join(reference_list)
    final_survey = pipeline.process_final_survey(final_survey)
    return final_survey

# Script entry point: run the demo pipeline to completion.
if __name__ == "__main__":
    asyncio.run(main())
