import logging
import asyncio
from typing import List, Dict, Any, Tuple, Optional
from nltk.tokenize import sent_tokenize
import time
from research_agent.core.embedding_model import EmbeddingModel_speed
from research_agent.core.utils import chunking
from research_agent.core.query import Query
from research_agent.core.gene_hyde import GenStatementHyde
import re
from research_agent.core.config import Config
# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class ReferenceProcessor:
    """Attaches literature citations to draft statements.

    Pipeline per statement: score retrieved papers against the statement by
    cosine similarity, rerank the survivors, and append formatted
    ``<sup>…</sup>`` citation tags to the sentence the evidence supports.
    When every score is too low, the HYDE query is regenerated and retrieval
    is retried with previously failed papers excluded.
    """

    def __init__(self):
        """Initialize config, embedding model, HYDE generator and query client."""
        self.configs = Config()
        self.embedding_model = EmbeddingModel_speed()
        self.gene_statement_hyde = GenStatementHyde()
        self.query = Query()

    async def _update_sentence(self, sentences: List[str], evidence_index: List[int], reference_info: str) -> Tuple[Optional[int], Optional[str]]:
        """Append ``reference_info`` to the last evidence sentence (in place).

        Args:
            sentences: Tokenized sentences of the section; mutated in place.
            evidence_index: Indices of the sentences the evidence supports.
            reference_info: Citation markup to append.

        Returns:
            ``(target_index, updated_sentence)``, or ``(None, None)`` when
            ``evidence_index`` is empty.
        """
        if not evidence_index:
            return None, None
        # The citation is attached to the *last* sentence the evidence covers.
        target_index = max(evidence_index)
        sentences[target_index] += reference_info
        return target_index, sentences[target_index]

    async def _process_papers_content(self, retrieved_paper_list: List[Dict[str, Any]]) -> List[str]:
        """Extract chunk texts from retrieved papers and re-chunk them.

        Both steps run in the default executor so the event loop stays free.

        Args:
            retrieved_paper_list: Retrieval hits with ``entity.chunk_text``.

        Returns:
            The chunked paper contents.
        """
        loop = asyncio.get_running_loop()
        papers_content = await loop.run_in_executor(
            None,
            lambda: [r["entity"]["chunk_text"] for r in retrieved_paper_list]
        )
        return await loop.run_in_executor(None, chunking, papers_content)

    @staticmethod
    def _dedupe_papers(nested_paper_lists: List[List[Dict[str, Any]]]) -> List[Dict[str, Any]]:
        """Flatten nested retrieval results, keeping the first hit per paper id."""
        seen_ids = set()
        unique_papers = []
        for sublist in nested_paper_lists:
            for paper in sublist:
                if paper["id"] not in seen_ids:
                    seen_ids.add(paper["id"])
                    unique_papers.append(paper)
        return unique_papers

    async def _handle_rerank_failure(self, statement: str, rerank_papers_content: List[str],
                                     rerank_papers: List[Dict[str, Any]], rerank_threshold: float) -> str:
        """Rerank candidates in small batches and format citations above threshold.

        Fallback path used when a single full rerank request failed or
        returned nothing: the candidate set is split into batches of 8 and
        each batch is reranked concurrently.

        Args:
            statement: Query text the documents are reranked against.
            rerank_papers_content: Chunk texts parallel to ``rerank_papers``.
            rerank_papers: Candidate paper dicts.
            rerank_threshold: Minimum relevance score for a citation.

        Returns:
            Concatenated ``<sup>…</sup><ss>score</ss>`` citation markup.
        """
        batch_size = 8  # rerank at most 8 documents per request
        paper_batches = [rerank_papers[i:i + batch_size]
                         for i in range(0, len(rerank_papers), batch_size)]
        content_batches = [chunking(rerank_papers_content[i:i + batch_size])
                           for i in range(0, len(rerank_papers_content), batch_size)]
        # Fan out one rerank request per batch and await them together.
        rerank_results = await asyncio.gather(*[
            self.embedding_model.rerank_documents(query=statement, documents=batch)
            for batch in content_batches
        ])
        # BUG FIX: skip falsy batch results here too — the original log line
        # indexed every batch result and crashed when a batch returned
        # None/empty, the very case the loop below guards against.
        logger.info("第二次重排序结果: %s",
                    [result['relevance_score']
                     for batch_result in rerank_results if batch_result
                     for result in batch_result['results']])
        reference_info = ""
        for batch_result, paper_batch in zip(rerank_results, paper_batches):
            if not batch_result:
                continue
            for result in batch_result["results"]:
                if result["relevance_score"] > rerank_threshold:
                    # Reuse the shared formatter instead of duplicating it here.
                    reference_info += (self._format_reference(paper_batch[result["index"]])
                                       + f"<ss>{result['relevance_score']}</ss>")
        return reference_info

    def _format_reference(self, reference_paper: Dict[str, Any]) -> str:
        """Format one retrieved paper as a ``<sup>…</sup>`` citation tag.

        Args:
            reference_paper: Retrieval hit with ``entity`` fields
                ``paper_title``, ``original_filename``, ``year``, ``chunk_id``.

        Returns:
            The formatted citation string.
        """
        entity = reference_paper['entity']
        # Strip any citation tags already embedded in the title.
        paper_title = re.sub(r'<sup>.*?</sup>', '', entity['paper_title'])
        # Extract the source name from "Data_<name>_with..." filenames;
        # ROBUSTNESS FIX: fall back to the raw filename instead of raising
        # IndexError on a non-matching filename.
        match = re.search(r"Data_+(.*?)_with", entity['original_filename'])
        original_filename = match.group(1) if match else entity['original_filename']
        original_filename = re.sub(r"[\d]", "", original_filename)
        original_filename = re.sub(r"[_]", " ", original_filename)
        return (f"<sup>{paper_title} "
                f"{original_filename},"
                f"{entity['year']}, "
                f"chunk {entity['chunk_id']}</sup>")

    async def process_hyde(self, hyde: str, original_statement: str, evidence_index: List[int],
                           retrieved_papers: List[Dict[str, Any]], keywords: str,
                           merge_section: str, cos_threshold: float = 0.5,
                           rerank_threshold: float = 19) -> Tuple[Optional[int], Optional[str]]:
        """Attach citations for one statement/HYDE pair to its section.

        Makes up to two attempts: papers above ``cos_threshold`` are
        reranked, and rerank hits above ``rerank_threshold`` become
        citations.  When scores are too low, a fresh HYDE is generated and
        retrieval retried, excluding papers that already failed.

        Args:
            hyde: HYDE text used to (re)generate queries.
            original_statement: The statement the citations must support.
            evidence_index: Sentence indices the evidence maps to.
            retrieved_papers: Initially retrieved candidate papers.
            keywords: Keywords forwarded to HYDE regeneration.
            merge_section: Section text to tokenize into sentences.
            cos_threshold: Minimum cosine similarity to keep a paper.
            rerank_threshold: Minimum rerank relevance score to cite a paper.

        Returns:
            ``(target_index, updated_sentence)`` from ``_update_sentence``,
            or ``(None, None)`` when no citation target exists.
        """
        loop = asyncio.get_running_loop()
        logger.info("开始处理 HYDE: %s", hyde)
        start_time = time.time()

        sentences = await loop.run_in_executor(None, sent_tokenize, merge_section)
        failed_papers_id: List[str] = []
        reference_info = ""

        for attempt in range(2):
            logger.info("尝试次数: %d", attempt + 1)
            papers_content = await self._process_papers_content(retrieved_papers)
            cos_scores = await self.embedding_model.get_cos_scores(original_statement, papers_content)

            # On the last attempt, give up when nothing clears the cosine threshold.
            if attempt == 1 and all(score < cos_threshold for score in cos_scores):
                # BUG FIX: was ``print("尝试次数: %d", attempt + 1)`` — %-style
                # args passed to print are never formatted; use the logger.
                logger.info("尝试次数: %d", attempt + 1)
                break

            if any(score > cos_threshold for score in cos_scores):
                try:
                    logger.info("开始重排序")
                    rerank_papers = [paper for idx, paper in enumerate(retrieved_papers)
                                     if cos_scores[idx] > cos_threshold]
                    rerank_papers_content = [paper["entity"]["chunk_text"] for paper in rerank_papers]
                    rerank_results = await self.embedding_model.rerank_documents(
                        query=original_statement, documents=rerank_papers_content)

                    if not rerank_results:
                        logger.warning("没有重排序结果")
                        reference_info = await self._handle_rerank_failure(
                            statement=original_statement,
                            rerank_papers_content=rerank_papers_content,
                            rerank_papers=rerank_papers,
                            rerank_threshold=rerank_threshold)
                        break

                    scores = [result["relevance_score"] for result in rerank_results["results"]]
                    if any(score > rerank_threshold for score in scores):
                        logger.info("第一次重排序结果: %s", scores)
                        reference_info = self._process_rerank_results(
                            rerank_results, rerank_papers, rerank_threshold)
                        logger.info("第一次重排序成功")
                        break

                    if all(score < rerank_threshold for score in scores):
                        logger.info("第一次重排序结果: %s", scores)
                        failed_papers_id += [r["id"] for r in retrieved_papers]
                        logger.warning(
                            "第一次重排序分数均小于 %f，重新生成 HYDE。", rerank_threshold)
                        new_hyde, core_questions = await self.gene_statement_hyde.generate_statement_hyde(
                            statements=hyde, keywords=keywords)
                        # Re-retrieve with the fresh HYDE; this dedupes by id
                        # and excludes papers that already failed.
                        second_pass_papers = await self._retrieve_new_papers(
                            new_hyde, core_questions, failed_papers_id)
                        logger.info("第二次重排序检索到的论文数量: %d", len(second_pass_papers))
                        second_pass_content = [paper["entity"]["chunk_text"]
                                               for paper in second_pass_papers]
                        # NOTE(review): the batch reranker chunks its input
                        # again; presumably ``chunking`` is idempotent — confirm.
                        second_pass_content = await loop.run_in_executor(
                            None, chunking, second_pass_content)
                        logger.info("第二次重排序")
                        reference_info = await self._handle_rerank_failure(
                            statement=original_statement,
                            rerank_papers_content=second_pass_content,
                            rerank_papers=second_pass_papers,
                            rerank_threshold=rerank_threshold)
                        break
                    # Scores exactly at the threshold satisfy neither branch
                    # and fall through to HYDE regeneration below.
                except Exception as e:
                    logger.error("重排序失败: %s", e)
                    # CONSISTENCY FIX: query the batch-rerank fallback with the
                    # statement, as every other rerank call does (this path
                    # previously passed the HYDE text instead).
                    reference_info = await self._handle_rerank_failure(
                        original_statement, papers_content, retrieved_papers, rerank_threshold)
                    break

            # Similarity too low: regenerate the HYDE and re-retrieve,
            # excluding everything that has already failed.
            failed_papers_id.extend(paper["id"] for paper in retrieved_papers)
            new_hyde, core_questions = await self.gene_statement_hyde.generate_statement_hyde(
                statements=hyde, keywords=keywords)
            retrieved_papers = await self._retrieve_new_papers(
                new_hyde, core_questions, failed_papers_id)
            hyde = new_hyde

        logger.info("处理 HYDE 完成，耗时: %.2f 秒", time.time() - start_time)
        return await self._update_sentence(sentences, evidence_index, reference_info)

    async def _retrieve_new_papers(self, hyde: str, core_questions: List[str],
                                   failed_papers_id: List[str]) -> List[Dict[str, Any]]:
        """Retrieve fresh candidate papers for a regenerated HYDE.

        Queries the HYDE text plus each core question concurrently, then
        dedupes by paper id and drops previously failed papers.

        Args:
            hyde: Regenerated HYDE query text.
            core_questions: Core questions derived from the HYDE.
            failed_papers_id: Paper ids that already failed reranking.

        Returns:
            Deduplicated retrieval hits not in ``failed_papers_id``.
        """
        queries = [hyde] + core_questions
        tasks = [self.query.query_by_content(q, top_k=10) for q in queries]
        results = await asyncio.gather(*tasks)
        unique_papers = self._dedupe_papers(results)
        return [paper for paper in unique_papers if paper["id"] not in failed_papers_id]

    def _process_rerank_results(self, rerank_results: Dict[str, Any],
                                papers: List[Dict[str, Any]],
                                threshold: float) -> str:
        """Format citations for every rerank hit above ``threshold``.

        Args:
            rerank_results: Rerank response with a ``results`` list of
                ``{"index", "relevance_score"}`` items.
            papers: Candidate papers, indexed by each result's ``index``.
            threshold: Minimum relevance score for a citation.

        Returns:
            Concatenated ``<sup>…</sup><ss>score</ss>`` citation markup.
        """
        reference_info = ""
        for item in rerank_results["results"]:
            if item["relevance_score"] > threshold:
                paper_index = item["index"]
                # Guard against indices beyond the candidate list.
                if paper_index < len(papers):
                    reference_info += (self._format_reference(papers[paper_index])
                                       + f"<ss>{item['relevance_score']}</ss>")
        return reference_info

    async def process_drafts(self, parsed_draft_infos: List[Dict[str, Any]]) -> List[List[Tuple[Optional[int], Optional[str]]]]:
        """Process every statement of every draft concurrently.

        Args:
            parsed_draft_infos: Draft dicts carrying parallel lists under
                "hyde", "original_statement", "evidence_spans",
                "retrieved_papers" and "keywords", plus the "section" text.

        Returns:
            Per draft, the list of ``(target_index, updated_sentence)``
            results in statement order.
        """
        all_tasks = []
        for draft in parsed_draft_infos:
            all_tasks.extend(
                self.process_hyde(
                    hyde, original_statement, evidence_index, retrieved_papers,
                    keywords, draft["section"],
                    cos_threshold=self.configs.COS_THRESHOLD,
                    rerank_threshold=self.configs.RERANK_THRESHOLD,
                )
                for hyde, original_statement, evidence_index, retrieved_papers, keywords in zip(
                    draft["hyde"],
                    draft["original_statement"],
                    draft["evidence_spans"],
                    draft["retrieved_papers"],
                    draft["keywords"],
                )
            )

        results = await asyncio.gather(*all_tasks)

        # Regroup the flat result list back into one sublist per draft.
        final_output = []
        cursor = 0
        for draft in parsed_draft_infos:
            count = len(draft["hyde"])
            final_output.append(results[cursor:cursor + count])
            cursor += count

        return final_output
