import copy
import time

from agents.base_agent import BaseAgent
from my_utils.data_type import QueryResult
from prompts.two_stage_outline import STAGE_ONE_DETAIL_OUTLINE_GENERATE_PROMPT, \
    STAGE_TWO_DETAIL_OUTLINE_GENERATE_PROMPT
import asyncio
import random
import json

from my_utils.outline_parser import OutlineUtils
from my_utils.token_count import glm_tokenize_text
import re
from my_utils.data_io import save_datas, load_files
from concurrent.futures import ThreadPoolExecutor


def parse_attrs(attr_str):
    """Extract ``key="value"`` attribute pairs from a tag's attribute string.

    Returns a dict mapping attribute names to their (unquoted) values;
    attributes not matching the ``name="..."`` shape are ignored.
    """
    return {name: value for name, value in re.findall(r'(\w+)="(.*?)"', attr_str)}


def parse_tag(tag_content):
    """Classify the inside of an XML-like tag (text between ``<`` and ``>``).

    Returns one of:
      * ``{'type': 'end', 'name': ...}`` for ``</name>`` tags;
      * ``{'type': 'self_closing', 'name': ..., 'attrs': {...}}`` for ``<name .../>``;
      * ``{'type': 'start', 'name': ..., 'attrs': {...}}`` for ``<name ...>``.
    """
    tag_content = tag_content.strip()

    # End tags carry no attributes; handle them first as a guard clause.
    if tag_content.startswith('/'):
        return {'type': 'end', 'name': tag_content[1:]}

    tag_type = 'start'
    if tag_content.endswith('/'):
        tag_type = 'self_closing'
        tag_content = tag_content[:-1].strip()

    # The tag name runs up to the first space; the remainder holds attributes.
    name, _, attrs_str = tag_content.partition(' ')
    return {'type': tag_type, 'name': name, 'attrs': parse_attrs(attrs_str)}


def _parse_refs(refs_str):
    """Parse a refs attribute value like ``"['a', 'b']"`` into a list of strings.

    Strips the surrounding brackets, splits on commas, and removes quotes and
    whitespace from each element; empty elements are dropped.
    """
    refs_str = refs_str.strip('[]')
    if not refs_str:
        return []
    refs = []
    for item in refs_str.split(','):
        # Remove surrounding whitespace and any single/double quotes.
        clean_item = item.strip().strip("'\"")
        if clean_item:
            refs.append(clean_item)
    return refs


def xml_to_dict(xml_str):
    """Build a nested outline dict from a flat XML-like tag stream.

    Each node has keys ``title``, ``content`` (always ``''`` here), ``refs``
    (parsed from the tag's ``refs`` attribute), and ``children``. The first
    start tag becomes the root; self-closing tags become leaf children of the
    current open node. Returns None if the string contains no start tag.
    """
    tags_str = re.findall(r'<(.*?)>', xml_str)
    parsed_tags = [parse_tag(tag) for tag in tags_str]

    stack = []  # currently-open start tags, innermost last
    root = None

    for tag in parsed_tags:
        if tag['type'] == 'start':
            new_node = {
                'title': tag['attrs'].get('title', ''),
                'content': '',
                'refs': _parse_refs(tag['attrs'].get('refs', '[]')),
                'children': []
            }
            if stack:
                stack[-1]['children'].append(new_node)
            else:
                root = new_node
            stack.append(new_node)
        elif tag['type'] == 'self_closing':
            new_node = {
                'title': tag['attrs'].get('title', ''),
                'content': '',
                'refs': _parse_refs(tag['attrs'].get('refs', '[]')),
                'children': []
            }
            # A self-closing tag is a leaf: attach it but never push it.
            if stack:
                stack[-1]['children'].append(new_node)
        elif tag['type'] == 'end':
            if stack:
                stack.pop()

    return root


def merge_first_layer_outlines(outlines):
    """Merge several outline dicts into one, combining only the refs of the
    first-layer children.

    The first outline supplies the structure (deep-copied, so inputs are not
    mutated); the top-level refs are cleared, and each first-layer child's
    refs become the de-duplicated union of the refs of same-titled children
    across all outlines. Returns {} for an empty input list.

    Fix: de-duplication now uses dict.fromkeys instead of list(set(...)), so
    the merged refs order is deterministic (first-seen order) across runs.
    """
    if not outlines:
        return {}

    # Use the first outline as the skeleton; deep-copy to avoid mutating input.
    merged_outline = copy.deepcopy(outlines[0])
    merged_outline["refs"] = []
    for child in merged_outline["children"]:
        child["refs"] = []

    # Collect every ref under its chapter title, across all outlines.
    title_to_refs = {}
    for outline in outlines:
        for child in outline["children"]:
            title_to_refs.setdefault(child["title"], []).extend(child["refs"])

    # Write the de-duplicated refs back into the merged skeleton.
    for child in merged_outline["children"]:
        refs = title_to_refs.get(child["title"])
        if refs is not None:
            child["refs"] = list(dict.fromkeys(refs))

    return merged_outline


class DetailOutlineAgent(BaseAgent):
    """Generates a detailed survey outline in two LLM stages.

    Stage one batches paper chunks into fixed-size groups, asks the model for
    a first-level outline per group, and records a mapping from short IDs
    ("paper_1", ...) to the original paper IDs. Stage two expands each
    first-level chapter into sub-sections using the papers that chapter
    references.
    """

    def __init__(self, summary_type: str, origin_query: str, origin_result: dict,
                 paper_search_result: QueryResult = None):
        super().__init__()
        self.paper_search_result = paper_search_result
        self.summary_type = summary_type
        self.origin_query = origin_query
        self.origin_result = origin_result
        # Canonical chapter skeleton for each supported survey type; injected
        # into both stage prompts as `outline_type`.
        self.summary_type_map = {
            "A survey and review of a technical concept": """1.  Introduction
2.  Fundamental Principles and Core Mechanisms
3.  Key Technologies and Implementations
4.  Applications and Use Cases
5.  Future Trends and Research Directions
6.  Conclusion""",
            "A review of the current research status in a specific field": """1.  Introduction
2.  Key Research Areas and Themes
3.  Recent Advances and Breakthroughs
4.  Open Challenges and Research Gaps
5.  Emerging Trends and Future Directions
6.  Conclusion""",
            "A comparative analysis and review of multiple methods": """1.  Introduction
2.  Methods Overview and Description
3.  Comparative Analysis
4.  Experimental Evaluation and Benchmarking
5.  Discussion and Recommendations
6.  Conclusion""",
            "A review of the research lineage of a technical method": """1.  Introduction
2.  Origins and Early Developments
3.  Key Milestones and Breakthroughs
4.  Evolution of Variants and Extensions
5.  Current Status and Applications
6.  Future Trends and Research Directions
7.  Conclusion"""
        }
        self.detail_outline_str = ""
        self.reference_mark_str = ""
        self.detail_outline_dict = {}
        # Maps short IDs ("paper_1", ...) to the original paper IDs; filled
        # during stage one and consulted again in stage two.
        self.paper_index_dict = {}

    async def generate_stage_one(self):
        """Ask the model for a first-level outline per group of paper chunks.

        Returns a tuple ``(xml_result_list, paper_index_dict)`` where
        ``xml_result_list`` holds one parsed outline dict per group and
        ``paper_index_dict`` maps short "paper_N" IDs to original paper IDs.
        """
        paper_groups = []
        paper_ids = list(self.origin_result.keys())
        num_papers = len(paper_ids)

        # Process only 6 chunks per prompt to keep the context manageable.
        chunk_size = 6

        # Ceiling division so every paper lands in some group.
        num_groups = (num_papers + chunk_size - 1) // chunk_size

        global_index = 1  # globally unique short-ID counter across all groups

        for group_num in range(num_groups):
            paper_chunks = []
            start_index = group_num * chunk_size
            end_index = min(start_index + chunk_size, num_papers)
            index = 1  # per-group chunk numbering restarts at 1
            for idx in range(start_index, end_index):
                paper_id = paper_ids[idx]
                chunk_content = self.origin_result[paper_id]
                # Assemble the chunk's tagged text block for the prompt.
                temp = ""
                temp += f"<论文块{index}>\n"
                temp += f"<论文ID>paper_{global_index}</论文ID>\n"
                # Remember which real paper the short ID stands for.
                self.paper_index_dict[f"paper_{global_index}"] = paper_id
                temp += f"<论文标题>{chunk_content['entity']['paper_title']}</论文标题>\n"
                # Truncate the chunk text to its first 1800 whitespace tokens.
                temp += f"<论文块内容>{' '.join(chunk_content['entity']['chunk_text'].split()[:1800])}</论文块内容>\n"
                temp += f"</论文块{index}>\n"
                index += 1
                global_index += 1
                paper_chunks.append(temp)
            paper_groups.append(paper_chunks)

        prompt_list = []
        for i in range(len(paper_groups)):
            prompt_list.append(STAGE_ONE_DETAIL_OUTLINE_GENERATE_PROMPT.format(user_requirement=self.origin_query,
                                                                               summary_type=self.summary_type,
                                                                               outline_type=self.summary_type_map[
                                                                                   self.summary_type],
                                                                               paper_chunks="\n".join(paper_groups[i])))

        # Fan out all completions concurrently.
        result_list = await self.run_loop(
            model_name="GLM_4_PLUS",
            user_inputs=prompt_list,
            temperature=0.2,
            max_wait_times=1000
        )
        xml_result_list = []
        for result_item in result_list:
            # Keep only the first <root>...</root> span of the model's reply.
            matches = re.findall(r'(<root.*?>.*?</root>)', result_item, re.DOTALL)
            if matches:
                xml_result_list.append(xml_to_dict(matches[0]))
            else:
                # Malformed reply: skip this group rather than crash.
                print(f"Warning: No XML root element found in result")

        return xml_result_list, self.paper_index_dict

    async def generate_stage_two(self, first_layer_nodes):
        """Expand each first-level chapter into a sub-section outline.

        Mutates each node's ``refs`` in place (short IDs resolved to real
        paper IDs, then capped at 50 via random sampling) and returns a list
        of parsed per-chapter outline dicts, one per input node.
        """
        # Invert paper_index_dict once (first short ID per paper ID) instead
        # of scanning the whole mapping for every single reference.
        short_id_by_paper = {}
        for short_id, paper_id in self.paper_index_dict.items():
            short_id_by_paper.setdefault(str(paper_id), short_id)

        all_chunks = []
        # Resolve short IDs back to the actual paper IDs recorded in stage one.
        for item in first_layer_nodes:
            item["refs"] = [self.paper_index_dict.get(ref, ref) for ref in item["refs"]]
        first_layer_titles = [node_item["title"] for node_item in first_layer_nodes]
        for node_item in first_layer_nodes:
            # Cap the prompt size: at most 50 referenced papers per chapter.
            if len(node_item["refs"]) > 50:
                node_item["refs"] = random.sample(node_item["refs"], 50)

            chunk_content_list = ""
            for idx, paper_idx in enumerate(node_item["refs"]):
                paper_chunk = self.origin_result.get(paper_idx, None)
                if paper_chunk is None:
                    continue
                # Short ID shown to the model so its citations stay compact.
                simple_paper_id = short_id_by_paper.get(paper_idx, "")

                chunk_content_list += f"<Paper_Chunk_{idx}>\n"
                chunk_content_list += f"<paper_ID>{simple_paper_id}</paper_ID>\n"
                chunk_content_list += f"<Paper_Title>{paper_chunk['entity']['paper_title']}</Paper_Title>\n"
                # Truncate the chunk text to its first 1500 whitespace tokens.
                chunk_content_list += f"<Paper_Chunk_Content>{' '.join(paper_chunk['entity']['chunk_text'].split()[:1500])}</Paper_Chunk_Content>\n"
                chunk_content_list += f"</Paper_Chunk_{idx}>\n\n"

            all_chunks.append(chunk_content_list)

        prompt_list = []
        for i in range(len(all_chunks)):
            prompt_list.append(STAGE_TWO_DETAIL_OUTLINE_GENERATE_PROMPT.format(user_requirement=self.origin_query,
                                                                               review_type=self.summary_type,
                                                                               outline_type=self.summary_type_map[
                                                                                   self.summary_type],
                                                                               section_title=first_layer_titles[i],
                                                                               paper_chunks=all_chunks[i]))

        # Fan out all completions concurrently.
        result_list = await self.run_loop(
            model_name="GLM_4_PLUS",
            user_inputs=prompt_list,
            temperature=0,
            max_wait_times=1000
        )
        per_chapter_outline_list = []
        for result_item in result_list:
            per_chapter_outline_list.append(xml_to_dict(result_item))

        return per_chapter_outline_list


def supplement_refs_flexible(data, min_refs=80):
    """Top up each top-level chapter's 'refs' list toward `min_refs` entries.

    Donor refs are drawn only from chapters that already hold >= min_refs
    refs; if no chapter qualifies, every chapter's refs are pooled instead.
    The pool is de-duplicated and shuffled, then consumed left to right, so a
    ref handed to an earlier chapter is not offered to later ones. A chapter
    never receives a ref it already contains. Mutates and returns `data`.
    """
    if not isinstance(data, dict) or 'children' not in data or not isinstance(data['children'], list):
        print("错误：输入的数据结构无效。需要一个包含 'children' 列表的字典。")
        return data

    top_level_chapters = data.get('children', [])
    if not top_level_chapters:
        print("信息：'children' 列表为空，无需操作。")
        return data

    for item in top_level_chapters:
        print(f"{item['title']}的长度为：{len(item['refs'])}\n")

    def _is_valid_chapter(ch):
        # A usable chapter is a dict with a list-valued 'refs' key.
        return isinstance(ch, dict) and 'refs' in ch and isinstance(ch['refs'], list)

    # Does at least one chapter already meet the quota and qualify as a donor?
    has_sufficient_donor = any(
        _is_valid_chapter(ch) and len(ch['refs']) >= min_refs
        for ch in top_level_chapters
    )

    # Single pass replaces the two near-identical branch loops: every valid
    # chapter is a supplementation candidate; donors are the well-stocked
    # chapters when one exists, otherwise all chapters.
    chapters_to_process = []
    potential_donor_refs = []
    for i, chapter in enumerate(top_level_chapters):
        if not _is_valid_chapter(chapter):
            print(f"警告：跳过索引 {i} 处的无效章节结构。")
            continue
        chapters_to_process.append(chapter)
        if not has_sufficient_donor or len(chapter['refs']) >= min_refs:
            potential_donor_refs.extend(chapter['refs'])

    # Nothing to draw from (e.g. every refs list is empty): bail out.
    if not potential_donor_refs:
        return data

    # Unique, shuffled pool consumed sequentially across all chapters.
    unique_refs_pool = list(set(potential_donor_refs))
    random.shuffle(unique_refs_pool)
    pool_idx = 0

    for chapter in chapters_to_process:
        refs_list = chapter['refs']
        num_needed = min_refs - len(refs_list)
        if num_needed <= 0:  # already at or above the quota
            continue

        # Set for O(1) duplicate checks against this chapter's refs.
        existing_refs_set = set(refs_list)
        added_count = 0
        while added_count < num_needed and pool_idx < len(unique_refs_pool):
            candidate_ref = unique_refs_pool[pool_idx]
            pool_idx += 1
            if candidate_ref not in existing_refs_set:
                refs_list.append(candidate_ref)
                existing_refs_set.add(candidate_ref)
                added_count += 1

    if pool_idx >= len(unique_refs_pool):
        print("信息：全局引用池已耗尽。")

    return data


async def two_stage_generate_outline(origin_query, user_summary_type, origin_result):
    """Run the full two-stage outline pipeline.

    Returns a tuple ``(outline, paper_index_dict)``: the merged first-level
    outline with per-chapter sub-section outlines attached as its children,
    and the mapping from short "paper_N" IDs to original paper IDs.
    """
    agent = DetailOutlineAgent(
        summary_type=user_summary_type,
        origin_query=origin_query,
        origin_result=origin_result
    )

    # Stage one: per-group first-level outlines plus the paper-ID index.
    stage_one_outlines, paper_index_dict = await agent.generate_stage_one()
    save_datas(stage_one_outlines, "stage_one_outlines.json")

    merged_outline = merge_first_layer_outlines(stage_one_outlines)

    # Top up sparse chapters with extra references before stage two.
    merged_outline = supplement_refs_flexible(merged_outline, min_refs=50)
    save_datas(merged_outline, "xml_first_layer.json")

    # The merged outline's leaf nodes are the first-level chapter entries.
    leaf_nodes = OutlineUtils().get_all_leaf_nodes(merged_outline)

    # Stage two: one sub-section outline per first-level chapter.
    chapter_outlines = await agent.generate_stage_two(leaf_nodes)

    # Attach each chapter's sub-section outline under the first-level outline.
    merged_outline["children"] = chapter_outlines

    return merged_outline, paper_index_dict


if __name__ == '__main__':
    origin_query = "Latest Advances and Cross-modal Fusion Strategies in Multimodal Learning"
    user_summary_type = "A review of the current research status in a specific field"

    # Load the raw paper-search result that seeds outline generation.
    try:
        with open('../examples_data/origin_paper_search_result.json', 'r', encoding='utf-8') as f:
            origin_result = json.load(f)
    except FileNotFoundError:
        print("文件 'origin_paper_search_result.json' 未找到，请确保文件存在。")
        origin_result = {}

    start_time = time.time()

    # BUG FIX: two_stage_generate_outline is a coroutine function; calling it
    # directly only created a coroutine object and never ran the pipeline.
    # It also returns a (outline, paper_index_dict) tuple that must be
    # unpacked before saving.
    final_outline, paper_index_dict = asyncio.run(
        two_stage_generate_outline(origin_query, user_summary_type, origin_result)
    )

    elapsed = time.time() - start_time
    print("大纲生成用时：", int(elapsed // 60), "分钟", int(elapsed % 60), "秒")

    # Persist the final combined outline.
    save_datas(final_outline, "final_outline.json")
