import json
import re
import os, sys
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
import threading

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from general_agent import Agent
from chunk_conclusion import MultiChunkConclusionAgent

# from 人类群星闪耀时.RLQXSYS.api import api_tool
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from api import api_tool

CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(CURRENT_DIR))
from api import api_llm, api_prompt_engineering

class GenerateOutlineAgent(Agent):
    """Agent that builds a survey outline for a topic from a list of paper
    summaries, and maps retrieved text chunks onto the outline's subsections.

    Pipeline: rough outline -> subsection-level outline (and, per section,
    an optional secondary outline), then chunk-to-subsection mapping.
    """

    def __init__(self, model="glm-4-flash"):
        """Load the prompt templates used by every outline-generation step.

        Args:
            model: LLM model name, forwarded to the base Agent.

        Raises:
            FileNotFoundError: if any template file is missing.
        """
        super().__init__(model=model)
        self.CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
        # Templates live next to this file under template/outline/.
        template_dir = os.path.join(self.CURRENT_DIR, "template/outline")
        template_files = {
            "rough_outline": "Rough_outline.txt",
            # "final_outline": "Final_outline.txt",
            "subsection_outline": "outline.txt",
            "secondary_outline": "Secondary_outline.txt",
            "chunks_to_outline": "map_chunks_to_outline.txt",
        }
        self.templates = {}
        for key, filename in template_files.items():
            file_path = os.path.join(template_dir, filename)
            with open(file_path, encoding="utf-8") as f:
                self.templates[key] = f.read()
        self.rough_outline_template = self.templates["rough_outline"]
        # self.final_outline_template = self.templates["final_outline"]
        self.secondary_outline_template = self.templates["secondary_outline"]
        self.subsection_outline_template = self.templates["subsection_outline"]
        self.chunks_to_outline_template = self.templates["chunks_to_outline"]

    # Initial outline: rough pass followed by subsection refinement.
    def generate_outline(self, topic, paper_list):
        """Generate the full outline for `topic`.

        Args:
            topic: survey topic string.
            paper_list: paper summaries fed into the prompts.

        Returns:
            The parsed subsection-level outline (JSON-like dict).
        """
        rough_outline = self.generate_rough_outline(
            topic=topic,
            paper_list=paper_list,
        )
        subsection_outline = self.generate_subsection_outline(
            topic=topic,
            paper_list=paper_list,
            rough_outline=rough_outline,
        )
        return subsection_outline

    def generate_rough_outline(self, topic, paper_list):
        """Ask the LLM for a first, coarse outline and parse it to JSON."""
        prompt = self.PE.format(
            self.rough_outline_template,
            topic=topic,
            paper_list=paper_list,
        )
        # Whitespace-separated token count; used only for the progress log.
        word_count = len(prompt.split())
        print(f">> 生成 rough_outline, prompt 长度为{word_count} 字")
        response = self.LLM.chat(
            prompt=prompt,
            model=self.model,
        )
        rough_outline = self.PE.parse_to_json(response)
        return rough_outline

    # Generate the secondary (two-level) outline one section at a time.
    def generate_secondary_outline(self, topic, current_section, paper_list, rough_outline):
        """Expand one rough-outline section into a secondary outline.

        Args:
            topic: survey topic string.
            current_section: one section dict from the rough outline.
            paper_list: paper summaries relevant to this section.
            rough_outline: the full rough outline, supplied as context.

        Returns:
            Parsed secondary outline (JSON-like dict).
        """
        prompt = self.PE.format(
            self.secondary_outline_template,
            topic=topic,
            current_section=json.dumps(current_section, indent=4, ensure_ascii=False),
            section_name=current_section.get("name", "current section"),
            rough_outline=self.PE.as_json(rough_outline),
            paper_list=paper_list,
        )
        print("-" * 100)
        # Fixed log label: this step generates the secondary outline, not the
        # subsection outline (the old message was copy-pasted).
        print(f">> 生成 secondary_outline, prompt 长度为{ len(prompt)}")
        response = self.LLM.chat(
            prompt=prompt,
            model=self.model,
        )
        secondary_outline = self.PE.parse_to_json(response)
        print("🔍 生成的二级目录:", self.PE.as_json(secondary_outline))
        print("-" * 100)
        return secondary_outline

    def generate_subsection_outline(self, topic, paper_list, rough_outline):
        """Refine the rough outline into a subsection-level outline.

        Returns:
            Parsed outline (JSON-like dict).
        """
        prompt = self.PE.format(
            self.subsection_outline_template,
            topic=topic,
            rough_outline=self.PE.as_json(rough_outline),
            paper_list=paper_list,
        )
        print("-" * 100)
        print(f">> 生成 subsection_outline, prompt 长度为{ len(prompt)}")
        response = self.LLM.chat(
            prompt=prompt,
            model=self.model,
        )
        outline = self.PE.parse_to_json(response)
        print("🔍 生成的大纲 (outline):", self.PE.as_json(outline))
        print("-" * 100)
        return outline

    def map_chunks_to_outline(self, chunk_list, outline, max_workers=512):
        """Map each chunk onto the outline subsection it belongs to.

        Chunks are classified concurrently by the LLM; the result is the
        subsection list with a populated "chunks" field:

        [
            {
                "name": "Subsection Name",
                "description": "subsection description",
                "chunks": ["chunk1 content", "chunk2 content"]
            },
            ...
        ]

        Args:
            chunk_list: chunks to classify.
            max_workers: thread-pool size; the work is I/O-bound LLM calls.

        Returns:
            The subsection list with chunks attached.
        """
        subsection_list = self.extract_subsections(outline)  # parse the outline

        # Every subsection starts with an empty bucket.
        for sub in subsection_list:
            sub["chunks"] = []

        # Index subsections by name for O(1) lookup per mapped chunk instead
        # of rescanning the whole list under the lock.  A list per name
        # preserves the original behavior of appending to every subsection
        # that shares a duplicate name.
        subs_by_name = {}
        for sub in subsection_list:
            subs_by_name.setdefault(sub["name"], []).append(sub)

        lock = threading.Lock()  # guards concurrent appends to the buckets

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_chunk = {
                executor.submit(self.__map_single_chunk, subsection_list, chunk): chunk
                for chunk in chunk_list
            }
            for future in tqdm(as_completed(future_to_chunk), total=len(future_to_chunk), desc="Mapping Chunks"):
                chunk = future_to_chunk[future]
                try:
                    mapped_result = future.result()  # LLM mapping result
                    if mapped_result and "subsection" in mapped_result:
                        subsection_name = mapped_result["subsection"]
                        with lock:  # serialize mutation of the shared buckets
                            for sub in subs_by_name.get(subsection_name, []):
                                sub["chunks"].append(chunk)  # store chunk content only
                except Exception as e:
                    # Best-effort: one failed chunk must not abort the batch.
                    print(f"Chunk processing failed: {e}")

        return subsection_list  # final mapping

    def __map_single_chunk(self, subsection_list, chunk):
        """Classify a single chunk via the LLM.

        Returns the parsed JSON result, expected to contain a "subsection"
        key naming the target subsection.
        """
        outline_text = "\n".join(f"{sub['name']}: {sub['description']}" for sub in subsection_list)
        prompt = self.PE.format(
            self.chunks_to_outline_template,
            outline=outline_text,  # pass the complete outline as plain text
            chunk=chunk,
        )
        response = self.LLM.chat(
            prompt=prompt,
            model=self.model,
        )
        mapped_chunks = self.PE.parse_to_json(response)
        return mapped_chunks

    # Extract each subsection's name and description.
    def extract_subsections(self, data):
        """Flatten data["sections"][*]["subsections"] into a list of
        {"name", "description"} dicts.

        Raises:
            KeyError: if a subsection lacks "name" or "description".
        """
        return [
            {
                "name": subsection["name"],
                "description": subsection["description"],
            }
            for section in data.get("sections", [])
            for subsection in section.get("subsections", [])
        ]
    


if __name__ == "__main__":
    # Demo driver: retrieve chunks for a topic, summarize them, build a rough
    # outline, then expand each section into a secondary outline.
    model = "gpt-4o-mini"
    GO_agent = GenerateOutlineAgent(model=model)
    topic = "多模态大模型的技术发展路线是什么样的？"
    print(">> 检索...")

    from query_rewriting import QueryRewritingAgent

    # Create the agents once and reuse them; the original re-instantiated
    # both inside the per-section loop with identical arguments.
    query_rewriting_agent = QueryRewritingAgent(model=model)
    multi_agent = MultiChunkConclusionAgent(model=model, DEBUG=True, max_retries=1, timeout=10)

    query_list = query_rewriting_agent.rewrite(
        topic=topic,
        num_queries=32,
    )
    query_rewriting_agent.PE.print_json(query_list)

    chunk_list = query_rewriting_agent.retrieve(
        query_list=query_list,
        max_return_size=256,
    )

    print(f"chunk_list: (共 {len(chunk_list)} 个结果)\n", json.dumps([
        {"id": ele["id"], "title": ele["entity"]["paper_title"]} for ele in chunk_list
    ], indent=2, ensure_ascii=False)[:4096] + "\n\n...\n")
    print("-" * 100)
    print(">> MultiChunkConclusionAgent...")
    chunk_list = multi_agent.conclude(
        topic=topic,
        chunk_list=chunk_list,
        return_type="chunk",
        return_relevant=True,
    )

    simplified_chunk_list = [
        {"id": ele["id"], "总结": ele["总结"]} for ele in chunk_list if "总结" in ele
    ]
    print(f"simplified_chunk_list: (共 {len(simplified_chunk_list)} 个结果)\n", json.dumps(simplified_chunk_list, indent=2, ensure_ascii=False)[:256] + "\n\n...\n")

    rough_outline = GO_agent.generate_rough_outline(topic, simplified_chunk_list)
    print(rough_outline)

    # Per section: re-retrieve with a section-focused query, summarize, and
    # expand into a secondary outline.  Fix: the original discarded every
    # secondary_outline (variable overwritten each iteration); collect them
    # so the loop's work is preserved.
    secondary_outlines = []
    for current_section in rough_outline["sections"]:
        section_topic = topic + " " + current_section["name"] + " " + current_section["description"]
        query_list = query_rewriting_agent.rewrite(
            topic=section_topic,
            num_queries=32,
        )
        chunk_list = query_rewriting_agent.retrieve(
            query_list=query_list,
            max_return_size=256,
        )
        chunk_list = multi_agent.conclude(
            topic=section_topic,
            chunk_list=chunk_list,
            return_type="chunk",
            return_relevant=True,
        )
        simplified_chunk_list = [
            {"id": ele["id"], "总结": ele["总结"]} for ele in chunk_list if "总结" in ele
        ]
        secondary_outlines.append(
            GO_agent.generate_secondary_outline(topic, current_section, simplified_chunk_list, rough_outline)
        )