# -*- coding: utf-8 -*-
import asyncio
from utils import *
from globalvariables import *
from Agents.agent import Agent
from outline import Outline
from Agents.extractionagent import ExtractionAgent
from Agents.generationagent import GenerationAgent
from Agents.retrievalagent import RetrievalAgent
import time
import asyncio


class MasterAgent(Agent):
    """Top-level orchestrator for automatic survey generation.

    Coordinates three sub-agents (extraction, retrieval, generation) through a
    pipeline: requirement extraction -> first reference retrieval -> key-info
    extraction + outline generation (in parallel with a second retrieval) ->
    section drafting -> final assembly. Concurrent stages coordinate through
    the boolean flags in ``self.signals`` and the per-model semaphores in
    ``self.semaphores``.
    """

    def __init__(self, user_input: str, token: str):
        """
        :param user_input: raw user request describing the desired survey
        :param token: API token shared by all sub-agents
        """
        super().__init__(token)
        # Raw user request; parsed later by extract_basic_info().
        self.user_input = user_input
        # The three worker agents, sharing the same API token.
        self.ext_agent = ExtractionAgent(token)
        self.ret_agent = RetrievalAgent(token)
        self.gen_agent = GenerationAgent(token)
        # Concurrency caps for the three model endpoints.
        semaphore_glm4plus = asyncio.Semaphore(65)
        semaphore_glm4flash = asyncio.Semaphore(150)
        semaphore_embedding3 = asyncio.Semaphore(45)
        self.semaphores = {"glm4plus": semaphore_glm4plus, "glm4flash": semaphore_glm4flash,
                           "embedding3": semaphore_embedding3}
        # Pipeline parameters and shared mutable state.
        self.ref_bank_max = 60  # maximum size of the reference bank
        self.keyinfo_ext_ratio = 3  # second-retrieval volume relative to the first
        self.survey_lang = None  # language requested by the user
        self.survey_type = None  # survey-type code (see survey_type_int2str)
        self.entities = []
        self.topic = None  # survey topic extracted from the user request
        self.paper_ids_set = set()
        self.paper_titles_set4outline = set()  # papers used for outline generation
        self.paper_titles_set_un_chosen4outline = set()  # first-pass papers not used for the outline
        self.signals = {}  # cross-task coordination flags
        self.chunks4outline = []
        self.chunks_un_chosen4outline = {}
        self.papers_text_for_outline = []
        self.reference_chunks_c = []
        self.chunks_keyinfo = []
        self.chunks_keyinfo_emb = []
        self.outline = None  # outline tree, built by GenerateOutline()
        self.title = ''
        self.claims = []
        self.outline_des = ""
        self.final_content = ""  # final assembled survey text

    async def extract_basic_info(self):
        """Extract the survey language, survey type and topic from the user request."""
        start = time.time()
        task_1 = asyncio.create_task(self.ext_agent.ExtractLanguage(self.user_input))
        task_2 = asyncio.create_task(self.ext_agent.ExtractType(self.user_input))
        # Extract the desired language and survey type concurrently.
        self.survey_lang, self.survey_type = await asyncio.gather(task_1, task_2)
        # NOTE(review): ExtractTopic is called without await -- confirm it is synchronous.
        self.topic = self.ext_agent.ExtractTopic(self.user_input, self.survey_lang, self.survey_type)
        end = time.time()
        print(f"提取综述文献需求的基本信息所花时间：{end - start}")
        print(
            f"survey_lang: {self.survey_lang}\nsurvey_type: {survey_type_int2str[self.survey_lang][self.survey_type]}\ntopic: {self.topic}")

    async def PrepareSurveyMaterials(self):
        """Run the first retrieval, then run outline generation and the second
        retrieval/extraction as two parallel tasks."""
        # First-pass reference retrieval.
        await self.FirstReferenceRetrieval()
        # Two parallel tasks: (1) extract key info from first-pass references
        # and generate the outline; (2) second retrieval + key-info extraction.
        task_1 = asyncio.create_task(self.KeyinfoExtractionAndOutlineGeneration())
        task_2 = asyncio.create_task(self.SecondRetrivalAndKeyinfoExtraction())
        await asyncio.gather(task_1, task_2)
        print("牛逼")

    async def WriteSurveyDraft(self):
        """Draft section contents bottom-up: leaf sections first, then their
        parents, then any remaining blank sections."""
        def getClaimStr(section):
            # Build the numbered claim list for a section from its allocated
            # key-info references (capped at 20, randomly sampled beyond that).
            claims_str = ''
            ref_1 = section.data['reference_1']
            ref_2 = section.data['reference_2']
            ref = ref_1 + ref_2
            if len(ref) > 20:
                ref = random.sample(ref, 20)
            for index, ref_keyinfo in enumerate(ref):
                claims_str = claims_str + f"""[{index+1}] {ref_keyinfo["entity_category"]}-{ref_keyinfo["entity_name"]}: {ref_keyinfo["descriptive_linkage"]}[{index+1}]\n"""
            return claims_str, ref

        async def process_leaf(semaphore, outline, identifier, topic):
            # Semaphore-gated wrapper around gen_leaf_content.
            async with semaphore:
                await gen_leaf_content(outline, identifier, topic)

        async def gen_leaf_content(outline, identifier, topic):
            # Generate the description and then the draft content of one leaf
            # section, recording which listed references the draft cited.
            section_leaf = outline.get_node(identifier)
            # First write the section's core description.
            generate_description_input = generate_description_prompts[self.survey_lang] % (
                outline.showSurveyOutline(),
                topic,
                section_leaf.data['title']
            )
            try:
                # Generate the core description with the LLM.
                response_text_des = await self.chat_async(
                    generate_description_input,
                    response_format_type="text",
                    language=self.survey_lang,
                    do_sample=False
                )
                leaf_section_1 = outline.get_node(section_leaf.identifier)
                leaf_section_1.data['description'] = response_text_des
            except Exception as e:
                print(f"Error processing leaf {section_leaf.identifier}: {e}")

            # Collect the claim block for this section.
            claims_str, listed_ref_info = getClaimStr(section_leaf)
            section_leaf.data["listed_ref"] = listed_ref_info
            # Build the LLM input for the draft.
            generate_draft_input = generate_draft_prompts[self.survey_lang] % (
                topic,
                outline.showSurveyOutline(format=1),
                f"- {section_leaf.tag} {section_leaf.data['title']}",
                section_leaf.data['description'],
                claims_str
            )
            # Keep response_text defined even if chat_async raises below.
            response_text = ''
            try:
                # Generate the draft and store it on the corresponding node.
                response_text = await self.chat_async(
                    generate_draft_input,
                    response_format_type="text",
                    language=self.survey_lang,
                    do_sample=False,
                    max_tokens=4095
                )
                leaf_section_2 = outline.get_node(section_leaf.identifier)
                leaf_section_2.data['content'] = response_text
            except Exception as e:
                print(f"Error processing leaf {section_leaf.identifier}: {e}")
            if isinstance(response_text, str):
                # Map citation markers found in the text back to the listed refs.
                used_ref_nums = extract_sup_numbers(response_text)
                used_ref_info = {}
                for indexInText in used_ref_nums:
                    if 0 <= indexInText - 1 < len(listed_ref_info):
                        used_ref_info[str(indexInText)] = listed_ref_info[indexInText - 1]
                section_leaf.data["used_ref"] = used_ref_info

        async def process_2rd_leaf(semaphore, outline, identifier, topic):
            # Semaphore-gated wrapper around gen_leaf_2rd_content.
            async with semaphore:
                await gen_leaf_2rd_content(outline, identifier, topic)

        async def gen_leaf_2rd_content(outline, identifier, topic):
            # Generate content for a second-to-last-level section by aggregating
            # its children's references and content.
            section_2rd_leaf = outline.get_node(identifier)
            # Collect key info from the child sections.
            for child in self.outline.get_children(identifier):
                for ref_keyinfo_of_child in child.data['reference_1']:
                    section_2rd_leaf.data['reference_1'].append(ref_keyinfo_of_child)
                for ref_keyinfo_of_child in child.data['reference_2']:
                    section_2rd_leaf.data['reference_2'].append(ref_keyinfo_of_child)
            # Collect the child sections' content.
            children_section_content = outline.getSurveyContent(section_2rd_leaf.identifier)
            # Collect the claim block.
            claims_str, listed_ref_info = getClaimStr(section_2rd_leaf)
            section_2rd_leaf.data["listed_ref"] = listed_ref_info
            # Build the LLM input.
            generate_2rd_draft_input = generate_2rd_draft_prompts[self.survey_lang] % (
                section_2rd_leaf.data['title'],
                children_section_content,
                topic,
                outline.showSurveyOutline(format=1),
                claims_str,
                f"- {section_2rd_leaf.tag} {section_2rd_leaf.data['title']}",
                topic,
                section_2rd_leaf.data['title']
            )
            # Keep response_text defined even if chat_async raises below.
            response_text = ''
            try:
                # Generate the draft and store it on the corresponding node.
                response_text = await self.chat_async(
                    generate_2rd_draft_input,
                    response_format_type="text",
                    language=self.survey_lang,
                    do_sample=False,
                    max_tokens=4095
                )
                leaf_2rd_section = outline.get_node(section_2rd_leaf.identifier)
                leaf_2rd_section.data['content'] = response_text
            except Exception as e:
                print(f"Error processing 2nd-level leaf {section_2rd_leaf.identifier}: {e}")
            if isinstance(response_text, str):
                used_ref_nums = extract_sup_numbers(response_text)
                used_ref_info = {f"{indexInText}": listed_ref_info[indexInText - 1] for indexInText in used_ref_nums if (indexInText-1<len(listed_ref_info) and indexInText-1>=0)}
                section_2rd_leaf.data["used_ref"] = used_ref_info

        async def process_section(semaphore, section, topic):
            # Semaphore-gated wrapper around gen_section_content.
            async with semaphore:
                await gen_section_content(section, topic)

        async def gen_section_content(section, topic):
            # Fill in any node of the section's subtree whose content is still
            # the empty string.
            subtree = self.outline.subtree(section.identifier)
            # Walk every node whose content is blank.
            for blank_node in subtree.all_nodes_itr():
                if isinstance(blank_node.data, dict) and 'content' in blank_node.data and blank_node.data[
                    'content'] is not None and blank_node.data['content'] == "":
                    # The node has an empty-string content: complete it.
                    blank_node_content = self.outline.getSurveyContent(blank_node.identifier)
                    # Build the LLM input.
                    completion_draft_input = completion_draft_prompts[self.survey_lang] % (
                        f"- {blank_node.tag} {blank_node.data['title']}",
                        blank_node_content,
                        topic,
                        self.outline.showSurveyOutline(format=1),
                        topic,
                        f"- {blank_node.tag} {blank_node.data['title']}"
                    )
                    try:
                        # Generate the content and store it on the node.
                        response_text = await self.chat_async(
                            completion_draft_input,
                            response_format_type="text",
                            language=self.survey_lang,
                            do_sample=False,
                            max_tokens=4095
                        )
                        node_blank = self.outline.get_node(blank_node.identifier)
                        node_blank.data['content'] = response_text
                    except Exception as e:
                        print(f"Error processing leaf {blank_node.identifier}: {e}")

        start = time.time()
        print("撰写叶子节点内容")
        # Get all leaf nodes of the outline.
        leaves = self.outline.getSubTreeLeaves()
        # Build parallel generation tasks for the leaf sections, batched to the
        # glm4plus concurrency limit (65).
        tasks_1 = [process_leaf(self.semaphores["glm4plus"], self.outline, leaf.identifier, self.topic) for leaf in leaves]
        nested_tasks_1 = []
        for i in range(0, len(tasks_1), 65):
            nested_tasks_1.append(tasks_1[i:i + 65])
        for batch in nested_tasks_1:
            await asyncio.gather(*batch)
        end_1 = time.time()
        print(f"撰写叶子节点内容耗时{end_1-start}s")
        print("撰写倒数第二级叶子节点内容")

        # Collect all second-to-last-level nodes (parents of leaves).
        section_2rd_leaves = []
        for section_leaf in leaves:
            parent_section = self.outline.parent(section_leaf.identifier)
            # Add the parent if it is not the root and not already collected.
            if parent_section.identifier != 'root' and parent_section not in section_2rd_leaves:
                section_2rd_leaves.append(parent_section)
        # Build and run the parallel generation tasks for those sections.
        tasks_2 = [process_2rd_leaf(self.semaphores["glm4plus"], self.outline, leaf_2rd.identifier, self.topic) for leaf_2rd in section_2rd_leaves]
        await asyncio.gather(*tasks_2)
        end_2 = time.time()
        print(f"撰写倒数第二级叶子节点内容耗时{end_2-end_1}s")
        print("撰写剩余章节内容")
        # Fill the remaining blank sections, one task per top-level section.
        sections = self.outline.get_children(self.outline.root)
        tasks_3 = [process_section(self.semaphores["glm4plus"], section, self.topic) for section in sections]
        await asyncio.gather(*tasks_3)
        end_3 = time.time()
        print(f"撰写剩余章节内容耗时{end_3-end_2}s")
        print("章节草案撰写完毕")

    async def IntegrateSurveyContent(self):
        """Generate title/abstract/conclusion, store them in the outline, and
        assemble the final survey text with its reference list.

        :return: the final survey content string
        """
        start_time = time.time()
        print("开始生成综述摘要及总结")
        # Complete the remaining special nodes: abstract, conclusion, outlook.
        result = await self.gen_agent.GenerateTitleAbaCon(outline=self.outline, topic=self.topic,
                                                          survey_lang=self.survey_lang, survey_type=self.survey_type)
        # result is a dict; store its title/abstract/conclusions into the outline.
        print("存储综述摘要及总结")
        Increasetitle2outline(outline=self.outline, Title_abstract_conclusions_dict=result)
        end_time = time.time()
        print(f"生成综述摘要及总结花费时间：{end_time - start_time}s")
        print("开始生成综述参考文献列表")
        self.final_content = self.gen_agent.GenerateRefList(outline=self.outline)
        print("完成综述生成")
        return self.final_content

    async def FirstReferenceRetrieval(self):
        """First-pass reference retrieval: build queries, then search the paper
        bank, splitting results into outline/non-outline title sets."""
        start = time.time()
        # Generate the topic queries and keyword queries.
        print("开始生成检索查询")
        topic_query_1, topic_query_2, kw_queries = await self.gen_agent.GenerateQueries(topic=self.topic,
                                                                                        survey_lang=self.survey_lang)
        end_1 = time.time()
        print(f"生成检索查询耗时：{end_1 - start}s")
        # Search the paper bank with the topic and keyword queries.
        print("开始检索文献")
        # TODO: pass the semaphores in here as well; FirRetrieval distills
        # abstract-based results internally, but the distilled paper count
        # stays under 50 (intersection of two 50-paper sets), so not urgent.
        self.paper_titles_set4outline, self.paper_titles_set_un_chosen4outline = await self.ret_agent.FirRetrieval(
            topic_query_1, topic_query_2, kw_queries)
        end_2 = time.time()
        print(f"初次检索文献数量：{len(self.paper_titles_set4outline) + len(self.paper_titles_set_un_chosen4outline)}")
        print(f"初次检索所花时间：{end_2 - end_1}s")

    async def SecondReferenceRetrieval(self):
        """Second-pass reference retrieval.

        Chooses how many extra papers to fetch so that:
          1. the reference bank stays within ``ref_bank_max`` papers;
          2. the second retrieval is at most ``keyinfo_ext_ratio`` times the first;
          3. the bank does not fall below the outline-generation minimum.
        Sets ``signals["Second_Retrieval_finished"]`` on completion so the
        outline task waiting in KeyinfoExtractionAndOutlineGeneration proceeds.
        """
        start = time.time()
        # Rounded boundary between "fill the bank" and "scale by the ratio".
        boundary_num = int(self.ref_bank_max / (self.keyinfo_ext_ratio + 1) + 0.5)
        # Number of papers found by the first retrieval.
        first_ret_num = len(self.paper_titles_set4outline) + len(self.paper_titles_set_un_chosen4outline)
        if first_ret_num >= self.ref_bank_max:
            # The bank is already full: sample the remainder from the first-pass
            # papers that were not used for outline generation.
            self.paper_titles_set_2 = random.sample(list(self.paper_titles_set_un_chosen4outline),
                                                    self.ref_bank_max - self.ret_agent.num_paper_max)
        elif first_ret_num >= boundary_num:
            # Above the boundary: retrieve just enough to fill the bank.
            sec_ret_target_num = self.ref_bank_max - first_ret_num
            self.paper_titles_set_2 = await self.ret_agent.SecRetrieval(sec_ret_target_num,
                                                                        self.paper_titles_set4outline,
                                                                        self.paper_titles_set_un_chosen4outline,
                                                                        self.semaphores)
        elif first_ret_num >= self.ret_agent.num_paper_threshold:
            # Enough papers for outline generation but below the boundary:
            # scale the second retrieval by keyinfo_ext_ratio.
            sec_ret_target_num = self.keyinfo_ext_ratio * first_ret_num
            self.paper_titles_set_2 = await self.ret_agent.SecRetrieval(sec_ret_target_num,
                                                                        self.paper_titles_set4outline,
                                                                        self.paper_titles_set_un_chosen4outline,
                                                                        self.semaphores)
        else:
            # Too few papers even for outline generation: retrieve extra and
            # top up the outline paper set to the minimum threshold.
            sec_ret_target_num = (self.keyinfo_ext_ratio+1) * self.ret_agent.num_paper_threshold - first_ret_num
            self.paper_titles_set_2 = await self.ret_agent.SecRetrieval(sec_ret_target_num,
                                                                        self.paper_titles_set4outline,
                                                                        self.paper_titles_set_un_chosen4outline,
                                                                        self.semaphores)
            papers_list = list(self.paper_titles_set_2)
            needed_papers = self.ret_agent.num_paper_threshold - len(self.paper_titles_set4outline)
            papers_to_add = papers_list[:needed_papers]
            # BUGFIX: set.union() returns a new set without mutating in place;
            # the result was previously discarded, so the outline set never
            # received the top-up papers. update() mutates in place.
            self.paper_titles_set4outline.update(papers_to_add)
            self.paper_titles_set_2 = self.paper_titles_set_2 - set(papers_to_add)
        # BUGFIX: signal completion in every branch. The flag used to be set
        # only in the else-branch, which could leave the outline task spinning
        # forever when the outline paper set was below threshold but one of
        # the other branches was taken.
        self.signals["Second_Retrieval_finished"] = True
        end_1 = time.time()
        print(f"二次检索所花时间：{end_1 - start}s")

    async def KeyinfoExtractionAndOutlineGeneration(self):
        """Extract key info from the first-pass references, then generate the
        outline and encode/allocate the key info in parallel.

        Waits for ``signals["Second_Retrieval_finished"]`` when the outline
        paper set is below the minimum threshold, because the second retrieval
        tops it up in that case.
        """
        # If there are not enough papers yet, wait until the second retrieval
        # has topped up self.paper_titles_set4outline.
        if len(self.paper_titles_set4outline) < self.ret_agent.num_paper_threshold:
            # Poll every 0.1 s until the signal indicates the top-up is done.
            while not self.signals.get("Second_Retrieval_finished"):
                await asyncio.sleep(0.1)
        start = time.time()
        print("开始获取初次检索文献chunk")
        # Fetch the chunk contents of the first-pass papers.
        self.chunks4outline = await self.ret_agent.GetPaperContent_by_title(
            paper_titles_set=self.paper_titles_set4outline)
        end_1 = time.time()
        print(f"获取文献内容所花时间：{end_1 - start}s")

        print("开始提取参考文献关键信息")
        # Extract key info from the reference chunks used for outline generation.
        self.chunks_keyinfo = await self.ext_agent.ExtractKeyInfo(chunks=self.chunks4outline,
                                                                  survey_type=self.survey_type,
                                                                  survey_lang=self.survey_lang,
                                                                  survey_topic=self.topic,
                                                                  semaphore=self.semaphores["glm4plus"],
                                                                  model_code="glm-4-plus")

        end_2 = time.time()
        print(f"提取为提纲生成准备的参考文献关键信息所花时间：{end_2 - end_1}s")
        print('\n' * 2)

        # Outline generation and key-info encoding/allocation run in parallel;
        # they synchronise via the signals dict.
        task_1 = asyncio.create_task(self.GenerateOutline(chunks_keyinfo=self.chunks_keyinfo,
                                                          survey_topic=self.topic,
                                                          survey_lang=self.survey_lang,
                                                          survey_type=self.survey_type,
                                                          semaphore=self.semaphores["glm4plus"]))
        task_2 = asyncio.create_task(self.EncodeKeyinfo1AndAllocateRef(chunks_keyinfo=self.chunks_keyinfo,
                                                                       token=self.token))
        await asyncio.gather(task_1, task_2)

    async def SecondRetrivalAndKeyinfoExtraction(self):
        """Run the second retrieval, extract key info from the new papers, and
        allocate it to the outline leaves as ``reference_2``."""
        async def judge_relevance_by_short_text(leaf_title, entity, semaphore):
            # Ask the flash model whether one key-info entity is relevant to a
            # leaf section title; returns a bool.
            Relevance = False
            async with semaphore:
                judge_rele_input = judge_relevance_by_short_text_prompt % (entity.get("entity_name", "unknown"),
                                                                           entity.get("descriptive_linkage", "unknown"),
                                                                           leaf_title)
                resp = await self.chat_async(user_input=judge_rele_input,
                                             response_format_type="text",
                                             language="English", do_sample=False,
                                             model_code="glm-4-flash")
                # BUGFIX: guard against a falsy/None response before the
                # substring tests ('yes' in None raises TypeError); this now
                # matches the sibling copy in EncodeKeyinfo1AndAllocateRef.
                if resp:
                    resp = resp.lower()
                    if 'yes' in resp or 'true' in resp or '是' in resp:
                        Relevance = True
                return Relevance

        async def judge_relevance4leaf(leaf_title, top_k, semaphore):
            # Judge all top-k candidates of one leaf concurrently.
            tasks = [judge_relevance_by_short_text(leaf_title=leaf_title,
                                                   entity=top_k[i][1],
                                                   semaphore=semaphore) for i in range(len(top_k))]
            bool_relevance = await asyncio.gather(*tasks)
            return bool_relevance

        await self.SecondReferenceRetrieval()
        # Fetch the chunk contents of the second-pass papers.
        self.chunks_set_2 = await self.ret_agent.GetPaperContent_by_title(
            paper_titles_set=self.paper_titles_set_2)
        print('chunks_un_chosen4outlinet提取任务开始')
        self.chunks_keyinfo_2 = await self.ext_agent.ExtractKeyInfo(chunks=self.chunks_set_2,
                                                                    survey_type=self.survey_type,
                                                                    survey_lang=self.survey_lang,
                                                                    survey_topic=self.topic,
                                                                    semaphore=self.semaphores["glm4flash"],
                                                                    model_code="glm-4-flash")
        print("\n")
        # Wait for the outline tree to be fully constructed.
        while not self.signals.get("outline_is_constructed", False):
            await asyncio.sleep(0.2)
        await asyncio.sleep(0.1)
        # Allocate the encoded key info to the outline's leaf nodes.
        outline_leaf_nodes = self.outline.getSubTreeLeaves()
        keyinfo_entities = []
        # Flatten the key info; for each leaf, find candidate references by
        # embedding similarity.
        for chunk in self.chunks_keyinfo_2:
            keyinfo_entities.extend(chunk)
        top_k4leaves = []
        for leaf in outline_leaf_nodes:
            title_emb = np.array(leaf.data['title_emb'])
            keyinfos_with_similarity = []
            for keyinfo_entity in keyinfo_entities:
                entity_emb = np.array(keyinfo_entity.get('entity_name_emb', [1e-6] * 2048))
                similarity = cosine_similarity(title_emb, entity_emb)
                # (similarity, key-info) tuple.
                keyinfos_with_similarity.append((similarity, keyinfo_entity))
            # Keep the k most similar candidates.
            top_k4leaves.append(sorted(keyinfos_with_similarity, key=lambda x: x[0], reverse=True)[:20])
        tasks = [judge_relevance4leaf(leaf.data["title"], top_k4leaves[index], self.semaphores["glm4flash"]) for
                 index, leaf in enumerate(outline_leaf_nodes)]
        bool_relevance_list = await asyncio.gather(*tasks)
        for index, leaf in enumerate(outline_leaf_nodes):
            leaf_references = []
            for jndex, bool_relevance in enumerate(bool_relevance_list[index]):
                if bool_relevance:
                    leaf_references.append(top_k4leaves[index][jndex][1])
            # Attach the confirmed references to the leaf node.
            leaf.data["reference_2"] = leaf_references

    async def GenerateOutline(self, chunks_keyinfo, survey_topic, survey_lang, survey_type, semaphore):
        """Generate multiple draft outlines with the LLM, merge them, parse the
        merged text into a tree, and encode the leaf titles.

        Sets ``signals["outline_is_constructed"]`` once the tree is ready, then
        waits for the key-info encoding task to finish.
        """
        start_time = time.time()
        outlines_text = await self.gen_agent.GenerateOutlinesText(chunks_keyinfo=chunks_keyinfo,
                                                                  survey_topic=survey_topic,
                                                                  survey_lang=survey_lang,
                                                                  survey_type=survey_type)
        end = time.time()
        print(f"大模型生成多份初步提纲所花时间：{end - start_time}s")
        outline_text = self.gen_agent.MergeOutlinesText(outlines_text=outlines_text,
                                                        survey_lang=survey_lang,
                                                        survey_type=survey_type,
                                                        survey_topic=survey_topic)
        end_1 = time.time()
        print(f"大模型生成多份提纲并合并所花时间：{end_1 - end}s")
        print(outline_text)
        print('\n' * 2)
        # Parse the merged outline text into a tree structure.
        print("开始解析提纲")
        self.outline = parseText2Outline(outline_text=outline_text)
        print("开始编码提纲")
        await self.EncodeLeaves(token=self.token, semaphore=self.semaphores["embedding3"])
        # Outline construction finished.
        print("提纲构造完成")
        self.signals["outline_is_constructed"] = True

        # BUGFIX: use .get() here -- indexing raised KeyError when the encoding
        # task had not set the flag yet (every other wait loop uses .get()).
        while not self.signals.get("encode_finish_1", False):
            await asyncio.sleep(0.2)
            print("等待初次检索关键信息编码完成")
        await asyncio.sleep(0.2)

    async def EncodeKeyinfo1AndAllocateRef(self, chunks_keyinfo, token):
        """Embed the first-pass key-info entities, then allocate them to the
        outline leaves as ``reference_1``.

        :param chunks_keyinfo: nested list of key-info dicts (one sub-list per chunk)
        :param token: API token for the embedding endpoint
        """
        async def judge_relevance_by_short_text(leaf_title, entity, semaphore):
            # Ask the flash model whether one key-info entity is relevant to a
            # leaf section title; returns a bool.
            Relevance = False
            async with semaphore:
                judge_rele_input = judge_relevance_by_short_text_prompt % (entity.get("entity_name", "unknown"),
                                                                           entity.get("descriptive_linkage", "unknown"),
                                                                           leaf_title)
                resp = await self.chat_async(user_input=judge_rele_input,
                                             response_format_type="text",
                                             language="English", do_sample=False,
                                             model_code="glm-4-flash")
                if resp:
                    resp = resp.lower()
                    if 'yes' in resp or 'true' in resp or '是' in resp:
                        Relevance = True
                return Relevance

        async def judge_relevance4leaf(leaf_title, top_k, semaphore):
            # Judge all top-k candidates of one leaf concurrently.
            tasks = [judge_relevance_by_short_text(leaf_title=leaf_title,
                                                   entity=top_k[i][1],
                                                   semaphore=semaphore) for i in range(len(top_k))]
            bool_relevance = await asyncio.gather(*tasks)
            return bool_relevance

        group_size = 64
        # Record the shape of the nested list so it can be rebuilt after
        # flattening for batch embedding.
        structure = []
        keyinfo_flat_list_1 = []
        for sub_list in chunks_keyinfo:
            sub_list_length = len(sub_list)
            structure.append(sub_list_length)
            for item in sub_list:
                keyinfo_flat_list_1.append(item.get("entity_name", "unknown"))
        # Embed the flattened entity names.
        entity_name_emb = await get_embeddings_for_list(input_list=keyinfo_flat_list_1,
                                                        semaphore=self.semaphores["embedding3"],
                                                        token=token)
        # Re-nest the flat embedding list using the recorded structure.
        emb_nested_list = []
        index = 0
        for length in structure:
            sub_list = entity_name_emb[index:index + length]
            emb_nested_list.append(sub_list)
            index += length
        # Attach the embeddings back onto the original nested list. (A
        # redundant structure.append() inside this loop was removed; structure
        # is only needed for the re-nesting above.)
        for i, sub_list in enumerate(chunks_keyinfo):
            for j, item in enumerate(sub_list):
                chunks_keyinfo[i][j]["entity_name_emb"] = emb_nested_list[i][j]

        # Signal that encoding has finished.
        self.signals["encode_finish_1"] = True

        # Wait for the outline tree to be fully constructed.
        while not self.signals.get("outline_is_constructed", False):
            await asyncio.sleep(0.2)
        await asyncio.sleep(0.1)
        # Allocate the encoded key info to the outline's leaf nodes.
        outline_leaf_nodes = self.outline.getSubTreeLeaves()
        keyinfo_entities = []
        # Flatten the key info; for each leaf, find candidate references by
        # embedding similarity.
        for chunk in chunks_keyinfo:
            keyinfo_entities.extend(chunk)
        top_k4leaves = []
        for leaf in outline_leaf_nodes:
            title_emb = np.array(leaf.data['title_emb'])
            keyinfos_with_similarity = []
            for keyinfo_entity in keyinfo_entities:
                entity_emb = np.array(keyinfo_entity.get('entity_name_emb', [1e-6] * 2048))
                similarity = cosine_similarity(title_emb, entity_emb)
                # (similarity, key-info) tuple.
                keyinfos_with_similarity.append((similarity, keyinfo_entity))
            # Keep the k most similar candidates.
            top_k4leaves.append(sorted(keyinfos_with_similarity, key=lambda x: x[0], reverse=True)[:20])
        tasks = [judge_relevance4leaf(leaf.data["title"], top_k4leaves[index], self.semaphores["glm4flash"]) for
                 index, leaf in enumerate(outline_leaf_nodes)]
        bool_relevance_list = await asyncio.gather(*tasks)
        for index, leaf in enumerate(outline_leaf_nodes):
            leaf_references = []
            for jndex, bool_relevance in enumerate(bool_relevance_list[index]):
                if bool_relevance:
                    leaf_references.append(top_k4leaves[index][jndex][1])
            # Attach the confirmed references to the leaf node.
            leaf.data["reference_1"] = leaf_references

    async def EncodeLeaves(self, token, semaphore):
        """Embed every leaf title of the outline and store the vectors on the
        corresponding nodes as ``title_emb``."""
        leaves = self.outline.getSubTreeLeaves()
        strings = [leaf.data["title"] for leaf in leaves]
        leaf_emb = await get_embeddings_for_list(input_list=strings, semaphore=semaphore, token=token)
        for index, leaf in enumerate(leaves):
            identifier = leaf.identifier
            tmp_node = self.outline.get_node(identifier)
            tmp_node.data["title_emb"] = leaf_emb[index]

    async def SectionDraft(self):
        """Generate all node contents except abstract, conclusion and the
        reference list (delegated to the generation agent)."""
        print("开始生成章节草案")
        await self.gen_agent.GenerateSectionDraft(outline=self.outline,
                                                  topic=self.topic,
                                                  survey_lang=self.survey_lang,
                                                  reference_chunks_c=self.reference_chunks_c)

    async def main(self):
        """Run the full pipeline end to end and return the final survey text."""
        start = time.time()
        end_1 = time.time()
        # Extract the basic requirements from the user request.
        print("开始提取用户需求")
        await self.extract_basic_info()
        end_2 = time.time()
        print(f"提取用户需求结束，耗时{end_2 - end_1}s")
        print(f"最初至此总耗时: {end_2 - start}s")
        # Prepare the survey-writing materials (retrieval + outline).
        print("开始准备综述撰写资料")
        await self.PrepareSurveyMaterials()
        print("准备综述撰写资料结束")
        end_3 = time.time()
        print(f"准备综述撰写资料结束，耗时{end_3 - end_2}s")
        print(f"最初至此总耗时: {end_3 - start}s")
        print("开始撰写综述")
        await self.WriteSurveyDraft()
        end_4 = time.time()
        print(f"撰写综述草案结束，耗时{end_4 - end_3}s")
        print(f"最初至此总耗时: {end_4 - start}s")
        await self.IntegrateSurveyContent()
        end_5 = time.time()
        print(f"撰写综述结束，耗时{end_5 - end_4}s")
        print(f"最初至此总耗时: {end_5 - start}s")
        return self.final_content
if __name__ == '__main__':
    # SECURITY NOTE(review): the API token is hard-coded in source. Move it to
    # an environment variable or secret store and rotate this credential.
    master = MasterAgent(user_input="Please help me introduce the development history of multimodal large models.", token="efedb300c8468ae1315a5474228100f6.cZbs8qwDylqU2GzD")
    # asyncio.run() replaces the get_event_loop()/run_until_complete pattern,
    # which is deprecated since Python 3.10, and closes the loop cleanly.
    final_content = asyncio.run(master.main())