import os
import re
import threading
import numpy as np
from tqdm import trange,tqdm
from src.model import APIModel
import time
from src.utils import tokenCounter
import copy
import json
import pickle
from src.prompt import SUBSECTION_WRITING_PROMPT, LCE_PROMPT, CHECK_CITATION_PROMPT, CONCLUSION_PROMPT,CHECK_INCOMPLETE_PROMPT,SECTION_3_PROMPT, SECTION_3_TABLE_PROMPT,SECTION_3_SUMMARY_PROMPT
from chatglm_api import *
from bs4 import BeautifulSoup
import concurrent.futures
import copy

import re

def extract_chapter_sections_regex(raw_survey, parsed_outline, chapter_num=3):
    """Extract the subsections of one numbered chapter from a markdown survey.

    Finds the outline section whose title starts with ``"<chapter_num> "``,
    locates its ``## <title>`` chapter in *raw_survey*, and returns a mapping
    of subsection heading -> subsection body text.

    Args:
        raw_survey: full survey markdown text.
        parsed_outline: dict with a "sections" list of titles like "3 Methods".
        chapter_num: chapter number to extract (default 3).

    Returns:
        dict mapping e.g. "3.1 Foo" to its content (trailing newlines stripped,
        inner blank lines preserved). Empty dict when the chapter is missing
        from the outline or from the rendered survey.
    """
    # Resolve the chapter heading (e.g. "3 Methods") from the parsed outline.
    chapter_title = next((s for s in parsed_outline["sections"] if s.startswith(f"{chapter_num} ")), None)
    if not chapter_title:
        return {}

    # Everything between this "## <title>" heading and the next "## " (or EOF).
    chapter_pattern = re.compile(
        rf"^## {re.escape(chapter_title)}$\s*(.*?)(?=^## |\Z)",
        flags=re.DOTALL | re.MULTILINE
    )
    chapter_match = chapter_pattern.search(raw_survey)
    if not chapter_match:
        return {}
    chapter_content = chapter_match.group(1).strip()

    # Every "### <chapter_num>.x ..." heading plus its content, up to the next
    # heading. BUGFIX: the chapter number was previously hard-coded as "3",
    # which made any other chapter_num return an empty dict.
    subsection_regex = re.compile(
        rf"^### ({chapter_num}\.\d+.*?)$\s*(.*?)(?=^### |^## |\Z)",
        flags=re.DOTALL | re.MULTILINE
    )
    subsections = subsection_regex.findall(chapter_content)

    # Keep paragraph breaks inside the body; only strip trailing blank lines.
    return {
        title.strip(): content.rstrip("\n")
        for title, content in subsections
    }



def filter_str(topic):
    """Sanitize *topic* for filesystem use.

    Every character outside [a-zA-Z0-9_-] is replaced with an underscore.
    """
    return re.sub(r'[^a-zA-Z0-9_-]', '_', topic)

def transform_outline_for_section1(parsed_outline):
    """Build the numbered section/subsection dicts used for the Fig. 1 mindmap.

    Sections become {"1": "1. Name", ...} with a synthetic trailing Conclusion
    entry. Only chapters 3 and 4 contribute subsections, each keyed as
    {"3.1": ("3", "3.1 Name"), ...}.
    """
    sections = {
        str(num): f"{num}. {title.split(' ', 1)[1]}"
        for num, title in enumerate(parsed_outline['sections'], start=1)
    }

    # Append an extra Conclusion chapter after the outline's own sections.
    conclusion_num = len(sections) + 1
    sections[str(conclusion_num)] = f"{conclusion_num}. Conclusion"

    subsections = {}
    for sec_num, sub_list in enumerate(parsed_outline['subsections'], start=1):
        # Only the two method/result chapters (3 and 4) appear in the figure.
        if sec_num not in (3, 4):
            continue
        for sub_num, sub_title in enumerate(sub_list, start=1):
            key = f"{sec_num}.{sub_num}"
            subsections[key] = (str(sec_num), f"{key} {sub_title.split(' ', 1)[1]}")

    return sections, subsections

def remove_duplicates(lst):
    """Return *lst* with duplicates dropped, keeping first occurrences in order.

    dict.fromkeys preserves insertion order, so this is an order-stable dedup
    for any hashable items.
    """
    return list(dict.fromkeys(lst))

def transform_journal_name(db_name: str) -> str:
    """Extract a human-readable venue name from a metadata-DB filename.

    Handles three filename families (after stripping the DB suffix):
      * ``Journal_Paper_Meta_Data_<Name>[_<year>]`` -> "<Name>" with
        underscores turned into spaces (the year is deliberately dropped);
      * ``Conf_Paper_Meta_Data_<Acronym>_<year>``   -> "<Acronym>";
      * ``Conf_Paper_Meta_Data_<token>``            -> "<token>" (fallback,
        avoids gluing pieces together).
    Anything else is returned unchanged.
    """
    # Strip the storage suffix first.
    db_name = db_name.replace("_with_whole_text.db", "")

    # Journal files, with an optional trailing year.
    journal = re.match(r"Journal_Paper_Meta_Data_(.+?)_?([0-9]{4})?$", db_name)
    if journal:
        # The year (group 2) is intentionally not included in the output.
        return journal.group(1).replace("_", " ")

    # Conference files that carry a year.
    conf = re.match(r"Conf_Paper_Meta_Data_([A-Za-z]+)[_]?([0-9]{4})", db_name)
    if conf:
        return conf.group(1)

    # Fallback: take the leading alphanumeric token.
    fallback = re.match(r"Conf_Paper_Meta_Data_([A-Za-z0-9]+)", db_name)
    if fallback:
        return fallback.group(1).replace("_", " ")

    # Nothing matched: hand back the (suffix-stripped) original.
    return db_name

def process_citation(citation):
    """Resolve a raw citation string ("<paper_id>, chunk N") to a display entry.

    Looks the paper id up through the external paper API. Returns None when
    the lookup comes back empty; otherwise (original_citation, detail) where
    detail is "title, venue, year, chunk-part" joined with commas.
    """
    paper_id = citation.split(',')[0]
    hits = query_paper_by_id(paper_id, top_k=1)['data']
    if not hits:
        return None

    top = hits[0]
    venue = transform_journal_name(top['original_filename'])
    parts = [
        top['paper_title'],
        " " + venue,
        " " + str(top['year']),
        citation.split(',')[-1],
    ]
    return citation, ','.join(parts)

class subsectionWriter():
    """Drafts, refines and post-processes the body of an auto-generated survey.

    High-level pipeline (driven by ``write``):
      1. retrieve papers for every subsection and draft them via the LLM API,
      2. assemble the markdown document and append a Conclusion chapter,
      3. replace raw citations with numbered references,
      4. inject mindmap figures (Fig. 1 / Fig. 2) and per-subsection method
         tables for chapter 3,
      5. optionally run an LCE refinement pass producing a second draft.
    """

    def __init__(self, model:str, api_key:str, api_url:str,  database) -> None:
        """Store credentials, build the two chat clients and a token counter.

        ``api_model`` is the main drafting model; ``api_model_air``
        ("glm-4-air") is a secondary model used for chapter-3 summaries/tables.
        ``database`` is kept as an attribute; it is not used directly here.
        """
        self.model, self.api_key, self.api_url = model, api_key, api_url
        self.api_model = APIModel(self.model, self.api_key, self.api_url)
        self.api_model_air = APIModel("glm-4-air", self.api_key, self.api_url)
        self.db = database
        self.token_counter = tokenCounter()
        # Running token accounting consumed by compute_price().
        self.input_token_usage, self.output_token_usage = 0, 0

    def process_subsection(self, index, topic, parsed_outline, outline, rag_num, subsection_len, MAX_TOKENS):
        """
        Prepare one subsection: paper retrieval -> chunk flattening ->
        prompt templating -> token-budget truncation.

        Args:
            index: (section_idx, subsection_idx) into ``parsed_outline``.
            topic: survey topic string.
            parsed_outline: output of ``parse_outline``.
            outline: raw outline text (inserted into the prompt).
            rag_num: number of paper chunks to retrieve.
            subsection_len: target word count for the subsection.
            MAX_TOKENS: total token budget for the final prompt.

        Returns:
            (index, truncated_paper_text, final_prompt)
        """
        i, j = index
        subsection = parsed_outline['subsections'][i][j]
        description = parsed_outline['subsection_descriptions'][i][j]
        section_name = parsed_outline['sections'][i]

        # 1. Retrieve candidate paper chunks for this subsection's description.
        references_info = search_papers(query=description, top_k=rag_num)

        # 2. Flatten the retrieved chunks into one '---'-delimited text block.
        paper_texts = ''
        for item in references_info['data']:
            if 'entity' in item and 'paper_id' in item['entity']:
                paper_texts += f'---\n\npaper_title: {item["entity"]["paper_title"]}\n\n'
                paper_texts += f'paper_id: {item["entity"]["paper_id"]}\n\n'
                paper_texts += f'chunk {item["entity"]["chunk_id"]}\n\n'
                paper_texts += f'paper_content:\n\n{item["entity"]["chunk_text"]}\n---\n'

        # 3. Render the writing template without the paper list so its token
        #    cost can be measured and subtracted from the budget.
        fixed_template = self.__generate_prompt(
            SUBSECTION_WRITING_PROMPT,
            paras={
                'OVERALL OUTLINE': outline,
                'SUBSECTION NAME': subsection,
                'DESCRIPTION': description,
                'TOPIC': topic,
                'SECTION NAME': section_name,
                'WORD NUM': str(subsection_len),
                'CITATION NUM': str(8),
            }
        )
        fixed_tokens = len(self.token_counter.encoding.encode(fixed_template))
        remaining_tokens = MAX_TOKENS - fixed_tokens

        # 4. Strip any HTML markup, then truncate the paper text to the
        #    remaining budget, cutting back to the last full sentence.
        paper_text_str = BeautifulSoup(paper_texts, 'html.parser').get_text()
        paper_tokens = len(self.token_counter.encoding.encode(paper_text_str))

        if paper_tokens > remaining_tokens:
            tokens = self.token_counter.encoding.encode(paper_text_str)[:remaining_tokens]
            truncated_text = self.token_counter.encoding.decode(tokens).rsplit('. ', 1)[0] + '. '
        else:
            truncated_text = paper_text_str

        # 5. Fill the paper list into the measured template -> final prompt.
        final_prompt = self.__generate_prompt(
            fixed_template,
            paras={
                'PAPER LIST': truncated_text
            }
        )
        return (index, truncated_text, final_prompt)

    def write(self, topic, outline, rag_num = 30, subsection_len = 500, refining = True, reflection=True):
        """Generate the full survey for *topic* from *outline*.

        Drafts (or reloads from the ./temp pickle cache) every subsection,
        assembles the markdown, appends a conclusion, numbers the citations,
        and injects the Fig. 1 / Fig. 2 mindmaps plus chapter-3 tables.

        Returns:
            With ``refining``: (raw_survey, raw_with_refs, raw_references,
            refined_survey, refined_with_refs, refined_references);
            otherwise the first three only.

        NOTE(review): the ``reflection`` parameter is currently unused.
        """
        # from bs4 import BeautifulSoup
        # BeautifulSoup(outline, 'html.parser')
        parsed_outline = self.parse_outline(outline=outline)
        
        input_file = f"./temp/{topic}_Content.pkl"

        try:
            with open(input_file, 'rb') as file:
                section_content = pickle.load(file)
            print(f"Subsection with description loaded from {input_file}")
        except FileNotFoundError:
            print("No subsection content, generate it...")
            section_content = [[] for _ in range(len(parsed_outline['sections']))]
            all_indices = [(i, j) for i in range(len(parsed_outline['sections'])) 
                                    for j in range(len(parsed_outline['subsections'][i]))]

            # Pre-allocate flat result lists so outputs keep a deterministic order.
            section_paper_texts_truncate = [None] * len(all_indices)
            all_prompts = [None] * len(all_indices)
            section_indices = [None] * len(all_indices)
            subsection_indices = [None] * len(all_indices)

            MAX_TOKENS = 108000  # prompt budget under the 128k-token context limit

            # Retrieve papers + build prompts for all subsections in parallel.
            # NOTE(review): submitting a bound method to ProcessPoolExecutor
            # requires `self` to be picklable — confirm on the target platform.
            with concurrent.futures.ProcessPoolExecutor() as executor:
                future_to_index = {
                    executor.submit(self.process_subsection, index, topic, parsed_outline, outline, rag_num, subsection_len, MAX_TOKENS): index
                    for index in all_indices
                }

                for future in concurrent.futures.as_completed(future_to_index):
                    index = future_to_index[future]
                    i, j = index
                    _, truncated_text, final_prompt = future.result()
                    
                    # Store each result back at its original flat position.
                    pos = all_indices.index(index)
                    section_paper_texts_truncate[pos] = truncated_text
                    all_prompts[pos] = final_prompt  
                    section_indices[pos] = i
                    subsection_indices[pos] = j

            # Draft every subsection with one parallel batch_chat call.
            all_contents = self.api_model.batch_chat(all_prompts, temperature=0.3,max_threads=20)
            all_contents = [c.replace('<format>', '').replace('</format>', '') for c in all_contents]

            # import ipdb;ipdb.set_trace()
            
            # (disabled) second pass that checked drafts for incomplete citations:
            # citation_prompts = []
            # for i, content in enumerate(all_contents):
            #     sec_idx = section_indices[i]
            #     sub_idx = subsection_indices[i]

            #     citation_prompts.append(self.__generate_prompt(CHECK_INCOMPLETE_PROMPT, paras={
            #         'SUBSECTION': content, 
            #         'TOPIC': topic, 
            #     }))
            # time.sleep(0.3)

            # checked_contents = self.api_model.batch_chat(citation_prompts, temperature=0.3,max_threads=15)
            # checked_contents = [c.replace('<format>', '').replace('</format>', '') for c in checked_contents]
            checked_contents = all_contents  # citation check disabled; use drafts as-is

            # import ipdb;ipdb.set_trace()
            # Scatter the flat results back into the per-section structure.
            for i, content in enumerate(checked_contents):
                sec_idx = section_indices[i]
                section_content[sec_idx].append(content)

            with open(input_file, 'wb') as file:
                pickle.dump(section_content, file)
            
            print(f"Subsection with description generated and saved to {input_file}")
        print("Subsection is completed.")

        sections_dict, subsections_dict = transform_outline_for_section1(parsed_outline)
        from test_draw import create_mindmap
        create_mindmap(parsed_outline["title"],sections_dict,subsections_dict,filter_str(topic)+"_Fig_1",wrap_levels=[0])

        # import ipdb;ipdb.set_trace()
        raw_survey = self.generate_document(parsed_outline, section_content)

        with open(f'{input_file}.md', 'w', encoding='utf-8') as f:
            f.write(raw_survey)
            
        prompt = self.__generate_prompt(CONCLUSION_PROMPT, paras={'OVERALL OUTLINE': outline, 'TOPIC':topic, 'SURVEY':raw_survey})
        print("Writing conclusion...")
        conclusion = self.api_model.chat(prompt, temperature=0.3)
        # NOTE(review): chapter number 6 is hard-coded — assumes a 5-section outline; confirm.
        raw_survey = raw_survey + "\n## 6 Conclusion\n" + conclusion + '\n'

        raw_survey_with_references, raw_references = self.process_references(raw_survey)


        with open(f'{input_file}_final.md', 'w', encoding='utf-8') as f:
            f.write(raw_survey_with_references)

        # Insert the Fig. 1 (survey structure) image before the chapter-2 heading.
        image_markdown = f"![Survey structure]({filter_str(topic)}_Fig_1.png)\n\nFig. 1. The structure of this survey\n\n"
        # Locate the insertion point.
        # NOTE(review): find() returning -1 would splice the image one character
        # before the end — assumes this exact chapter-2 heading always exists.
        insert_position = raw_survey_with_references.find("## 2 Basic Notation and Related Definitions")
        # Splice the image markdown in at that position.
        raw_survey_with_references = raw_survey_with_references[:insert_position] + image_markdown + raw_survey_with_references[insert_position:]

        # Build the chapter-3 summary, per-subsection method lists and tables.
        chapter_3_sections = extract_chapter_sections_regex(
            raw_survey_with_references, 
            parsed_outline, 
            chapter_num=3
        )
        chapter_3_section_names = list(chapter_3_sections.keys())
        chapter_3_sections_promts = []
        chapter_3_sections_summary_promt = self.__generate_prompt(
            SECTION_3_SUMMARY_PROMPT,
            paras={
                'SUB_TITLE': " ".join(chapter_3_section_names),
                "SUB_DES": " ".join(parsed_outline['subsection_descriptions'][2])
            }
            )
        chapter_3_sections_promts.append(chapter_3_sections_summary_promt)
        for i in range(len(chapter_3_section_names)):
            chapter_3_sections_promt = self.__generate_prompt(
            SECTION_3_PROMPT,
            paras={
                'SUBSECTION_CONTENT': chapter_3_sections[chapter_3_section_names[i]]
            }
            )
            chapter_3_sections_promts.append(chapter_3_sections_promt)
            chapter_3_sections_table_promt = self.__generate_prompt(
            SECTION_3_TABLE_PROMPT,
            paras={
                'SUB_CONTENT': chapter_3_sections[chapter_3_section_names[i]],
                'SUB_NAME':topic,  # NOTE(review): dead value — overwritten by the duplicate key below; confirm intended placeholder name
                'SUB_NAME':chapter_3_section_names[i]
            }
            )
            chapter_3_sections_promts.append(chapter_3_sections_table_promt)

        # Prompt order is [summary, methods_1, table_1, methods_2, table_2, ...].
        all_res = self.api_model_air.batch_chat(chapter_3_sections_promts, temperature=0,max_threads=20)
        chapter_3_sections_summary = all_res[0]
        rest_res = all_res[1:]
        chapter_3_sections_summary += " The structure of this section and the methods involved are shown in Fig. 2.\n"
        chapter_3_sections_all_methods = rest_res[::2]
        chapter_3_sections_all_tables = rest_res[1::2]

        # Map 1-based numbers to the chapter-3 subsection names for the mindmap.
        section_names_dict = {str(i+1): name for i, name in enumerate(chapter_3_section_names)}
        def process_methods(methods):
            # Rewrite numeric <sup>47</sup> citations as [47].
            methods = re.sub(r'<sup>(\d+)</sup>', r'[\1]', methods)
            # Drop every non-numeric <sup> tag, including empty ones.
            methods = re.sub(r'<sup>(?:\D*|)</sup>', '', methods)
            return methods
        # Keyed by the subsection number prefix (e.g. "3.1").
        sections_all_methods_dict = {}
        for i, methods in enumerate(chapter_3_sections_all_methods):
            if methods != 'None':
                section_number = str(i+1)
                section_name = chapter_3_section_names[i]
                processed_methods = process_methods(methods)
                sections_all_methods_dict[section_name.split()[0]] = (section_number, processed_methods)
        
        create_mindmap("Section 3 Method",section_names_dict,sections_all_methods_dict,filter_str(topic)+"_Fig_2",max_length=60,wrap_levels=[2])

        # Insert the Fig. 2 (method structure) image right after the chapter-3 heading.
        image_markdown = f"![Method structure]({filter_str(topic)}_Fig_2.png)\n\nFig. 2 The structure of the Method\n\n"
        # Locate the "## 3 Methods" heading.
        insert_position = raw_survey_with_references.find("## 3 Methods")
        # Only insert when the heading is actually present.
        if insert_position != -1:
            # Advance past the heading line itself.
            insert_position += len("## 3 Methods\n")
            
            # Splice in the summary paragraph followed by the image markdown.
            raw_survey_with_references = (
                raw_survey_with_references[:insert_position] + 
                '\n' + chapter_3_sections_summary + image_markdown + 
                raw_survey_with_references[insert_position:]
            )
        # Append each generated table right after its subsection heading.
        for section_name, table in zip(chapter_3_section_names, chapter_3_sections_all_tables):
            section_header = f"## {section_name}"
            start_index = raw_survey_with_references.find(section_header)
            table = table.replace("```markdown\n", "").replace("```", "").strip()
            # Strip non-numeric <sup> tags from the table markup.
            table = re.sub(r'<sup>(?!\d+</sup>).*?</sup>', '', table)
            if start_index != -1:
                # Find the end of the heading's paragraph (first blank line).
                end_index = raw_survey_with_references.find("\n\n", start_index)
                
                if end_index == -1:
                    end_index = len(raw_survey_with_references)
                
                # Insert the table after the heading.
                updated_content = raw_survey_with_references[:end_index] + "\n\n" + table + "\n\n" + raw_survey_with_references[end_index:]
                raw_survey_with_references = updated_content
        # import ipdb;ipdb.set_trace()

        # import ipdb;ipdb.set_trace()
        if refining:
            final_section_content = self.refine_subsections(topic, outline, section_content)
            refined_survey = self.generate_document(parsed_outline, final_section_content)
            
            prompt = self.__generate_prompt(CONCLUSION_PROMPT, paras={'OVERALL OUTLINE': outline, 'TOPIC':topic, 'SURVEY':refined_survey})
            print("Writing conclusion...")
            conclusion = self.api_model.chat(prompt, temperature=0.3)
            refined_survey = refined_survey + "\n## 6 Conclusion\n" + conclusion + '\n'

            refined_survey_with_references, refined_references = self.process_references(refined_survey)
            return raw_survey+'\n', raw_survey_with_references+'\n', raw_references, refined_survey+'\n', refined_survey_with_references+'\n', refined_references#, mindmap
        else:
            print("No refine mode")
            return raw_survey+'\n', raw_survey_with_references+'\n', raw_references#, mindmap

    def compute_price(self):
        """Price the tokens consumed so far, using self.model's rates."""
        return self.token_counter.compute_price(input_tokens=self.input_token_usage, output_tokens=self.output_token_usage, model=self.model)


    def refine_subsections(self, topic, outline, section_content):
        """Run LCE refinement over every subsection in two threaded passes.

        Pass 1 refines even-indexed subsections against their unrefined
        neighbours; pass 2 refines odd-indexed ones against the already
        refined even results. Returns a new nested list; the input list is
        not mutated.
        """
        section_content_even = copy.deepcopy(section_content)
        print("Refining starts...")
        thread_l = []
        for i in range(len(section_content)):
            for j in range(len(section_content[i])):
                if j % 2 == 0:
                    # Build the (previous, current, next) window, padding with
                    # '' at section boundaries.
                    # NOTE(review): a section with exactly one subsection takes
                    # the j == 0 branch and yields a 2-element window, but lce
                    # indexes contents[2] — confirm this case cannot occur.
                    if j == 0:
                        contents = [''] + section_content[i][:2]
                    elif j == (len(section_content[i]) - 1):
                        contents = section_content[i][-2:] + ['']  
                    else:
                        contents = section_content[i][j-1:j+2]
                    thread = threading.Thread(target=self.lce, args=(topic, outline, contents, section_content_even[i], j))
                    thread_l.append(thread)
                    thread.start()
        for thread in thread_l:
            thread.join()


        final_section_content = copy.deepcopy(section_content_even)

        thread_l = []
        for i in range(len(section_content_even)):
            for j in range(len(section_content_even[i])):
                if j % 2 == 1:
                    if j == (len(section_content_even[i]) - 1):
                        contents = section_content_even[i][-2:] + ['']  
                    else:
                        contents = section_content_even[i][j-1:j+2]
                    thread = threading.Thread(target=self.lce, args=(topic, outline, contents, final_section_content[i], j))
                    thread_l.append(thread)
                    thread.start()
        for thread in thread_l:
            thread.join()
        
        return final_section_content

    def write_subsection_with_reflection(self, paper_texts_l, topic, outline, section, subsections, subdescriptions, res_l, idx, rag_num = 20, subsection_len = 1000, citation_num = 8):
        """Draft every subsection of one section, then citation-check each draft.

        Stores the checked contents into ``res_l[idx]`` (thread-friendly output
        slot) and also returns them. ``rag_num`` is currently unused here.
        """
        prompts = []
        for j in range(len(subsections)):
            subsection = subsections[j]
            description = subdescriptions[j]

            prompt = self.__generate_prompt(SUBSECTION_WRITING_PROMPT,\
            paras={'OVERALL OUTLINE': outline,\
            'SUBSECTION NAME': subsection,\
            'DESCRIPTION':description,\
            'TOPIC':topic,\
            'PAPER LIST':BeautifulSoup(paper_texts_l[j], 'html.parser').get_text(), \
            'SECTION NAME':section, \
            'WORD NUM':str(subsection_len),\
            'CITATION NUM':str(citation_num)})
            prompts.append(prompt)
            # print(j,self.token_counter.num_tokens_from_list_string([prompt]))

        contents = self.api_model.batch_chat(prompts, temperature=0.3)
        
        contents = [c.replace('<format>','').replace('</format>','') for c in contents]

        # Second pass: ask the model to verify citations in each draft.
        prompts = []
        for content, paper_texts in zip(contents, paper_texts_l):
            prompts.append(self.__generate_prompt(CHECK_CITATION_PROMPT, paras={'SUBSECTION': content, 'TOPIC':topic, 'PAPER LIST':paper_texts}))
        # self.input_token_usage += self.token_counter.num_tokens_from_list_string(prompts)
        contents = self.api_model.batch_chat(prompts, temperature=0.3)
        # self.output_token_usage += self.token_counter.num_tokens_from_list_string(contents)
        contents = [c.replace('<format>','').replace('</format>','') for c in contents]
    
        res_l[idx] = contents
        return contents
        
    def __generate_prompt(self, template, paras):
        """Fill a prompt template, replacing each ``[KEY]`` placeholder with its value."""
        prompt = template
        for k in paras.keys():
            prompt = prompt.replace(f'[{k}]', paras[k])
        return prompt
    
    def generate_prompt(self, template, paras):
        """Public twin of ``__generate_prompt``; identical placeholder substitution."""
        prompt = template
        for k in paras.keys():
            prompt = prompt.replace(f'[{k}]', paras[k])
        return prompt
    
    def lce(self, topic, outline, contents, res_l, idx):
        """Refine one subsection for local coherence with its neighbours.

        ``contents`` is [previous, current, following]. The refined text is
        stored into ``res_l[idx]``; the return value additionally strips the
        model's boilerplate prefix.
        """
        prompt = self.__generate_prompt(LCE_PROMPT, paras={'OVERALL OUTLINE': outline,'PREVIOUS': contents[0],\
                                                                          'FOLLOWING':contents[2],'TOPIC':topic,'SUBSECTION':contents[1]})
        # self.input_token_usage += self.token_counter.num_tokens_from_string(prompt)
        # refined_content = self.api_model.chat(prompt, temperature=0.3).replace('<format>','').replace('</format>','')
        refined_content = self.api_model.chat(prompt, temperature=0.3).replace('<format>','').replace('</format>','')
        self.output_token_usage += self.token_counter.num_tokens_from_string(refined_content)
     #   print(prompt+'\n---------------------------------\n'+refined_content)
        res_l[idx] = refined_content
        return refined_content.replace('Here is the refined subsection:\n','')

    def parse_outline(self, outline):
        """Parse a markdown outline into title/sections/subsections and their
        'Description:' lines.

        Expects '# title', '## section', '### subsection' headings, each
        optionally followed by a line starting with 'Description:'.
        """
        result = {
            "title": "",
            "sections": [],
            "section_descriptions": [],
            "subsections": [],
            "subsection_descriptions": []
        }
    
        # Split the outline into lines
        lines = outline.split('\n')
        
        for i, line in enumerate(lines):
            # Match title, sections, subsections and their descriptions
            if line.startswith('# '):
                result["title"] = line[2:].strip()
            elif line.startswith('## '):
                result["sections"].append(line[3:].strip())
                # Extract the description in the next line
                if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                    result["section_descriptions"].append(lines[i + 1].split('Description:', 1)[1].strip())
                    result["subsections"].append([])
                    result["subsection_descriptions"].append([])
            elif line.startswith('### '):
                if result["subsections"]:
                    result["subsections"][-1].append(line[4:].strip())
                    # Extract the description in the next line
                    if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                        result["subsection_descriptions"][-1].append(lines[i + 1].split('Description:', 1)[1].strip())

        return result

    def process_references(self, survey):
        """Extract every bracketed citation in *survey* and rewrite the text
        with numbered references; see ``replace_citations_with_numbers``."""
        citations = self.extract_citations(survey)
        # import ipdb;ipdb.set_trace()
        return self.replace_citations_with_numbers(citations, survey)

    def generate_document(self, parsed_outline, subsection_contents):
        """Assemble the survey markdown: title, '## section' and '### subsection'
        headings, with each subsection's drafted content underneath."""
        document = []
        
        # Append title
        title = parsed_outline['title']
        document.append(f"# {title}\n")
        
        # Iterate over sections and their content
        for i, section in enumerate(parsed_outline['sections']):
            document.append(f"## {section}\n")
            # Append subsections and their contents
            for j, subsection in enumerate(parsed_outline['subsections'][i]):
                document.append(f"### {subsection}\n")
          #      document.append(f"{parsed_outline['subsection_descriptions'][i][j]}\n")
                # Append detailed content for each subsection
                if i < len(subsection_contents) and j < len(subsection_contents[i]):
                    document.append(subsection_contents[i][j] + "\n")
        
        return "\n".join(document)

    def process_outlines(self, section_outline, sub_outlines):
        """Merge a section-level outline with per-section subsection outlines
        into one numbered markdown outline with Description lines.

        NOTE(review): extract_title_sections_descriptions and
        extract_subsections_subdescriptions are not defined in this file —
        confirm they exist on this class elsewhere.
        """
        res = ''
        survey_title, survey_sections, survey_section_descriptions = self.extract_title_sections_descriptions(outline=section_outline)
        res += f'# {survey_title}\n\n'
        for i in range(len(survey_sections)):
            section = survey_sections[i]
            res += f'## {i+1} {section}\nDescription: {survey_section_descriptions[i]}\n\n'
            subsections, subsection_descriptions = self.extract_subsections_subdescriptions(sub_outlines[i])
            for j in range(len(subsections)):
                subsection = subsections[j]
                res += f'### {i+1}.{j+1} {subsection}\nDescription: {subsection_descriptions[j]}\n\n'
        return res
    
    def generate_mindmap(self, subsection_citations, outline):
        """Build a stripped outline where each '### subsection' heading is
        followed by its citation list; non-heading lines are removed."""
        to_remove = outline.split('\n')
        for _ in to_remove:
            if not '#' in _:
                outline = outline.replace(_,'')
        subsections = re.findall(pattern=r'### (.*?)\n', string=outline)
        for subs, _ in zip(subsections,range(len(subsections))):
            outline = outline.replace(subs, subs+'\n'+str(subsection_citations[_]))
        to_remove = re.findall(pattern=r'\](.*?)#', string=outline)
        for _ in to_remove:
            outline = outline.replace(_,'')
        return outline

    def extract_citations(self, markdown_text):
        """Collect unique citation strings from every [...] bracket, splitting
        multi-citation brackets on ';' and preserving first-seen order."""
        # Match the contents of every square-bracket group.
        pattern = re.compile(r'\[(.*?)\]')
        matches = pattern.findall(markdown_text)
        # Split multi-citation groups and de-duplicate while keeping order.
        citations = list()
        for match in matches:
            # Split the individual citations and trim whitespace.
            parts = match.split(';')
            for part in parts:
                cit = part.strip()
                if cit not in citations:
                    citations.append(cit)
        return citations

    def replace_citations_with_numbers(self, citations, markdown_text):
        """Resolve citations to paper metadata and rewrite them as numbered refs.

        Keeps only citations mentioning 'chunk', looks each up concurrently via
        ``process_citation``, replaces every resolvable [..] marker with
        <sup>n</sup>, drops unresolvable ones, and appends a numbered
        '# References' section.

        Returns:
            (markdown_with_numbered_citations, list_of_reference_details)
        """
        # import ipdb;ipdb.set_trace()

        filtered_citations = [citation for citation in citations if 'chunk' in citation]
        filtered_citations = [
            citation.split('paper_id: ')[1].strip() if 'paper_id:' in citation else citation
            for citation in filtered_citations
        ] # keep entries of the form 'paper_id: 658b93a6939a5f40825d85b8, chunk 6'

        filtered_citations = remove_duplicates(filtered_citations)
        print(f"Valid citation format num is {len(filtered_citations)}")

        valid_citations = []
        paper_details = []
        
        results = {}

        with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:
            # Submit all lookups, remembering which citation each future is for.
            future_to_citation = {executor.submit(process_citation, citation): citation for citation in filtered_citations}
            
            # Collect results as they complete; original order restored below.
            for future in concurrent.futures.as_completed(future_to_citation):
                citation = future_to_citation[future]
                try:
                    result = future.result()
                    if result is not None:
                        valid_citation, detail = result
                        results[citation] = (valid_citation, detail)
                except Exception as e:
                    print(f"Error processing citation {citation}: {e}")

        # Re-walk filtered_citations so the numbering follows citation order.
        for citation in filtered_citations:
            if citation in results:
                valid_citation, detail = results[citation]
                valid_citations.append(valid_citation)
                paper_details.append(detail)

        # import ipdb;ipdb.set_trace()
        # for citation in filtered_citations:
            # (disabled) per-citation debug logging:
            # print(f"Processed citation {citation} successfully")
            # print(f"Title: {title}")
            # print(f"Details: {details}")
            # print(f"Total time taken: {loop_elapsed_time:.2f} seconds")
            # print("-" * 40)
        #     # TODO (disabled): simulate randomly skipping citations when RAG is down
        #     # import random
        #     # if random.random() < 0.2:
        #     #     continue
        #     # print("simulate RAG check citation...")
        #     # cur_id = citation.split(',')[0]
        #     # details = f"Journal_{cur_id}"
        #     # title = f"Title_{cur_id}"
        #     # merge = [title] + [" " + transform_journal_name(details)] + [citation.split(',')[-1]]
        #     # valid_citations.append(citation)
        #     # paper_details.append(','.join(merge))
        id_to_number = {item: idx + 1 for idx, item in enumerate(valid_citations)}
        print(f"True valid citation num is {len(id_to_number)}")

        # Extract every bracketed group again for in-place replacement.
        pattern = re.compile(r'\[(.*?)\]')
        matches = pattern.findall(markdown_text)
        # Replace each bracket with its reference number, or drop it.
        for match in matches:
            # Strip any 'paper_id: ' prefix, mirroring filtered_citations above.
            processed_match = match.split('paper_id: ')[-1].strip()
            
            # The processed string is the lookup key.
            citation_key = processed_match
            
            if citation_key in id_to_number:
                # Replace the original bracket (which may still carry the
                # 'paper_id:' prefix) using the number found via the key.
                markdown_text = markdown_text.replace(
                    f"[{match}]", 
                    f"<sup>{id_to_number[citation_key]}</sup>"
                )
            else:
                markdown_text = markdown_text.replace(f"[{match}]", "")

        # Build the numbered reference list.
        references_section = '\n\n# References\n\n' + '\n\n'.join([f"[{idx + 1}] {item}" for idx, item in enumerate(paper_details)])
        # references = {num: titles_to_ids[title] for num, title in id_to_number.items()}

        return markdown_text + references_section, paper_details
