import os

from zhipuai import ZhipuAI

from prompt import ROUGH_OUTLINE_PROMPT, SUBSECTION_OUTLINE_PROMPT, EDIT_FINAL_OUTLINE_PROMPT, MERGING_OUTLINE_PROMPT, \
    EDIT_FINAL_OUTLINE_PROMPT2, SUBSECTION_WRITING_PROMPT, LCE_PROMPT
from rag import *

# API key for ZhipuAI. SECURITY(review): a live credential was hard-coded
# here and committed to source control — it should be rotated. Prefer the
# ZHIPUAI_API_KEY environment variable; the hard-coded value is kept only
# as a backward-compatible fallback.
api_key = os.environ.get("ZHIPUAI_API_KEY", "29a6e4a2ee21cc38d721fc63a135b6a5.s4l9JvW1FksKq0xo")

def __generate_prompt(template, paras):
    """Fill a prompt template by substituting ``[KEY]`` placeholders.

    Args:
        template (str): Prompt template containing ``[KEY]`` markers.
        paras (dict[str, str]): Mapping of placeholder name to replacement
            text; each ``[name]`` occurrence in the template is replaced.

    Returns:
        str: The template with every listed placeholder substituted.
    """
    prompt = template
    # Iterate items() directly instead of keys() + a second dict lookup.
    for key, value in paras.items():
        prompt = prompt.replace(f'[{key}]', value)
    return prompt

def extract_title_sections_descriptions(outline):
    """Parse a rough outline into its title, section names, and descriptions.

    Expects lines of the form ``Title: ...``, ``Section N: ...`` and
    ``Description N: ...`` with N starting at 1 (up to 100 sections).

    Args:
        outline (str): Raw outline text produced by the LLM.

    Returns:
        tuple[str, list[str], list[str]]: (title, sections, descriptions),
        with sections and descriptions index-aligned.

    Raises:
        IndexError: If the outline contains no ``Title: `` marker.
    """
    title = outline.split('Title: ')[1].split('\n')[0]
    sections, descriptions = [], []
    for i in range(100):
        section_marker = f'Section {i + 1}: '
        description_marker = f'Description {i + 1}: '
        # BUG FIX: the original tested "Section N" without the colon, so
        # "Section 1" matched inside "Section 10"/"Section 1." and the
        # subsequent split(...)[1] raised IndexError; it also crashed when a
        # Description line was missing. Require both exact markers so the two
        # lists stay aligned and malformed outlines are skipped, not fatal.
        if section_marker in outline and description_marker in outline:
            sections.append(outline.split(section_marker)[1].split('\n')[0])
            descriptions.append(outline.split(description_marker)[1].split('\n')[0])
    return title, sections, descriptions

def extract_subsections_subdescriptions(outline):
    """Parse a section sub-outline into subsection names and descriptions.

    Expects lines of the form ``Subsection N: ...`` and ``Description N: ...``
    with N starting at 1 (up to 100 subsections).

    Args:
        outline (str): Raw sub-outline text produced by the LLM.

    Returns:
        tuple[list[str], list[str]]: (subsections, subdescriptions),
        index-aligned.
    """
    subsections, subdescriptions = [], []
    for i in range(100):
        subsection_marker = f'Subsection {i + 1}: '
        description_marker = f'Description {i + 1}: '
        # BUG FIX: mirror of extract_title_sections_descriptions — require
        # both exact (colon-terminated) markers before splitting, so a bare
        # "Subsection N" mention or a missing Description line no longer
        # raises IndexError or desynchronizes the two lists.
        if subsection_marker in outline and description_marker in outline:
            subsections.append(outline.split(subsection_marker)[1].split('\n')[0])
            subdescriptions.append(outline.split(description_marker)[1].split('\n')[0])
    return subsections, subdescriptions

def process_outlines(section_outline, sub_outlines):
    """Merge the first-level outline and per-section sub-outlines into markdown.

    Args:
        section_outline (str): Rough outline text ("Title/Section/Description").
        sub_outlines (list[str]): One raw sub-outline per section, in order.

    Returns:
        str: Numbered markdown outline with Description lines retained.
    """
    title, sections, section_descriptions = extract_title_sections_descriptions(
        outline=section_outline)
    parts = [f'# {title}\n\n']
    for sec_no, (section, description) in enumerate(zip(sections, section_descriptions), start=1):
        parts.append(f'## {sec_no} {section}\nDescription: {description}\n\n')
        subsections, sub_descriptions = extract_subsections_subdescriptions(sub_outlines[sec_no - 1])
        for sub_no, (subsection, sub_description) in enumerate(zip(subsections, sub_descriptions), start=1):
            parts.append(f'### {sec_no}.{sub_no} {subsection}\nDescription: {sub_description}\n\n')
    return ''.join(parts)

def remove_descriptions(text):
    """Drop every line whose stripped content starts with "Description".

    Args:
        text (str): Multi-line input string.

    Returns:
        str: The text with all "Description..." lines removed; remaining
        lines keep their original order and newline separation.
    """
    kept_lines = (
        line
        for line in text.split('\n')
        if not line.strip().startswith("Description")
    )
    return '\n'.join(kept_lines)

def consolidate_rag_result(rag_result, kind="all"):
    """Serialize retrieved paper records into text batches of ~100k chars.

    Args:
        rag_result (list[dict]): Retrieved entries. Required keys depend on
            ``kind``: always ``paper_title``, plus ``chunk_id``/``chunk``
            ("simple"), ``abstract``, ``introduction``, or ``related_works``.
        kind (str): Which fields to serialize per entry — "simple",
            "abstract", "introduction", "related_works", or anything else
            for abstract + introduction + related_works combined.

    Returns:
        list[str]: Batched text blocks. Each batch stays at or below
        100,000 characters unless a single entry alone exceeds the limit.
    """
    max_chars = 100000
    batches = []       # finished text batches
    cur_content = ""   # batch currently being filled

    for entry in rag_result:
        if kind == "simple":
            content = (
                f"paper_title: {entry['paper_title']}\n"
                f"chunk_id: {entry['chunk_id']}\n"
                f"{entry['chunk']}\n"
            )
        elif kind == "abstract":
            content = (
                f"paper_title: {entry['paper_title']}\n"
                f"{entry['abstract']}\n"
            )
        elif kind == "introduction":
            content = (
                f"paper_title: {entry['paper_title']}\n"
                f"{entry['introduction']}\n"
            )
        elif kind == "related_works":
            content = (
                f"paper_title: {entry['paper_title']}\n"
                f"{entry['related_works']}\n"
            )
        else:
            content = (
                f"paper_title: {entry['paper_title']}\n"
                f"{entry['abstract']}\n"
                f"{entry['introduction']}\n"
                f"{entry['related_works']}\n"
            )

        # Start a new batch once adding this entry would exceed the limit.
        if len(cur_content) + len(content) > max_chars:
            # BUG FIX: the original unconditionally appended cur_content
            # here, so a first entry longer than the limit produced a
            # spurious empty-string batch at the head of the result.
            if cur_content:
                batches.append(cur_content)
            cur_content = content
        else:
            cur_content += content

    # Flush the final, partially-filled batch.
    if cur_content:
        batches.append(cur_content)

    return batches


def do_rag(keyword):
    """Retrieve every chunk of every paper related to *keyword*.

    Combines three retrieval routes — semantic search (top 30), title
    substring match (top 100), and chunk substring match (top 100) —
    deduplicates the paper ids, then fetches all chunks for those papers.

    Args:
        keyword (str): Query string / topic.

    Returns:
        The list produced by ``search_chunks_by_paper_id`` for the union
        of paper ids.
    """
    paper_ids = set()
    for hit in search_papers(query=keyword, top_k=30):
        paper_ids.add(hit['entity']['paper_id'])
    for hit in query_by_title_contain(title=keyword, top_k=100):
        paper_ids.add(hit['paper_id'])
    for hit in query_by_chunk_contain(chunk=keyword, top_k=100):
        paper_ids.add(hit['paper_id'])
    return search_chunks_by_paper_id(list(paper_ids))

def do_rag_simple(keyword, top_k=30):
    """Semantic-search *keyword* and return a flat list of chunk records.

    Args:
        keyword (str): Query string.
        top_k (int): Maximum number of hits to retrieve.

    Returns:
        list[dict]: One dict per hit with keys ``paper_id``, ``paper_title``,
        ``chunk_id`` and ``chunk``.
    """
    records = []
    for hit in search_papers(query=keyword, top_k=top_k):
        entity = hit['entity']
        records.append({
            'paper_id': hit['id'],
            'paper_title': entity['paper_title'],
            'chunk_id': entity['chunk_id'],
            'chunk': entity['chunk_text'],
        })
    return records

def zhipu_api(prompt, model='glm-4-plus'):
    """Send *prompt* to the ZhipuAI chat endpoint and return the reply text.

    Args:
        prompt (str): User-turn content.
        model (str): Model name to call (defaults to 'glm-4-plus').

    Returns:
        str: The assistant's reply content from the first choice.
    """
    client = ZhipuAI(api_key=api_key)
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        stream=False,
    )
    return response.choices[0].message.content

def parse_outline(outline):
    """Parse a markdown outline into title/section/subsection structures.

    Recognized lines: ``# Title``, ``## Section``, ``### Subsection``;
    a line immediately following a (sub)section that starts with
    ``Description:`` supplies its description.

    Args:
        outline (str): Markdown outline text.

    Returns:
        dict: Keys ``title`` (str), ``sections`` (list[str]),
        ``section_descriptions`` (list[str]), ``subsections``
        (list[list[str]]), ``subsection_descriptions`` (list[list[str]]).
        The four list fields are index-aligned per section; a missing
        Description line yields "".
    """
    result = {
        "title": "",
        "sections": [],
        "section_descriptions": [],
        "subsections": [],
        "subsection_descriptions": []
    }

    # Split the outline into lines
    lines = outline.split('\n')

    for i, line in enumerate(lines):
        if line.startswith('# '):
            result["title"] = line[2:].strip()
        elif line.startswith('## '):
            result["sections"].append(line[3:].strip())
            # BUG FIX: the original only appended the description AND the
            # per-section subsection lists when a "Description:" line
            # followed, so a section without one left the lists misaligned
            # and later indexing (e.g. generate_document) crashed. Always
            # append aligned entries, defaulting the description to "".
            if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                result["section_descriptions"].append(lines[i + 1].split('Description:', 1)[1].strip())
            else:
                result["section_descriptions"].append("")
            result["subsections"].append([])
            result["subsection_descriptions"].append([])
        elif line.startswith('### '):
            # Ignore a subsection that appears before any section header.
            if result["subsections"]:
                result["subsections"][-1].append(line[4:].strip())
                if i + 1 < len(lines) and lines[i + 1].startswith('Description:'):
                    result["subsection_descriptions"][-1].append(lines[i + 1].split('Description:', 1)[1].strip())
                else:
                    # Keep subsection descriptions aligned with subsections.
                    result["subsection_descriptions"][-1].append("")

    return result

def generate_document(parsed_outline, subsection_contents):
    """Assemble the survey markdown from the parsed outline and drafts.

    Args:
        parsed_outline (dict): Structure produced by ``parse_outline``.
        subsection_contents (list[list[str]]): Draft text per
            [section][subsection]; missing entries are simply skipped.

    Returns:
        str: Markdown document with title, section and subsection headers
        and each subsection's draft body.
    """
    parts = [f"# {parsed_outline['title']}\n"]

    for sec_idx, section in enumerate(parsed_outline['sections']):
        parts.append(f"## {section}\n")
        for sub_idx, subsection in enumerate(parsed_outline['subsections'][sec_idx]):
            parts.append(f"### {subsection}\n")
            # Only emit a body when a draft exists for this slot.
            has_content = (sec_idx < len(subsection_contents)
                           and sub_idx < len(subsection_contents[sec_idx]))
            if has_content:
                parts.append(subsection_contents[sec_idx][sub_idx] + "\n")

    return "\n".join(parts)

def extract_citations(markdown_text):
    """Collect ``[Name][number]`` citation pairs and strip unmatched brackets.

    Scans every ``[...]`` span in order. A letter-initial span immediately
    followed by an all-digit span is kept as a citation pair; every other
    bracketed span is deleted from the text.

    Args:
        markdown_text (str): Survey markdown containing bracketed citations.

    Returns:
        tuple[list[str], str]: The flat list of kept name/number strings
        (pairs interleaved) and the cleaned markdown text.
    """
    bracket_contents = re.findall(r'\[(.*?)\]', markdown_text)

    kept = []
    pos = 0
    total = len(bracket_contents)
    while pos < total:
        current = bracket_contents[pos]
        if not re.match(r'^[a-zA-Z]', current):
            # Not letter-initial: discard and advance by one.
            pos += 1
            continue
        follower = bracket_contents[pos + 1] if pos + 1 < total else None
        if follower is not None and re.match(r'^\d+$', follower):
            kept.append(current.strip())
            kept.append(follower.strip())
        # Letter-initial spans always consume two slots, whether or not a
        # numeric follower completed the pair (mirrors original behaviour).
        pos += 2

    # Remove every bracketed span that did not survive as part of a pair.
    for content in bracket_contents:
        if content not in kept:
            markdown_text = markdown_text.replace(f'[{content}]', '')

    return kept, markdown_text

from tqdm import tqdm


# def replace_citations_with_numbers(citations, markdown_text):
#     ids = self.db.get_titles_from_citations(citations)
#
#     citation_to_ids = {citation: idx for citation, idx in zip(citations, ids)}
#
#     paper_infos = self.db.get_paper_info_from_ids(ids)
#     temp_dic = {p['id']: p['title'] for p in paper_infos}
#
#     titles = [temp_dic[_] for _ in tqdm(ids)]
#
#     ids_to_titles = {idx: title for idx, title in zip(ids, titles)}
#     titles_to_ids = {title: idx for idx, title in ids_to_titles.items()}
#
#     title_to_number = {title: num + 1 for num, title in enumerate(titles)}
#
#     title_to_number = {title: num + 1 for num, title in enumerate(title_to_number.keys())}
#
#     number_to_title = {num: title for title, num in title_to_number.items()}
#     number_to_title_sorted = {key: number_to_title[key] for key in sorted(number_to_title)}
#
#     def replace_match(match):
#         citation_text = match.group(1)
#
#         individual_citations = citation_text.split(';')
#
#         numbered_citations = [str(title_to_number[ids_to_titles[citation_to_ids[citation.strip()]]]) for citation in
#                               individual_citations]
#
#         return '[' + '; '.join(numbered_citations) + ']'
#
#     updated_text = re.sub(r'\[(.*?)\]', replace_match, markdown_text)
#
#     references_section = "\n\n## References\n\n"
#
#     references = {num: titles_to_ids[title] for num, title in number_to_title_sorted.items()}
#     for idx, title in number_to_title_sorted.items():
#         t = title.replace('\n', '')
#         references_section += f"[{idx}] {t}\n\n"
#
#     return updated_text + references_section, references
def replace_citations_with_numbers(citations, markdown_text):
    """Replace each ``[Name][number]`` pair with a running reference index.

    *citations* is the flat, pair-interleaved list produced by
    ``extract_citations``. A repeated pair reuses its first-assigned index.
    A "## References" section listing every unique pair is appended.

    Args:
        citations (list[str]): Interleaved name/number strings.
        markdown_text (str): Text containing the ``[Name][number]`` pairs.

    Returns:
        str: The rewritten markdown followed by the references section.
    """
    pair_to_number = {}  # first-seen index per (name, number) pair
    next_number = 1
    references_section = "\n\n## References\n\n"

    # BUG FIX: the original indexed citations[i + 1] inside a step-2 range
    # and crashed with IndexError on an odd-length list; pairing the even
    # and odd slices with zip simply drops a dangling trailing element.
    for name, number in zip(citations[0::2], citations[1::2]):
        key = (name, number)
        assigned = pair_to_number.get(key)
        if assigned is None:
            # New pair: allocate the next index and add a references entry.
            assigned = next_number
            pair_to_number[key] = assigned
            references_section += f"[{assigned}] {name}. [{number}]\n\n"
            next_number += 1

        # Replace every literal [Name][number] occurrence (with optional
        # internal whitespace) by the assigned [index].
        pattern = re.compile(r'\[{}\s*\]\[\s*{}\]'.format(
            re.escape(name),
            re.escape(number)
        ))
        markdown_text = pattern.sub(f'[{assigned}]', markdown_text)

    return markdown_text + references_section

def lce(topic, outline, contents, res_l, idx):
    """Refine one subsection for local coherence against its neighbours.

    Args:
        topic (str): Survey topic.
        outline (str): Overall outline text fed to the prompt.
        contents (list[str]): [previous, current, following] subsection texts.
        res_l (list): Output list; the refined text is stored at *idx*.
        idx (int): Slot in *res_l* to write.

    Returns:
        str: The refined subsection with the boilerplate lead-in stripped.
    """
    paras = {
        'OVERALL OUTLINE': outline,
        'PREVIOUS': contents[0],
        'FOLLOWING': contents[2],
        'TOPIC': topic,
        'SUBSECTION': contents[1],
    }
    prompt = __generate_prompt(LCE_PROMPT, paras=paras)
    refined = zhipu_api(prompt).replace('<format>', '').replace('</format>', '')
    # NOTE(review): res_l keeps the version WITH the "Here is the refined
    # subsection:" lead-in while the return value strips it — confirm this
    # asymmetry is intentional.
    res_l[idx] = refined
    return refined.replace('Here is the refined subsection:\n', '')

if __name__ == "__main__":
    # End-to-end survey-generation pipeline: RAG retrieval -> rough outlines
    # -> merged outline -> per-section sub-outlines -> polished outline ->
    # subsection drafts -> citation numbering -> two-pass LCE refinement.
    topic = 'Aspect Based Sentiment Analysis'
    paper_id2chunks_list = do_rag(topic)
    print(len(paper_id2chunks_list))

    # Generate the first-level outline: one candidate outline per
    # paper-section kind (abstract / introduction / related_works).
    id2section = {
        "0": "abstract",
        "1": "introduction",
        "2": "related_works",
    }
    outlines = []
    for i in range(3):
        section_num = 5  # requested number of top-level survey sections

        outline_rag_result = consolidate_rag_result(paper_id2chunks_list, kind=id2section[str(i)])
        # Only the first (~100k-char) batch of retrieved text is given to the model.
        prompt = __generate_prompt(ROUGH_OUTLINE_PROMPT, paras={'PAPER LIST': outline_rag_result[0], 'TOPIC': topic,
                                                                'SECTION NUM': str(section_num)})
        outline = zhipu_api(prompt)
        print(outline)
        outlines.append(outline)

    # Merge the three candidate outlines into a single first-level outline.
    outline_texts = ''
    for i, o in zip(range(len(outlines)), outlines):
        outline_texts += f'---\noutline_id: {i}\n\noutline_content:\n\n{o}\n'
    outline_texts += '---\n'
    prompt = __generate_prompt(MERGING_OUTLINE_PROMPT, paras={'OUTLINE LIST': outline_texts, 'TOPIC': topic})
    outline = zhipu_api(prompt)
    print(outline)

    survey_title, survey_sections, survey_section_descriptions = extract_title_sections_descriptions(outline)
    print(survey_title + '\n')
    for section_name, section_description in zip(survey_sections, survey_section_descriptions):
        print(f"Section Name: {section_name}\nDescription: {section_description}\n\n")

    # Generate the second-level outline (subsections) for every section.
    sub_sections_list = []
    for section_name, section_description in zip(survey_sections, survey_section_descriptions):
        query = f"topic: {topic}. section name: {section_name}."
        simple_rag_result = do_rag_simple(query)
        subsection_rag_result = consolidate_rag_result(simple_rag_result, kind='simple')
        prompt = __generate_prompt(SUBSECTION_OUTLINE_PROMPT,
                                paras={'OVERALL OUTLINE': outline, 'SECTION NAME': section_name,
                                        'SECTION DESCRIPTION': section_description, 'TOPIC': topic,
                                        'PAPER LIST': subsection_rag_result[0]})
        # print(prompt + '\n')
        sub_outlines = zhipu_api(prompt)
        # print(sub_outlines)
        sub_sections_list.append(sub_outlines)
        # prompts.append(prompt)

    merged_outline = process_outlines(outline, sub_sections_list)
    print(merged_outline)

    # Polish the merged two-level outline as a whole.
    prompt = __generate_prompt(EDIT_FINAL_OUTLINE_PROMPT2, paras={'OVERALL OUTLINE': merged_outline})
    print(prompt)

    final_outline = zhipu_api(prompt).replace('<format>\n', '').replace('</format>', '')
    print(final_outline)

    final_outline_wo_description = remove_descriptions(final_outline)
    parsed_outline = parse_outline(outline=final_outline)

    # Retrieve supporting paper texts (and their ids) per subsection.
    section_paper_texts = [[] for _ in range(len(parsed_outline['sections']))]
    section_references_ids = [[] for _ in range(len(parsed_outline['sections']))]
    for i in range(len(parsed_outline['sections'])):
        for j in range(len(parsed_outline['subsections'][i])):
            subsection_name = parsed_outline['subsections'][i][j]
            # NOTE(review): the "subsection.txt name" label looks like a typo
            # for "subsection name" — runtime string left unchanged here.
            print(f"subsection.txt name: {subsection_name}")
            query = f"topic: {topic}. section name: {subsection_name}."
            simple_rag_result = do_rag_simple(query)
            paper_ids = []
            for paper in simple_rag_result:
                paper_ids.append(paper['paper_id'])
            section_references_ids[i].append(paper_ids)
            subsection_rag_result = consolidate_rag_result(simple_rag_result, kind='simple')
            section_paper_texts[i].append(subsection_rag_result)

    # Write the first draft of every subsection.
    section_contents = [[] for _ in range(len(parsed_outline['sections']))]
    for i in range(len(parsed_outline['sections'])):
        for j in range(len(parsed_outline['subsections'][i])):
            section = parsed_outline['sections'][i]
            subsection = parsed_outline['subsections'][i][j]
            description = parsed_outline['subsection_descriptions'][i][j]
            paper_list = section_paper_texts[i][j][0]
            subsection_len = 500  # requested word count per subsection
            prompt = __generate_prompt(SUBSECTION_WRITING_PROMPT,
                                    paras={'OVERALL OUTLINE': final_outline_wo_description,
                                            'SUBSECTION NAME': subsection,
                                            'DESCRIPTION': description, 'TOPIC': topic, 'PAPER LIST': paper_list,
                                            'SECTION NAME': section, 'WORD NUM': str(subsection_len)})
            subsection_text = zhipu_api(prompt)

            section_contents[i].append(subsection_text)

    raw_survey = generate_document(parsed_outline, section_contents)

    citations, filtered_raw_survey = extract_citations(raw_survey)
    raw_survey_with_references = replace_citations_with_numbers(citations, filtered_raw_survey)

    import copy

    # First LCE pass: refine the even-indexed subsections, using the
    # still-unrefined odd-indexed neighbours as fixed context.
    section_content_even = copy.deepcopy(section_contents)

    for i in range(len(section_contents)):
        for j in range(len(section_contents[i])):
            if j % 2 == 0:
                if j == 0:
                    # NOTE(review): if a section has exactly one subsection,
                    # `contents` has length 2 here and contents[2] below
                    # raises IndexError — confirm sections always have >= 2
                    # subsections before relying on this.
                    contents = [''] + section_contents[i][:2]
                elif j == (len(section_contents[i]) - 1):
                    contents = section_contents[i][-2:] + ['']
                else:
                    contents = section_contents[i][j - 1:j + 2]

                # NOTE(review): this feeds the merged first-level `outline`,
                # not `final_outline` — confirm which outline LCE should see.
                prompt = __generate_prompt(LCE_PROMPT, paras={'OVERALL OUTLINE': outline, 'PREVIOUS': contents[0],
                                                            'FOLLOWING': contents[2], 'TOPIC': topic,
                                                            'SUBSECTION': contents[1]})
                refined_content = zhipu_api(prompt).replace('<format>', '').replace('</format>', '')
                # BUG(review): str.replace returns a new string; this result
                # is discarded, so the lead-in line is never actually removed.
                refined_content.replace('Here is the refined subsection:\n', '')
                #   print(prompt+'\n---------------------------------\n'+refined_content)
                section_content_even[i][j] = refined_content

    # Second LCE pass: refine the odd-indexed subsections using the already
    # refined even-indexed neighbours.
    final_section_content = copy.deepcopy(section_content_even)

    for i in range(len(section_content_even)):
        for j in range(len(section_content_even[i])):
            if j % 2 == 1:
                if j == (len(section_content_even[i]) - 1):
                    contents = section_content_even[i][-2:] + ['']
                else:
                    contents = section_content_even[i][j - 1:j + 2]
                prompt = __generate_prompt(LCE_PROMPT, paras={'OVERALL OUTLINE': outline, 'PREVIOUS': contents[0],
                                                            'FOLLOWING': contents[2], 'TOPIC': topic,
                                                            'SUBSECTION': contents[1]})
                refined_content = zhipu_api(prompt).replace('<format>', '').replace('</format>', '')
                #   print(prompt+'\n---------------------------------\n'+refined_content)
                # BUG(review): result of str.replace is discarded (no-op),
                # same as in the even pass above.
                refined_content.replace('Here is the refined subsection:\n', '')
                final_section_content[i][j] = refined_content

    refined_survey = generate_document(parsed_outline, final_section_content)