from prompt import *

import api
from doc import Doc, Query, Collection, SearchResult, Paper
from config import zhipu_api_key, paper_search_api_url
from typing import List
import utils
import re


def filter_collection(collection: Collection, query: str, llm):
    """Keep only the chunks the LLM judges strongly related to the query.

    Builds one relativity-score prompt per doc, sends them in a single
    batch, and returns the docs whose verdict is exactly "strongly".
    """
    prompts = []
    for doc in collection.docs:
        relativity_score.do("theme", query)
        relativity_score.do("paper_fragment", doc.text)
        prompts.append(str(relativity_score))
    verdicts = llm.batch_query(prompts)
    # batch_query preserves order, so verdicts line up with collection.docs.
    return [
        doc
        for doc, verdict in zip(collection.docs, verdicts)
        if verdict == "strongly"
    ]


def summarize_weakly_chunk(collection: Collection, llm):
    """Fill in missing summaries for docs in the collection.

    Docs that already have a summary are left untouched; the rest are
    summarized in one batched LLM call and the results written back.
    Returns the (mutated) collection.
    """
    prompts = []
    pending = []  # indexes of docs still awaiting a summary
    for idx, doc in enumerate(collection.docs):
        if doc.summary:
            continue
        summary_weakly_chunk.do("paper_fragment", doc.text)
        prompts.append(str(summary_weakly_chunk))
        pending.append(idx)
    # Batch results come back in prompt order, matching `pending`.
    for idx, summary in zip(pending, llm.batch_query(prompts)):
        collection.docs[idx].summary = summary
    return collection


def review_info_to_str(review_info: dict):
    """Render the outline as labeled plain text.

    Format: a Title line, then per section "Section N: name" plus its
    description, then per subsection a "Subsection M" / "Description M" pair.
    """
    parts = [f"Title: {review_info['title']}\n"]
    for sec_no, section in enumerate(review_info["sections"], start=1):
        parts.append(f"Section {sec_no}: {section['name']}\n{section['description']}\n")
        for sub_no, subsection in enumerate(section["subsections"], start=1):
            parts.append(
                f"Subsection {sub_no}: {subsection['subsection_name']}\n"
                f"Description {sub_no}: {subsection['subsection_description']}\n"
            )
    return "".join(parts)


def review_info_to_number_str(review_info: dict):
    """Render the outline with hierarchical numbering ("1.", "  1.1.", ...).

    Used where the LLM must refer back to outline positions by number
    (e.g. when assigning papers to subsections).
    """
    lines = [f"Title: {review_info['title']}\n"]
    for sec_no, section in enumerate(review_info["sections"], start=1):
        lines.append(f"{sec_no}. {section['name']}\n{section['description']}\n")
        for sub_no, subsection in enumerate(section["subsections"], start=1):
            lines.append(
                f"  {sec_no}.{sub_no}. {subsection['subsection_name']}\n"
                f"  Description: {subsection['subsection_description']}\n"
            )
    return "".join(lines)


def review_info_to_md(review_info: dict):
    """Render the finished review as Markdown.

    Title becomes an H1, sections H2, subsections H3 followed by their
    written 'content'. Every heading/body is followed by a blank line.
    """
    chunks = [f"# {review_info['title']}\n\n"]
    for section in review_info['sections']:
        chunks.append(f"## {section['name']}\n\n")
        for subsection in section['subsections']:
            chunks.append(
                f"### {subsection['subsection_name']}\n\n"
                f"{subsection['content']}\n\n"
            )
    return "".join(chunks)


def title_compare(title1: str, title2: str):
    """Loose title equality: case-insensitive after trimming whitespace
    and then leading/trailing dots from both titles."""
    def normalize(t: str) -> str:
        # Whitespace is trimmed first, then edge dots, matching how
        # citation titles typically differ only by a trailing period.
        return t.strip().strip('.').lower()
    return normalize(title1) == normalize(title2)


def process_references(text: str, ref_all: List[dict]):
    """Turn inline "[title, chunk_id]" citations into numbered superscripts
    and build a "## References" section.

    Args:
        text: Markdown text containing citations of the form "[Title, 3]".
        ref_all: Known papers as dicts with at least 'title' and 'year';
            used to look up the year printed next to each reference.

    Returns:
        (new_text, ref_text): the text with citations replaced by
        "<sup>N</sup>" markers, and the numbered reference list.
    """
    pattern = re.compile(r'\[(.+?),\s*(\d+)\]')

    # 1. Collect unique (title, chunk_id) citations in order of appearance.
    #    A set gives O(1) dedup instead of O(n) list membership per citation.
    references = []
    seen = set()
    for title, chunk_id in pattern.findall(text):
        ref = (title.strip(), int(chunk_id))
        if ref not in seen:
            seen.add(ref)
            references.append(ref)

    # 2. Sort alphabetically by title (stable sort, so citations sharing a
    #    title keep their appearance order).
    references.sort(key=lambda ref: ref[0])

    # 3. Map each citation to its 1-based position in the sorted list.
    ref_index = {ref: idx for idx, ref in enumerate(references, start=1)}

    # 4. Replace each inline citation with its superscript number.
    new_text = pattern.sub(
        lambda m: f"<sup>{ref_index[(m.group(1).strip(), int(m.group(2)))]}</sup>",
        text,
    )

    # 5. Build a normalized-title -> year lookup once, instead of a linear
    #    scan over ref_all per reference. Normalization matches the loose
    #    comparison used elsewhere (trim whitespace, then edge dots, lower).
    #    setdefault keeps the FIRST entry for a title, preserving the
    #    original first-match semantics.
    def _norm(t: str) -> str:
        return t.strip().strip('.').lower()

    year_by_title = {}
    for r in ref_all:
        year_by_title.setdefault(_norm(r['title']), r['year'])

    # 6. Emit the numbered reference list.
    ref_text = "## References\n\n"
    for ref_id, (title, chunk_id) in enumerate(references, start=1):
        ref_year = year_by_title.get(_norm(title), "")
        ref_year = f"{ref_year}, " if ref_year else ""
        if title.endswith("."):
            title = title[:-1]
        ref_text += f"[{ref_id}] {title}. {ref_year}chunk {chunk_id}\n"

    return new_text, ref_text


def test_keyword_expand(query, file_path="", local=False, api_key=zhipu_api_key, base="results"):
    """End-to-end literature-review pipeline for a given topic.

    Stages: expand the topic into keywords -> retrieve papers -> build and
    refine an outline -> retrieve extra papers per subsection -> assign
    papers -> write each subsection -> optionally insert tables -> render
    Markdown and resolve citations. Intermediate artifacts are dumped to
    JSON/Markdown files along the way.

    Args:
        query: The review topic/theme.
        file_path: Path passed to utils.dump_text for the final output.
        local: If True, use api.LocalLLM for both roles instead of Zhipu.
        api_key: Zhipu API key (unused when local=True).
        base: Base directory passed to utils.dump_text for the final output.
    """
    # Initialize LLM clients: `llm` (GLM-4-Flash) for cheap bulk calls,
    # `llm_writer` (GLM-4-Plus) for higher-quality writing tasks.
    if local:
        llm = api.LocalLLM()
    else:
        llm = api.ZhipuLLM(api_key, model="GLM-4-Flash", max_concurrent_requests=100)

    if local:
        llm_writer = api.LocalLLM()
    else:
        llm_writer = api.ZhipuLLM(api_key, model="GLM-4-Plus", max_concurrent_requests=100)

    ps = api.LiteratureSearchAPI(paper_search_api_url, save_json=False)

    # Problem classification step (currently disabled).
    # problem_classification.do("problem", query)
    # classify_result = llm.query(str(problem_classification))
    # overall_tokens += problem_classification.tokens
    # print(f"分类结果: {classify_result}")

    # --- Stage 1: keyword expansion ---
    # Expand the theme into a description, then derive search keywords.
    description_expand.do("theme", query)
    description_expand_result = llm.query(str(description_expand))

    keyword_expand_from_description.do("theme", query)
    keyword_expand_from_description.do("description", description_expand_result)
    keyword_expand_from_description_result = llm.query(str(keyword_expand_from_description))
    # Keywords come back one per line, possibly as "- keyword" bullet items.
    keywords = keyword_expand_from_description_result.split("\n")
    print(f"关键词扩展结果: {keywords}")

    # Score each keyword's relevance to the theme; keep only relevant ones.
    filtered_keywords = []
    keyword_relativity_score.do("theme", query)
    for keyword in keywords:
        if keyword.startswith("- "):
            keyword = keyword[2:]
        keyword_relativity_score.do("keyword", keyword)
        keyword_relativity_score_result = llm.query(str(keyword_relativity_score))
        # NOTE(review): substring test — any verdict containing "not" is
        # dropped; assumes the prompt's positive verdicts never contain
        # the word "not". Confirm against the prompt definition.
        if "not" not in keyword_relativity_score_result:
            filtered_keywords.append({
                "keyword": keyword,
                "relativity": keyword_relativity_score_result
            })
    
    # --- Stage 2: retrieval ---
    # Fetch 10 results per surviving keyword, merge, de-duplicate.
    sr_list = ps.batch_search_by_query(
        [k["keyword"] for k in filtered_keywords],
        10
    )

    collection = Collection()

    for index, keyword in enumerate(filtered_keywords):
        sr = SearchResult.from_list(sr_list[index])
        for r in sr.docs:
            collection.add(r)
    
    collection.remove_duplicates()
    collection.dump()
    
    print(f"文献片段数量: {len(collection.docs)}")
    print(llm.get_usage_info())

    # --- Stage 3: outline generation ---
    # First a rough 5-section outline from the retrieved paper titles...
    rough_outline.do("paper_list", collection.get_title_list())
    rough_outline.do("section_num", 5)
    rough_outline.do("topic", query)

    rough_outline_result = llm.query(str(rough_outline))
    title, sections, descriptions = rough_outline_parser(rough_outline_result)

    print(f"一级大纲生成完毕。")

    review_info = {
        "title": title,
        "sections": [{
            "name": sections[i],
            "description": descriptions[i],
            "subsections": []
        } for i in range(len(sections))]
    }

    utils.dump_json(review_info, "outline.json")

    # Ensure every chunk has a summary before subsection planning uses them.
    collection = summarize_weakly_chunk(collection, llm)

    # ...then a subsection outline per section, batched into one LLM call.
    section_query_prompt = []
    for section, description in zip(sections, descriptions):

        sub_section_outline.do("topic", query)
        sub_section_outline.do("paper_list", collection.get_chunk_summary())
        sub_section_outline.do("overall_outline", rough_outline_result)
        sub_section_outline.do("section_name", section)
        sub_section_outline.do("section_description", description)

        section_query_prompt.append(str(sub_section_outline))

    sub_section_outline_result = llm.batch_query(section_query_prompt)
    for i, (result, section, description) in enumerate(zip(sub_section_outline_result, sections, descriptions)):
        subsections, subdescriptions = subsections_parser(result)
        review_info["sections"][i]["subsections"] = [
            {
                "subsection_name": subsections[j],
                "subsection_description": subdescriptions[j],
            } for j in range(len(subsections))
        ]

    utils.dump_json(review_info, "outline_with_subsections.json")
    print(f"小节大纲生成完毕。")
    print(llm.get_usage_info())

    # --- Stage 4: outline review and revision ---
    # The stronger model critiques the outline...
    advice_on_modify_outline.do("topic", query)
    advice_on_modify_outline.do("overall_outline", review_info_to_str(review_info))
    advice_on_modify_outline_result = llm_writer.query(str(advice_on_modify_outline))
    print(f"大纲建议生成完毕。")
    utils.dump_text(advice_on_modify_outline_result, "advice_on_modify_outline_result.md")

    # ...and the outline is rewritten according to the advice.
    # NOTE(review): this rewrite uses `llm` (Flash), not `llm_writer` —
    # confirm the cheaper model is intentional here.
    modify_outline.do("topic", query)
    modify_outline.do("overall_outline", review_info_to_str(review_info))
    modify_outline.do("advice", advice_on_modify_outline_result)
    modify_outline_result = llm.query(str(modify_outline))
    print(f"大纲修改完毕。")

    utils.dump_text(modify_outline_result, "final_outline.md")
    review_info = modify_outline_parser(modify_outline_result)

    utils.dump_json(review_info, "final_outline.json")

    # --- Stage 5: per-subsection retrieval expansion ---
    # Search 5 extra papers per subsection using "section subsection" text.
    sr_subsection_extend_papers = []
    for section in review_info["sections"]:
        for subsection in section["subsections"]:
            sr_subsection_extend_papers.append(f"{section['name']} {subsection['subsection_name']}")
    extended_papers_list = ps.batch_search_by_query(
        [e for e in sr_subsection_extend_papers],
        5
    )
    extended_papers_list = [SearchResult.from_list(sr) for sr in extended_papers_list]
    # Collect summary prompts for all newly retrieved chunks...
    all_summary_prompts = []
    doc_mapping = []  # records which document each prompt belongs to
    for sr in extended_papers_list:
        for d in sr.docs:
            summary_weakly_chunk.do("paper_fragment", d.text)
            all_summary_prompts.append(str(summary_weakly_chunk))
            doc_mapping.append((sr, d))
    
    # ...summarize them all in one batch...
    all_summary_results = llm_writer.batch_query(all_summary_prompts)
    
    # ...and write each summary back onto its document.
    for (sr, d), summary_result in zip(doc_mapping, all_summary_results):
        d.summary = summary_result
    # Attach the extended papers to their subsections; `i` walks
    # extended_papers_list in the same (section, subsection) order as above.
    i = 0
    for section in review_info["sections"]:
        for subsection in section["subsections"]:
            if "paper_list" not in subsection:
                subsection["paper_list"] = []
            for d in extended_papers_list[i].docs:
                subsection["paper_list"].append({
                    "title": d.title,
                    "chunk_id": d.entity.chunk_id,
                    "text": d.summary,
                    "year": d.entity.year
                })
            i += 1

    # --- Stage 6: assign originally retrieved chunks to subsections ---
    arr_prompt = []
    for d in collection.docs:
        arrange_papers.do("paper_title", d.title)
        arrange_papers.do("paper_text", d.summary)
        arrange_papers.do("overall_outline", review_info_to_number_str(review_info))
        arr_prompt.append(str(arrange_papers))

    arr_result = llm.batch_query(arr_prompt)
    for r, d in zip(arr_result, collection.docs):
        pr = arrange_papers_parser(r)
        if not pr:
            continue
        # Each entry is expected to look like "2.3" (1-based
        # section.subsection); malformed or out-of-range entries are
        # skipped with a logged error rather than aborting the run.
        for p in pr:
            try:
                section_idx, subsection_idx = p.split(".")
                section_idx = int(section_idx)
                subsection_idx = int(subsection_idx)
                if "paper_list" not in review_info["sections"][section_idx-1]["subsections"][subsection_idx-1]:
                    review_info["sections"][section_idx-1]["subsections"][subsection_idx-1]["paper_list"] = []
                review_info["sections"][section_idx-1]["subsections"][subsection_idx-1]["paper_list"].append({
                    "title": d.title,
                    "chunk_id": d.entity.chunk_id,
                    "text": d.summary,
                    "year": d.entity.year
                })
            except Exception as e:
                print(f"Error: {e}")
                continue

    utils.dump_json(review_info, "final_outline_with_paper_list.json")
    print(f"文章分配完毕。")
    print(llm.get_usage_info())

    # --- Stage 7: write every subsection ---
    with_table = []  # subsection names that get a table (more than 8 papers)
    ref_all = []     # every paper dict seen; used to resolve citation years

    title = review_info['title']
    sections = review_info['sections']

    write_plan_prompt = []

    for section in sections:

        section_name = section['name']

        for subsection in section['subsections']:

            subsection_name = subsection['subsection_name']
            subsection_description = subsection['subsection_description']

            paper_list = subsection['paper_list']
            ref_all.extend(paper_list)
            if len(paper_list) > 8:
                with_table.append(subsection_name)
            
            subsection_writing.do("topic", title)
            subsection_writing.do("overall_outline", review_info_to_str(review_info))
            subsection_writing.do("subsection_name", subsection_name)
            subsection_writing.do("section_name", section_name)
            subsection_writing.do("description", subsection_description)
            subsection_writing.do("paper_list", paper_list)

            write_plan_prompt.append(str(subsection_writing))
    
    write_plan_result = llm_writer.batch_query(write_plan_prompt)
    write_plan_result = [subsection_writing_parser(r) for r in write_plan_result]

    # Results come back in the same flattened (section, subsection) order.
    j = 0
    for index_section, section in enumerate(sections):
        for index_subsection, subsection in enumerate(section['subsections']):
            review_info['sections'][index_section]['subsections'][index_subsection]['content'] = write_plan_result[j]
            j += 1

    utils.dump_json(review_info, "final_result.json")
    print(f"撰写完毕。")

    # --- Stage 8: insert tables into paper-heavy subsections ---
    # NOTE(review): subsections are matched by name — duplicate subsection
    # names would collide in map_subsection and emit duplicate prompts.
    print(with_table)
    table_prompt = []
    map_subsection = {}
    for section_name in with_table:
        for index_section, section in enumerate(sections):
            for index_subsection, subsection in enumerate(section['subsections']):
                if subsection['subsection_name'] == section_name:
                    map_subsection[section_name] = (index_section, index_subsection)
                    find_table_insert_position.do("section", subsection['content'])
                    table_prompt.append(str(find_table_insert_position))
    table_result = llm_writer.batch_query(table_prompt)
    to_be_inserted_prompt = []
    to_be_inserted_list = []
    for section_name, result in zip(with_table, table_result):
        if result == "Yes":
            section_idx, subsection_idx = map_subsection[section_name]
            c = sections[section_idx]['subsections'][subsection_idx]['content']
            insert_table_prompt.do("section", c)
            to_be_inserted_prompt.append(str(insert_table_prompt))
            to_be_inserted_list.append((section_idx, subsection_idx))
    to_be_inserted_result = llm_writer.batch_query(to_be_inserted_prompt)
    # Prepend each generated table to its subsection's content. `sections`
    # aliases review_info['sections'], so review_info sees these edits too.
    for r, (section_idx, subsection_idx) in zip(to_be_inserted_result, to_be_inserted_list):
        r = insert_table_parser(r)
        sections[section_idx]['subsections'][subsection_idx]['content'] = r + "\n\n" + sections[section_idx]['subsections'][subsection_idx]['content']

    # --- Stage 9: render Markdown and resolve citations ---
    final_text = review_info_to_md(review_info)
    utils.dump_text(final_text, "final_text.md")

    final_text, references = process_references(final_text, ref_all)
    final = f"{final_text}\n\n{references}"
    utils.dump_text(final, "final_text_with_references.md")
    print(f"处理结构和引用完毕。")

    print(f"已全部完成。")

    utils.dump_text(text=final, file_path=file_path, base=base)

    print(f"总消耗: \n flash: {llm.get_usage_info()} \n plus: {llm_writer.get_usage_info()}")


if __name__ == "__main__":
    # Script entry point: generate a full literature review for the given
    # topic; the final Markdown is written to review.md under "results".
    test_keyword_expand("Retrieval-Augmented Generation", file_path="review.md", local=False, base="results")
    # Leftover batch-query benchmark experiment (disabled):
    # keyword_expand.do("num", 5)
    # keyword_expand.do("theme", "Poisoning Attacks")
    # bq = []
    # llm = api.ZhipuLLM(zhipu_api_key, model="GLM-4-Plus", max_concurrent_requests=200)
    # for i in range(200):
    #     bq.append(str(keyword_expand))
    
    # result = llm.batch_query(bq)
    # print(result[:5])
    # print(llm.get_usage_info())