from prompt import *
import api
from doc import Doc, Query
from config import zhipu_api_key, paper_search_api_url
import utils
from typing import Dict, Tuple
import re

def flow(query, api_key=zhipu_api_key, save_loc=None, local=False):
    """Run the full survey-generation pipeline for *query*.

    Pipeline stages: keyword expansion -> literature search -> rough
    (level-1) outline -> per-section sub-outlines -> per-subsection
    writing -> abstract -> final markdown with numbered citations and a
    References section.

    Args:
        query: Research topic / question to survey.
        api_key: Zhipu API key; ignored when ``local`` is True.
        save_loc: Output path for the final markdown with citations.
            Defaults to ``final_text_with_citations.md``.
        local: When True, use a local LLM instead of the Zhipu API.
    """

    overall_tokens = 0  # running total of LLM tokens consumed

    # Initialize the LLM backend.
    if local:
        llm = api.LocalLLM()
    else:
        llm = api.ZhipuLLM(api_key)

    ps = api.LiteratureSearchAPI(paper_search_api_url, save_json=False)

    # Classification (currently disabled)
    # problem_classification.do("problem", query)
    # classify_result = llm.query(str(problem_classification))

    # --- Keyword expansion -------------------------------------------------
    keyword_expand.do("num", 5)
    keyword_expand.do("theme", query)
    keyword_expand_result = llm.query(str(keyword_expand))
    overall_tokens += keyword_expand.tokens
    # Skip blank lines the LLM may emit so we never search an empty string.
    keywords = [k for k in keyword_expand_result.split("\n") if k.strip()]
    print(f"关键词扩展结果: {keywords}")

    # --- Literature search -------------------------------------------------
    # Only the titles feed the rough outline; per-section docs are fetched
    # again below with a section-specific query.
    title_list = []
    for keyword in keywords:
        query_paper = Query.from_list(ps.search_by_query(keyword, 5))
        title_list.extend(query_paper.title_list)

    # --- Rough (level-1) outline -------------------------------------------
    rough_outline.do("paper_list", title_list)
    rough_outline.do("section_num", 7)
    rough_outline.do("topic", query)

    rough_outline_result = llm.query(str(rough_outline))
    overall_tokens += rough_outline.tokens
    title, sections, descriptions = rough_outline_parser(rough_outline_result)

    print(f"一级大纲生成完毕。")

    result_dict = {}
    result_dict["title"] = title
    result_dict["sections"] = []

    section_query_prompt = []
    query_list = []

    # Build one sub-outline prompt per level-1 section; prompts are sent in
    # a single batch afterwards.
    for section, description in zip(sections, descriptions):

        query_p = Query.from_list(ps.search_by_query(section, 20))
        query_list.append(query_p)
        # TODO: retrieve with a larger k, then filter by similarity.

        # NOTE(review): "topic" receives the Query object here, while every
        # other prompt passes the user query / a title — confirm intended.
        sub_section_outline.do("topic", query_p)
        sub_section_outline.do("paper_list", query_p.title_list)
        sub_section_outline.do("overall_outline", rough_outline_result)
        sub_section_outline.do("section_name", section)
        sub_section_outline.do("section_description", description)

        section_query_prompt.append(str(sub_section_outline))
        overall_tokens += sub_section_outline.tokens

    sub_section_outline_result = llm.batch_query(section_query_prompt)

    # Assemble the full outline: per section, its subsections plus the
    # retrieved papers backing it.
    for result, query_p, section, description in zip(sub_section_outline_result, query_list, sections, descriptions):
        subsections, subdescriptions = subsections_parser(result)
        print(f"生成小节信息 [{subsections}]")

        papers = [
            {
                "title": utils.clean_text(t),
                "paper_id": d.paper_id,
                "chunk_id": d.chunk_id,
                "chunk_text": d.chunk_text.origin
            }
            for t, d in zip(query_p.title_list, query_p.doc_list)
        ]
        sub = [
            {"title": s, "description": sd}
            for s, sd in zip(subsections, subdescriptions)
        ]

        result_dict["sections"].append({
            "title": section,
            "description": description,
            "subsections": sub,
            "papers": papers
        })
    # TODO: refine the outline to avoid duplicated content.

    print(f"完整大纲生成完毕")

    utils.dump_json(result_dict, "outline_over.json")

    # --- Writing phase -----------------------------------------------------

    final_dict = {}
    all_papers = []  # every paper cited anywhere; deduplicated by title later

    title = result_dict['title']
    sections = result_dict['sections']

    final_dict['title'] = title
    final_dict['sections'] = []

    section_list = []

    for section in sections:

        title_2nd = section['title']
        description_2nd = section['description']

        # Build the paper context once per section: it is identical for all
        # subsections (the previous version rebuilt it per subsection).
        papers_content = ""
        for p in section['papers']:
            p_title = p['title']
            p_text = p['chunk_text']
            # Cap each chunk at 1000 chars to bound prompt length.
            papers_content += f"<paper_title>{p_title}</paper_title>\n<paper_text>{p_text[:1000]}</paper_text>\n\n"
            all_papers.append(p)

        sub_list = []
        sub_query = []

        for subsection in section['subsections']:

            title_3rd = subsection['title']
            description_3rd = subsection['description']

            sub_list.append({
                'title': title_3rd,
                'description': description_3rd,
            })

            subsection_writing.do("topic", title)
            subsection_writing.do("overall_outline", rough_outline_result)
            subsection_writing.do("subsection_name", title_3rd)
            subsection_writing.do("section_name", title_2nd)
            subsection_writing.do("description", description_3rd)
            subsection_writing.do("paper_list", papers_content)
            subsection_writing.do("word_num", 500)

            sub_query.append(str(subsection_writing))
            overall_tokens += subsection_writing.tokens

        sub_query_result = llm.batch_query(sub_query)
        print(f"小节 [{title_2nd}] 完成。")

        # batch_query preserves order, so results align with sub_list.
        for idx, sub_result in enumerate(sub_query_result):
            sub_list[idx]['content'] = sub_result

        section_list.append({
            'title': title_2nd,
            'description': description_2nd,
            'subsections': sub_list
        })

    final_dict['sections'] = section_list

    utils.dump_json(final_dict, "final_dict.json")

    # --- Abstract ----------------------------------------------------------
    abstract_writing.do("overall_passage", rough_outline_result)
    abstract_writing.do("topic", query)
    abstract_result = llm.query(str(abstract_writing))
    overall_tokens += abstract_writing.tokens

    final_dict['abstract'] = abstract_result

    print(f"摘要生成完毕。")

    # --- Body text ---------------------------------------------------------

    final_text = final_format(final_dict)
    utils.dump_text(final_text, "final_text.md")
    print(f"正文生成完毕。")

    # --- Citations ---------------------------------------------------------
    def process_citations(markdown_text: str) -> str:
        """Replace ``[title; title]`` citations with numbered superscripts
        and append a References section. Returns the rewritten markdown.
        """
        titles = [p['title'] for p in all_papers]

        # Stable numbering: unique titles, sorted, 1-based.
        title_to_number = {t: num + 1 for num, t in enumerate(sorted(set(titles)))}
        # Case-insensitive fallback reusing the same numbers.
        title_lower_to_number = {t.lower(): num for t, num in title_to_number.items()}
        number_to_title = {num: t for t, num in title_to_number.items()}

        def replace_match(match: re.Match) -> str:
            try:
                citation_text = match.group(1)
                individual_citations = [cite.strip() for cite in citation_text.split(';')]

                numbered_citations = []
                for citation in individual_citations:
                    number = title_to_number.get(citation) or title_lower_to_number.get(citation.lower())
                    if number:
                        numbered_citations.append(str(number))

                # Citations the LLM invented (no matching paper) are dropped.
                if not numbered_citations:
                    return ""

                return f"<sup>{'; '.join(numbered_citations)}</sup>"
            except Exception as e:
                # Keep the original bracketed text rather than corrupting it.
                print(f"处理引用时出错: {e}")
                return match.group(0)

        # Rewrite every [...] span in the text.
        updated_text = re.sub(r'\[(.*?)\]', replace_match, markdown_text)

        # Build the References section, one entry per number.
        references_section = "\n\n## References\n\n"

        for num in sorted(number_to_title.keys()):
            ref_title = number_to_title[num]
            clean_title = ref_title.replace('\n', ' ').strip()
            references_section += f"[{num}] {clean_title}\n\n"

        return updated_text + references_section

    updated_text = process_citations(final_text)

    if save_loc:
        utils.dump_text(updated_text, save_loc)
    else:
        utils.dump_text(updated_text, "final_text_with_citations.md")

    print(f"总共使用tokens数量: {overall_tokens}")

if __name__ == "__main__":
    # Demo entry point: generate a survey for one sample topic; output is
    # written to the relative path passed as save_loc.
    flow("loss function", save_loc="../1.md")
    # flow("Text2SQL研究现状如何，面临哪些挑战", save_loc="../2.md")
    # flow("有哪些方法可以提升大模型的规划能力，各自优劣是什么", save_loc="../3.md")
    # flow("多模态大模型的技术发展路线是什么样的", save_loc="../4.md")