import copy
import time
import asyncio

from agents.paper_reviser import revise_paper
from agents.new_vewsion_paper_search_agent import search_paper_loop
from agents.paper_writer import write_paper
from agents.two_stage_outline_agent import two_stage_generate_outline
from agents.query_understand_and_rewrite_agent import paper_type_classfier, paper_query_rewrite
import argparse

from my_utils.api_request import set_llm_api_token
from my_utils.compress_text import parallel_compressing_process_with_another
from my_utils.data_io import load_files, save_datas, calculate_file_hash
from model_config.config import GLM_4_MODEL_UPPER_LIST


def _print_elapsed(label, start, end):
    """Print an elapsed wall-clock interval as "<label> M 分钟 S 秒"."""
    elapsed = end - start
    print(label, int(elapsed // 60), "分钟", int(elapsed % 60), "秒")


def main():
    """End-to-end survey-paper generation pipeline.

    Steps: (1) query rewrite + survey-type classification, (2) literature
    search, (3) detailed outline generation run in parallel with chunk
    compression, (4) section-by-section drafting, (5) revision. The final
    Markdown and every intermediate artifact are persisted via ``save_datas``
    so any step can be resumed from the commented-out ``load_files`` lines.
    """
    parser = argparse.ArgumentParser(description="解析 topic 和 token")

    # User question. Kept optional with a demo default for local runs;
    # re-enable the required=True variant for production use.
    # parser.add_argument('--topic', type=str, required=True, default="", help='用户问题')
    parser.add_argument('--topic', type=str, required=False, default="在多模态模型的训练上，文本和视觉是如何做对齐的，请对比分析下不同方法的优劣", help='用户问题')

    # API token for the LLM backend.
    parser.add_argument('--token', type=str, required=False, default="", help='API Token')

    args = parser.parse_args()
    topic = args.topic
    api_token = args.token

    # Register the token for every GLM-4 model endpoint used downstream.
    set_llm_api_token(GLM_4_MODEL_UPPER_LIST, api_token)

    # Overall wall-clock timer for the whole pipeline.
    start_time = time.time()

    # Step 1: classify the survey type, then rewrite the query into
    # search keywords conditioned on that classification.
    user_summary_type = paper_type_classfier(user_requirement=topic)
    keywords = paper_query_rewrite(user_requirement=topic, classification=user_summary_type)

    # Step 2: literature search (async), then persist the raw results.
    start_time_search = time.time()
    origin_result = asyncio.run(search_paper_loop(topic, user_summary_type, keywords=keywords, papers_count=200, rerank=True))
    save_datas(origin_result, "origin_paper_search_result.json")
    _print_elapsed("文献搜集用时：", start_time_search, time.time())

    # Resume point: reload the search results instead of re-searching.
    # origin_result = load_files(["origin_paper_search_result.json"])[0]

    # Step 3: generate the detailed outline while compressing the paper
    # chunks in parallel — the two tasks are independent of each other.
    start_time_parallel = time.time()
    (detail_outline, paper_index_dict), compressed_texts = parallel_compressing_process_with_another(
        uncompressed_text_list=[item['entity']['chunk_text'] for item in origin_result.values()],
        parallel_function=two_stage_generate_outline,
        parallel_args=[topic, user_summary_type, origin_result],
        compression_rate=0.9,
        model_path="./bert-base-multilingual-cased",
    )
    # Rebuild the search results with compressed chunk text, leaving
    # origin_result untouched (the revision step still needs full chunks).
    compressed_result = copy.deepcopy(origin_result)
    for paper_id, compressed_text in zip(origin_result.keys(), compressed_texts):
        compressed_result[paper_id]['entity']['chunk_text'] = compressed_text
    save_datas(compressed_result, "compressed_result.json")

    # Sequential fallback (no compression):
    # detail_outline, paper_index_dict = two_stage_generate_outline(topic, user_summary_type, origin_result)

    _print_elapsed("并行用时：", start_time_parallel, time.time())
    save_datas(detail_outline, "detail_outline.json")
    save_datas(paper_index_dict, "paper_index_dict.json")

    # Resume point: reload all step-2/3 artifacts.
    # origin_result, compressed_result, paper_index_dict, detail_outline = load_files(["origin_paper_search_result.json", "compressed_result.json", "paper_index_dict.json", "detail_outline.json"])

    # Step 4: draft the paper from the outline and compressed chunks (async).
    paper_draft_dict, ref_id_list = asyncio.run(write_paper(detail_outline, paper_index_dict, compressed_result))
    save_datas(paper_draft_dict, "paper_draft_dict.json")
    save_datas(ref_id_list, "ref_id_list.json")

    # Resume point: reload the draft and its reference list.
    # paper_draft_dict ,ref_id_list = load_files(["paper_draft_dict.json","ref_id_list.json"])

    # Step 5: revise the draft against the original (uncompressed) chunks.
    paper_final = revise_paper(paper_draft_dict, ref_id_list=ref_id_list, selected_chunks=origin_result)

    # Save the final paper, named after the topic's first three characters.
    save_datas(paper_final, f"{topic[:3]}.md")

    print("论文编写完成！")

    _print_elapsed("用时：", start_time, time.time())

    # NOTE(review): the hash is computed from "examples_data/<name>.md" while
    # save_datas above was given just "<name>.md" — presumably save_datas
    # writes into examples_data/; confirm, otherwise this path mismatch
    # raises FileNotFoundError here.
    hash_value = calculate_file_hash(f"examples_data/{topic[:3]}.md")

    print("当前文章对应的MD5码如下：", hash_value)


# Script entry point: run the full pipeline when invoked directly.
if __name__ == "__main__":
    main()
