from agent import QueryRewritingAgent
from agent import MultiChunkConclusionAgent
from agent import CitationAgent
from agent import GenerateOutlineAgent
from agent import Agent
import argparse, json
import time
import os


def main():
    """CLI entry point for the survey-generation pipeline.

    Pipeline: rewrite the user topic into search queries, retrieve chunks
    concurrently, batch-summarize them, generate a rough outline, write each
    section concurrently, then format citations and save the final markdown
    survey to ``--output_path``. Prints total wall-clock time at the end.
    """
    parser = argparse.ArgumentParser(description="大模型科研工具V1.0")
    parser.add_argument('--topic', type=str, required=False, help='用户查询', default="explain to me about transformer")
    parser.add_argument('--model', type=str, required=False, help='基座模型', default="gpt-4o-mini")
    parser.add_argument('--excellent_model', type=str, required=False, help='优质模型', default="gpt-4o-mini")
    parser.add_argument('--token', type=str, required=False, help='API_KEY', default="")
    # BUG FIX: argparse's type=bool is a trap — bool("False") is True, so any
    # non-empty value (including "--debug False") used to enable debug mode.
    # This parser accepts the same spellings but maps falsy ones to False.
    parser.add_argument('--debug', type=lambda v: str(v).lower() not in ("false", "0", "no", ""),
                        required=False, help='DEBUG模式', default=True)
    parser.add_argument('--output_path', type=str, required=False, help='输出路径', default="review.md")
    parser.add_argument('--conclusion_timeout', type=float, required=False, help='最大chunk总结等待时延', default=30)

    args = parser.parse_args()
    if args.token:
        os.environ["ZHIPUAI_API_KEY"] = args.token
    extra_args = {"excellent_model": args.excellent_model}

    model = args.model
    topic = args.topic

    # Start timing the whole pipeline.
    start_time = time.time()

    # Query rewriting, then concurrent chunk retrieval.
    if args.debug:
        print(f">> 用户查询: 【{topic}】")
        print(">> 查询重写:")
    query_rewriting_agent = QueryRewritingAgent(model=model)
    query_list = query_rewriting_agent.rewrite(
        topic=topic,
        num_queries=16,
    )
    if args.debug:
        query_rewriting_agent.PE.print_json(query_list)
        print("-"*100)
        print(">> 并发查询chunk:")
    chunk_list = query_rewriting_agent.retrieve(
        query_list=query_list,
        max_return_size=64
    )

    if args.debug:
        print(f"chunk_list: (共 {len(chunk_list)} 个结果)\n", json.dumps([
            f'{ele["id"]}: {ele["entity"]["paper_title"]}' for ele in chunk_list
        ], indent=2, ensure_ascii=False) + "\n\n...\n")
        print("-"*100)

    # Batch summarization of the retrieved chunks.
    if args.debug:
        print(">> 批量生成总结...")
    multi_agent = MultiChunkConclusionAgent(model=model, DEBUG=args.debug, max_retries=1)
    chunk_list = multi_agent.conclude(
        topic=topic,
        chunk_list=chunk_list,
        return_type="chunk",
        timeout=args.conclusion_timeout,
    )

    # Outline generation: keep only chunks that received a non-empty summary.
    paper_list = [
        {
            "id": ele["id"],
            "title": ele["entity"]["paper_title"],
            "总结": ele["总结"]
        } for ele in chunk_list if ("总结" in ele and ele["总结"])
    ]
    if args.debug:
        print(">> 生成大纲:")
        print(">> paper_list（候选 chunk 数量）: ", len(paper_list))
    GO_agent = GenerateOutlineAgent(model=model)
    # BUG FIX: was an unbounded `while True` retry that silently dropped the
    # exception; a persistently failing model call would spin forever.
    max_outline_attempts = 5
    for _attempt in range(max_outline_attempts):
        try:
            rough_outline = GO_agent.generate_rough_outline(topic=topic, paper_list=paper_list)
            sections = rough_outline["sections"]
            # Attach neighbor-section names so each section writer knows its
            # surrounding context in the outline.
            for i, section in enumerate(sections):
                section["context info"] = {}
                if i > 0:
                    section["context info"]["former section name"] = sections[i - 1]["name"]
                if i < len(sections) - 1:
                    section["context info"]["next section name"] = sections[i + 1]["name"]
            if args.debug:
                GO_agent.PE.print_json(rough_outline)
                print("-"*100)
            break
        except Exception as e:
            print("[error] 生成大纲失败，重新尝试中...")
            print(f"[error] {e!r}")
    else:
        raise RuntimeError(f"outline generation failed after {max_outline_attempts} attempts")

    from concurrent.futures import ThreadPoolExecutor, as_completed
    total_raw_citation = {}
    total_chunk_list = []

    # Write every section concurrently; each worker returns its raw citation
    # text plus the chunks it actually used.
    with ThreadPoolExecutor(max_workers=16) as executor:
        future_to_section = {
            executor.submit(process_section, section, topic, rough_outline, model, args, extra_args): section
            for section in rough_outline["sections"]
        }
        for future in as_completed(future_to_section):
            section, raw_citation, current_chunk_list = future.result()
            total_raw_citation[section["name"]] = raw_citation
            total_chunk_list.extend(current_chunk_list)

    citation_agent = CitationAgent(model=model)

    # Reassemble sections in outline order (as_completed yields out of order).
    final_total_raw_citation = f'# {rough_outline["title"]}\n\n'
    for section in rough_outline["sections"]:
        final_total_raw_citation += f'## {section["name"]}\n\n{total_raw_citation[section["name"]]}\n\n'

    formatted_citation, reference_suffix = citation_agent.format_citation(raw_citation=final_total_raw_citation, chunk_list=total_chunk_list)
    final_citation = formatted_citation + "\n\n" + reference_suffix
    print(">> 综述生成结果：")
    print("-"*100+"\n"+"-"*100+"\n"+"-"*100+"\n")
    print(final_citation)

    # Persist the final survey to disk.
    with open(args.output_path, 'w', encoding='utf-8') as file:
        file.write(final_citation)

    # Report wall-clock time for the whole run.
    end_time = time.time()
    execution_time = end_time - start_time
    print(f"程序执行时间: {execution_time:.2f} 秒")

def process_section(section, topic, rough_outline, model, args, extra_args):
    """Produce the raw cited text for a single outline section.

    Rewrites section-specific queries, retrieves and summarizes supporting
    chunks, then drafts the section with the premium model (falling back to
    the base model on a timeout).

    Returns a ``(section, raw_citation, chunk_list)`` triple so the caller
    can reassemble results that arrive out of order.
    """
    divider = "-" * 100
    banner = (divider + "\n") * 3

    if args.debug:
        print(">> [process_section] ...")
        print(banner)

    base_agent = Agent()
    section_topic = f'原始意图: {topic}\n综述标题（拟）: {rough_outline["title"]}\n当前章节: {base_agent.PE.as_json(section)}\n\n'

    # Section-scoped retrieval: fewer queries and a smaller return budget
    # than the top-level pass.
    rewriter = QueryRewritingAgent(model=model)
    section_queries = rewriter.rewrite(
        topic=section_topic,
        num_queries=8,
    )
    section_chunks = rewriter.retrieve(
        query_list=section_queries,
        max_return_size=16
    )

    if args.debug:
        print(">> [section] 批量生成总结...")

    concluder = MultiChunkConclusionAgent(model=model, DEBUG=args.debug, max_retries=1)
    section_chunks = concluder.conclude(
        topic=section_topic,
        chunk_list=section_chunks,
        return_type="chunk",
        return_relevant=True,
        timeout=args.conclusion_timeout,
    )
    if args.debug:
        print(f"chunk_list: (共 {len(section_chunks)} 个结果)\n", json.dumps(section_chunks, indent=2, ensure_ascii=False)[:256] + "\n\n...\n")
        print(divider)

    # Try the premium model first; on a timeout marker in the output, redo
    # the draft once with the base model.
    raw_citation = ""
    for candidate_model in (extra_args["excellent_model"], model):
        writer = CitationAgent(model=candidate_model, DEBUG=args.debug)
        raw_citation = writer.cite_section(
            section_topic=section_topic,
            chunk_list=section_chunks,
            DEBUG=args.debug,
            enable_citation_formatting=False,
        )
        if "Request timed out" not in raw_citation:
            break

    raw_citation = raw_citation.replace("```", "")
    if args.debug:
        print(">> raw_citation:")
        print(banner)
        print(raw_citation)
        print(banner)
    return section, raw_citation, section_chunks

# Script entry point.
if __name__ == "__main__":
    main()