from agent import QueryRewritingAgent
from agent import MultiChunkConclusionAgent
from agent import CitationAgent
from agent import GenerateOutlineAgent
from agent import Agent
import argparse, json
import time
import os
from colorama import Fore, Style
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
from tqdm import tqdm
import hashlib


def main():
    """End-to-end survey-generation pipeline.

    Steps: parse CLI args -> rewrite the topic into sub-queries -> retrieve
    chunks -> batch-summarize them -> generate a rough outline -> draft every
    section in parallel worker processes -> format citations and write the
    final markdown survey to ``args.output_path``.
    """

    def _str_to_bool(value):
        # argparse's ``type=bool`` is a classic trap: bool("False") is True
        # because every non-empty string is truthy.  Parse the usual textual
        # spellings of "false"; anything else counts as True.
        if isinstance(value, bool):
            return value
        return value.strip().lower() not in ("false", "0", "no", "off", "")

    parser = argparse.ArgumentParser(description="大模型科研工具V1.0")
    parser.add_argument('--topic', type=str, required=False, help='用户查询', default="explain to me about transformer")
    parser.add_argument('--model', type=str, required=False, help='基座模型', default="glm-4-airx")
    parser.add_argument('--excellent_model', type=str, required=False, help='优质模型', default="glm-4-airx")
    parser.add_argument('--token', type=str, required=False, help='API_KEY', default="")
    # BUG FIX: was ``type=bool``, which made "--debug False" still enable
    # debug mode; the default (True) is unchanged.
    parser.add_argument('--debug', type=_str_to_bool, required=False, help='DEBUG模式', default=True)
    parser.add_argument('--output_path', type=str, required=False, help='输出路径', default="review.md")
    parser.add_argument('--conclusion_timeout', type=float, required=False, help='最大chunk总结等待时延', default=10)

    args = parser.parse_args()
    if args.token:
        os.environ["ZHIPUAI_API_KEY"] = args.token
    extra_args = {"excellent_model": args.excellent_model}

    model = args.model
    topic = args.topic

    # Start timing; per-stage wall-clock totals are collected in time_count.
    start_time = time.time()
    time_count = {}

    # Stage 1: query rewriting followed by concurrent chunk retrieval.
    if args.debug:
        print(f">> 用户查询: 【{Fore.GREEN}{topic}{Style.RESET_ALL}】")
        print(f"{Fore.YELLOW}>> 查询重写:{Style.RESET_ALL}")
    query_rewriting_agent = QueryRewritingAgent(model=model)
    query_list = query_rewriting_agent.rewrite(
        topic=topic,
        num_queries=16,
    )
    if args.debug:
        query_rewriting_agent.PE.print_json(query_list)
        print("-" * 100)
        end_time = time.time()
        execution_time = end_time - start_time
        time_count["查询改写"] = execution_time
        print(f"{Fore.CYAN}>>>> 当前程序执行时间: {execution_time:.2f} 秒{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}>> 并发查询chunk:{Style.RESET_ALL}")
    raw_chunk_list = query_rewriting_agent.retrieve(
        query_list=query_list,
        max_return_size=128
    )

    # Stage 2: batch summarization of the retrieved chunks.
    if args.debug:
        end_time = time.time()
        execution_time = end_time - start_time
        time_count["并发查询"] = execution_time
        print(f"{Fore.CYAN}>>>> 当前程序执行时间: {execution_time:.2f} 秒{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}>> 批量生成总结...{Style.RESET_ALL}")
    multi_agent = MultiChunkConclusionAgent(model=model, DEBUG=args.debug, max_retries=1)
    chunk_list = multi_agent.conclude(
        topic=topic,
        chunk_list=raw_chunk_list,
        return_type="chunk",
        timeout=args.conclusion_timeout,
        expected_return_workers=128,
    )

    print(len(chunk_list))
    # Too few chunks summarized within the timeout: pad with raw chunks whose
    # "总结" (summary) is just the first 512 characters of their text.
    if len(chunk_list) < 32:
        summarized_ids = {chunk["id"] for chunk in chunk_list}
        for raw_chunk in raw_chunk_list:
            if raw_chunk["id"] not in summarized_ids:
                raw_chunk["总结"] = raw_chunk["entity"]["chunk_text"][:512]
                chunk_list.append(raw_chunk)

    print(len(chunk_list))

    # Stage 3: outline generation from (at most 64) summarized papers.
    paper_list = [
        {
            "id": ele["id"],
            "title": ele["entity"]["paper_title"],
            "总结": ele["总结"],
        } for ele in chunk_list if ("总结" in ele and ele["总结"])
    ][:64]
    print(len(paper_list))

    if args.debug:
        end_time = time.time()
        execution_time = end_time - start_time
        time_count["批量生成总结"] = execution_time
        print(f"{Fore.CYAN}>>>> 当前程序执行时间: {execution_time:.2f} 秒{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}>> 生成大纲:{Style.RESET_ALL}")
        print(f"{Fore.GREEN}>> paper_list（候选 chunk 数量）: {len(paper_list)} {Style.RESET_ALL}")
    GO_agent = GenerateOutlineAgent(model=model)
    max_outline_attempts = 5
    for _ in range(max_outline_attempts):
        try:
            rough_outline = GO_agent.generate_rough_outline(topic=topic, paper_list=paper_list)
            sections = rough_outline["sections"]
            # Annotate each section with its neighbours' names so each section
            # writer knows its surrounding context.
            for i, sec in enumerate(sections):
                sec["context info"] = {}
                if i > 0:
                    sec["context info"]["former section name"] = sections[i - 1]["name"]
                if i < len(sections) - 1:
                    sec["context info"]["next section name"] = sections[i + 1]["name"]
            if args.debug:
                GO_agent.PE.print_json(rough_outline)
                print("-" * 100)
            break
        except Exception as e:
            # BUG FIX: the exception was caught but never reported and the loop
            # could spin forever; log the cause and bound the retries.
            print(f"{Fore.RED}[error] 生成大纲失败，重新尝试中...{Style.RESET_ALL} ({e})")
    else:
        raise RuntimeError(f"生成大纲失败: 连续 {max_outline_attempts} 次尝试均未成功")

    total_raw_citation = {}
    total_chunk_list = []

    if args.debug:
        end_time = time.time()
        execution_time = end_time - start_time
        time_count["生成大纲"] = execution_time
        print(f"{Fore.CYAN}>>>> 当前程序执行时间: {execution_time:.2f} 秒{Style.RESET_ALL}")
        print(f"{Fore.YELLOW}>> 生成单元内容:{Style.RESET_ALL}")

    # Stage 4: draft every section in parallel worker processes.  The submitted
    # callable must be a picklable module-level function.
    with ProcessPoolExecutor(max_workers=32) as executor:
        future_to_section = {
            executor.submit(process_section_concurrently, section, topic, rough_outline, model, args, extra_args): section
            for section in rough_outline["sections"]
        }
        for future in tqdm(as_completed(future_to_section), total=len(future_to_section), desc=f"{Fore.LIGHTGREEN_EX}Processing sections{Style.RESET_ALL}"):
            section, raw_citation, current_chunk_list = future.result()
            total_raw_citation[section["name"]] = raw_citation
            total_chunk_list.extend(current_chunk_list)

    # Stage 5: stitch the sections together and normalize citations/references.
    citation_agent = CitationAgent(model=model)
    final_total_raw_citation = f'# {rough_outline["title"]}\n\n'
    for section in rough_outline["sections"]:
        final_total_raw_citation += f'## {section["name"]}\n\n{total_raw_citation[section["name"]]}\n\n'

    formatted_citation, reference_suffix = citation_agent.format_citation(raw_citation=final_total_raw_citation, chunk_list=total_chunk_list)
    final_citation = formatted_citation + "\n\n" + reference_suffix

    # Persist the finished survey.
    with open(args.output_path, 'w', encoding='utf-8') as file:
        file.write(final_citation)

    # Final accounting: total runtime, word count and content hash.
    end_time = time.time()
    execution_time = end_time - start_time
    time_count["并发单元生成"] = execution_time
    total_word_count = count_words(final_citation)
    current_file_hash = calculate_file_hash(args.output_path)

    GO_agent.PE.print_json(time_count)
    print(f"{Fore.YELLOW}>> 当前文章 MD5 码:{Style.RESET_ALL} {Fore.LIGHTCYAN_EX}{current_file_hash} {Style.RESET_ALL}")
    print(f"{Fore.YELLOW}>> 总程序执行时间: {execution_time:.2f} 秒{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}>> 总生成字数: {total_word_count} 字{Style.RESET_ALL}")
    # BUG FIX: guard against dividing by a zero elapsed time.
    speed = total_word_count / execution_time if execution_time > 0 else float("inf")
    print(f"{Fore.YELLOW}>> 生成速度: {speed} 字/秒{Style.RESET_ALL}")
    

def calculate_file_hash(file_path, algorithm="md5"):
    """Return the hex digest of the file at *file_path*.

    Reads in 8 KiB chunks so arbitrarily large files never need to fit in
    memory.

    Args:
        file_path: Path of the file to hash.
        algorithm: Any algorithm name accepted by :func:`hashlib.new`
            (default ``"md5"``, matching the original behaviour).

    Returns:
        The hexadecimal digest string.
    """
    # hashlib.new() replaces the original's obscure getattr(hashlib, "md5")()
    # and allows the caller to pick a different algorithm.
    hash_func = hashlib.new(algorithm)
    with open(file_path, "rb") as f:
        while chunk := f.read(8192):
            hash_func.update(chunk)
    return hash_func.hexdigest()
    
def count_words(text):
    """Return the number of whitespace-separated tokens in *text*."""
    return len(text.split())

def process_section_concurrently(section, topic, rough_outline, model, args, extra_args):
    """Module-level delegate for ``process_section``.

    ProcessPoolExecutor requires a picklable top-level callable, so this
    wrapper simply forwards all arguments unchanged.
    """
    result = process_section(section, topic, rough_outline, model, args, extra_args)
    return result
def process_section(section, topic, rough_outline, model, args, extra_args):
    """Draft the body text (with raw citations) for one outline section.

    Pipeline: build a section-scoped prompt, rewrite it into 8 sub-queries,
    retrieve up to 32 candidate chunks, batch-summarize them, then have a
    CitationAgent write the section text; citation formatting is deferred to
    the caller (``enable_citation_formatting=False``).

    Args:
        section: Outline-section dict (must contain "name"; it is embedded
            verbatim into the prompt via ``PE.as_json``).
        topic: The user's original query.
        rough_outline: Full outline dict; its "title" goes into the prompt.
        model: Base model name for rewriting/summarizing (and the fallback
            citation pass).
        args: Parsed CLI namespace; uses ``debug`` and ``conclusion_timeout``.
        extra_args: Must contain "excellent_model" — the preferred model for
            the first citation pass.

    Returns:
        ``(section, raw_citation, chunk_list)``: the input section, the
        drafted markdown text (code fences stripped), and the chunks available
        for later citation formatting.
    """
    if args.debug:
        print(f'>> [process_section] ... {Fore.LIGHTCYAN_EX}{section["name"]}{Style.RESET_ALL}')
        # print("-"*100+"\n"+"-"*100+"\n"+"-"*100+"\n")

    # Compose a prompt that anchors this section inside the overall survey.
    global_agent = Agent()
    section_topic = f'Raw intention: {topic}\nSurvey title: {rough_outline["title"]}\nCurrent section: {global_agent.PE.as_json(section)}\n\n'

    # Rewrite the section prompt into sub-queries, then retrieve chunks.
    query_rewriting_agent = QueryRewritingAgent(model=model)
    query_list = query_rewriting_agent.rewrite(
        topic=section_topic,
        num_queries=8,
    )
    raw_chunk_list = query_rewriting_agent.retrieve(
        query_list=query_list,
        max_return_size=32
    )
    if args.debug:
        print(f"{Fore.YELLOW}>> [section] 批量生成总结...{Style.RESET_ALL}")

    # Summarize the retrieved chunks; waits at most args.conclusion_timeout.
    multi_agent = MultiChunkConclusionAgent(model=model, DEBUG=args.debug, max_retries=1)
    chunk_list = multi_agent.conclude(
        topic=section_topic,
        chunk_list=raw_chunk_list,
        return_type="chunk",
        return_relevant=True,
        timeout=args.conclusion_timeout,
        expected_return_workers=28,
    )
    # for chunk in chunk_list:
    #     chunk["总结"] = chunk["entity"]["chunk_text"]

    # Too few summaries returned in time: pad with raw chunks, using the
    # first 512 characters of their text as a stand-in "总结" (summary).
    if len(chunk_list) < 16:
        chunk_list_ids = set([chunk["id"] for chunk in chunk_list])
        for raw_chunk in raw_chunk_list:
            if raw_chunk["id"] not in chunk_list_ids:
                raw_chunk["总结"] = raw_chunk["entity"]["chunk_text"][:512]
                chunk_list.append(raw_chunk)

    if args.debug:
        print(f"{Fore.GREEN}chunk_list: (共 {len(chunk_list)} 个结果)\n {Style.RESET_ALL}")
        print("-"*100)
        print(f'{Fore.YELLOW}>> [section] 撰写文献...{Style.RESET_ALL} {Fore.LIGHTCYAN_EX}{section["name"]}{Style.RESET_ALL}')

    # Draft with the higher-quality model first; if the response carries the
    # literal timeout marker, retry once with the base model.
    cite_agent = CitationAgent(model=extra_args["excellent_model"], DEBUG=args.debug)
    raw_citation = cite_agent.cite_section(
        section_topic=section_topic,
        chunk_list=chunk_list,
        DEBUG=args.debug,
        enable_citation_formatting=False,
    )
    if "Request timed out" in raw_citation:
        cite_agent = CitationAgent(model=model, DEBUG=args.debug)
        raw_citation = cite_agent.cite_section(
            section_topic=section_topic,
            chunk_list=chunk_list,
            DEBUG=args.debug,
            enable_citation_formatting=False,
        )
    # Strip markdown code fences the model may have wrapped around the text.
    raw_citation = raw_citation.replace("```", "")
    # if args.debug:
    #     print(">> raw_citation:")
    #     print("-"*100+"\n"+"-"*100+"\n"+"-"*100+"\n")
    #     print(raw_citation)
    #     print("-"*100+"\n"+"-"*100+"\n"+"-"*100+"\n")
    if args.debug:
        print(f'{Fore.YELLOW}>> [section] 撰写文献完毕...{Style.RESET_ALL} {Fore.LIGHTCYAN_EX}{section["name"]}{Style.RESET_ALL}')
    return section, raw_citation, chunk_list
        
    

if __name__=="__main__":
    main()