import os
import json
import argparse
from src.agents.outline_writer import outlineWriter
from src.agents.writer import subsectionWriter
from src.agents.judge import Judge
# from src.database import database
from tqdm import tqdm
import time
import pickle
def remove_descriptions(text):
    """Drop every line whose stripped content starts with "Description".

    Used to turn an outline that carries per-section description lines
    into a bare outline; the surviving lines are rejoined with newlines.
    """
    kept = [ln for ln in text.split('\n')
            if not ln.strip().startswith("Description")]
    return '\n'.join(kept)

def write(topic, model, section_num, subsection_len, rag_num, refinement,
          outline_reference_num=1500, db=None, api_key=None, api_url=None):
    """Run the full pipeline (outline + subsections) and return the survey text.

    Fix: the previous version called ``write_outline`` / ``write_subsection``
    without the ``outline_reference_num``/``db``/``api_key``/``api_url``
    arguments those functions require, so any call raised ``TypeError``.
    The missing values are now accepted as defaulted keyword parameters
    (backward-compatible) and forwarded.

    Returns the survey with references: the refined version when
    ``refinement`` is truthy, otherwise the raw version.
    """
    outline, outline_wo_description = write_outline(
        topic, model, section_num, outline_reference_num, db, api_key, api_url)

    if refinement:
        (raw_survey, raw_survey_with_references, raw_references,
         refined_survey, refined_survey_with_references,
         refined_references) = write_subsection(
            topic, model, outline, subsection_len, rag_num,
            db, api_key, api_url, refinement=True)
        return refined_survey_with_references
    else:
        # refinement=False: write_subsection still returns six values, with
        # the refined slots duplicating the raw ones; only raw is needed here.
        raw_survey, raw_survey_with_references, raw_references = write_subsection(
            topic, model, outline, subsection_len, rag_num,
            db, api_key, api_url, refinement=False)[:3]
        return raw_survey_with_references

def write_outline(topic, model, section_num, outline_reference_num, db, api_key, api_url):
    """Draft a survey outline for *topic*.

    Fix: removed the leftover debug call
    ``print(outline_writer.api_model.chat('hello'))`` (and the commented-out
    ipdb breakpoint next to it) — it issued one extra, pointless API request
    on every invocation.

    Returns a pair ``(outline, outline_without_description_lines)``.
    """
    outline_writer = outlineWriter(model=model, api_key=api_key, api_url=api_url, database=db)
    # 30000 is passed straight through to draft_outline — presumably a token /
    # context budget; TODO confirm against outlineWriter.draft_outline.
    outline = outline_writer.draft_outline(topic, outline_reference_num, 30000, section_num)
    return outline, remove_descriptions(outline)

def write_subsection(topic, model, outline, subsection_len, rag_num, db, api_key, api_url, refinement = False):
    """Write the survey body for *outline* and return a six-tuple:

    ``(raw_survey, raw_survey_with_references, raw_references,
       refined_survey, refined_survey_with_references, refined_references)``

    When ``refinement`` is false the refined slots simply repeat the raw
    results, so callers can always unpack six values.
    """
    writer = subsectionWriter(model=model, api_key=api_key, api_url=api_url, database=db)

    if not refinement:
        print("No refine mode.")
        survey, survey_refs, refs = writer.write(
            topic, outline, subsection_len=subsection_len,
            rag_num=rag_num, refining=False)
        return survey, survey_refs, refs, survey, survey_refs, refs

    (raw, raw_refs, raw_biblio,
     refined, refined_refs, refined_biblio) = writer.write(
        topic, outline, subsection_len=subsection_len,
        rag_num=rag_num, refining=True)
    return raw, raw_refs, raw_biblio, refined, refined_refs, refined_biblio

def paras_args():
    """Build the CLI parser and return the parsed argument namespace."""
    p = argparse.ArgumentParser(description='')
    p.add_argument('--gpu', default='0', type=str, help='Specify the GPU to use')
    p.add_argument('--saving_path', default='./output/', type=str)
    p.add_argument('--section_num', default=5, type=int)
    p.add_argument('--subsection_len', default=1000, type=int,
                   help='Length of each subsection')
    p.add_argument('--outline_reference_num', default=1500, type=int)
    p.add_argument('--rag_num', default=40, type=int,
                   help='Number of references to use for RAG')
    p.add_argument('--api_url',
                   default='https://open.bigmodel.cn/api/paas/v4/chat/completions',
                   type=str, help='url for API request')
    p.add_argument('--model', default='glm-4-flash', type=str, help='Model to use')
    p.add_argument('--topic',
                   default='Retrieval-augmented generation and Similarity Search',
                   type=str, help='Topic to generate survey for')
    p.add_argument('--token', default='xxxxxx', type=str, help='API key for the model')
    return p.parse_args()

def main(args):
    """Generate a survey for ``args.topic``.

    Loads a cached outline from ``./temp/{topic}.pkl`` when present,
    otherwise drafts one and caches it; then writes all subsections and
    dumps the referenced survey to ``{topic}.md`` in the current directory.

    Fix: replaced the race-prone ``os.path.exists`` + ``os.mkdir`` pair with
    ``os.makedirs(..., exist_ok=True)``, which also handles a nested
    ``--saving_path``.
    """
    # db = database(db_path = args.db_path, embedding_model = args.embedding_model)
    db = None
    args.api_key = args.token

    os.makedirs(args.saving_path, exist_ok=True)
    os.makedirs("./temp", exist_ok=True)

    # NOTE(review): the topic string is embedded verbatim in the path — a
    # topic containing '/' or other path-hostile characters will break here;
    # confirm whether topics are always plain phrases.
    input_file = f"./temp/{args.topic}.pkl"

    try:
        with open(input_file, 'rb') as file:
            outline_with_description = pickle.load(file)
        print(f"Outline with description loaded from {input_file}")
    except FileNotFoundError:
        # No cached outline — draft a fresh one.
        outline_with_description, outline_wo_description = write_outline(
            args.topic, args.model, args.section_num,
            args.outline_reference_num, db, args.api_key, args.api_url)
        # Human-readable copy next to the pickle cache.
        with open(f'{input_file}.md', 'w', encoding='utf-8') as f:
            f.write(outline_with_description)
        # Cache the outline so future runs skip the drafting step.
        with open(input_file, 'wb') as file:
            pickle.dump(outline_with_description, file)
        print(f"Outline with description generated and saved to {input_file}")

    # refinement defaults to False here, so the "refined" values duplicate
    # the raw ones — see write_subsection.
    raw_survey, raw_survey_with_references, raw_references, refined_survey, refined_survey_with_references, refined_references = write_subsection(
        args.topic, args.model, outline_with_description,
        args.subsection_len, args.rag_num, db, args.api_key, args.api_url)

    # NOTE(review): the survey lands in the CWD, not args.saving_path — that
    # looks unintended but is preserved here; confirm before redirecting it.
    with open(f'{args.topic}.md', 'w', encoding='utf-8') as f:
        f.write(refined_survey_with_references)
    print("生成完毕！")

# Fix: removed a duplicate `import time` that used to sit here —
# `time` is already imported at the top of the file.

if __name__ == '__main__':
    start_time = time.time()  # record wall-clock start

    args = paras_args()
    main(args)

    elapsed_time = time.time() - start_time  # total runtime in seconds
    print(f"程序运行完成，总耗时：{elapsed_time:.2f} 秒")

# python ./main.py --topic "Retrieval-augmented generation and Similarity Search"
# python ./main.py --topic "Latest Advances and Cross-modal Fusion Strategies in Multimodal Learning"
# python ./main.py --topic "Comparative Analysis of Instruction Tuning Methods for Large Language Models"
# python ./main.py --topic "Review of the Development Trajectory of Object Detection Algorithms"