import os
import json
import argparse
from src.database import PaperDatabase
from src.agents.outline_writer import outlineWriter
from src.agents.writer import subsectionWriter
from tqdm import tqdm
import time
import tiktoken
import hashlib

def calculate_file_hash(file_path, algorithm='md5'):
    """Return the hex digest of a file's contents.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.

    Args:
        file_path: Path of the file to hash.
        algorithm: Any algorithm name accepted by ``hashlib.new()``.
            Defaults to 'md5', preserving the original behavior.

    Returns:
        Hexadecimal digest string.
    """
    # hashlib.new() replaces the needless getattr(hashlib, 'md5')() lookup
    # and lets callers opt into a stronger algorithm (e.g. 'sha256').
    hash_func = hashlib.new(algorithm)

    with open(file_path, 'rb') as f:
        while chunk := f.read(4096):
            hash_func.update(chunk)
    return hash_func.hexdigest()

def count_tokens(text):
    """Return the number of tokens in *text* under the gpt-3.5-turbo tokenizer.

    The tiktoken encoding is loaded once and memoized on the function
    object: ``encoding_for_model()`` is comparatively expensive and its
    result never changes between calls, so re-fetching it per call was
    wasted work.
    """
    encoding = getattr(count_tokens, "_encoding", None)
    if encoding is None:
        encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
        count_tokens._encoding = encoding
    return len(encoding.encode(text))


def paras_args(argv=None):
    """Parse command-line options for the survey-generation pipeline.

    Args:
        argv: Optional list of argument strings. When ``None`` (the
            default, preserving the original behavior) argparse reads
            ``sys.argv[1:]``; passing an explicit list makes the parser
            usable from tests and other code.

    Returns:
        ``argparse.Namespace`` with the options defined below.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--gpu', default='0', type=str, help='Specify the GPU to use')
    parser.add_argument('--saving_path', default='output', type=str, help='Directory to save the output survey')
    parser.add_argument('--model', default='', type=str, help='Model to use')
    parser.add_argument('--topic', default='', type=str, help='Topic to generate survey for')
    parser.add_argument('--section_num', default=7, type=int, help='Number of sections in the outline')
    parser.add_argument('--subsection_len', default=700, type=int, help='Length of each subsection')
    parser.add_argument('--outline_reference_num', default=1500, type=int, help='Number of references for outline generation')
    parser.add_argument('--rag_num', default=60, type=int, help='Number of references to use for RAG')
    parser.add_argument('--api_url', default='', type=str, help='url for API request')
    parser.add_argument('--api_key', default='', type=str, help='API key for the model')
    parser.add_argument('--db_path', default='./database', type=str, help='Directory of the database.')
    parser.add_argument('--embedding_model', default='nomic-ai/nomic-embed-text-v1', type=str, help='Embedding model for retrieval.')
    args = parser.parse_args(argv)
    return args

def main(args):
    """Generate a survey on ``args.topic`` and write it to disk.

    Connects to the remote Atomgit paper database, drafts an outline,
    writes and refines the survey, then saves the refined survey (with
    references) both as ``{args.saving_path}/{args.topic}.md`` and as
    ``./review.md`` (the latter is read back by the ``__main__`` timing
    code).
    """
    # Remote Atomgit-backed paper database. PaperDatabase also supports a
    # local mode (db_type="local" with db_path/embedding_model/device).
    db = PaperDatabase(
        db_type="atomgit",
        atomgit_config={
            "base_url": "http://180.184.65.98:38880/atomgit"
        }
    )

    # Fall back to placeholder credentials when none are supplied; real
    # values are expected via --model / --api_key / --api_url.
    model_name = args.model if args.model else 'xxx'
    api_key = args.api_key if args.api_key else 'xxxx'
    api_url = args.api_url if args.api_url else "xxxxxxxx"

    topic = args.topic
    print("topic is ", topic)

    saving_path = args.saving_path

    outline_writer = outlineWriter(model=model_name, api_key=api_key, api_url=api_url, database=db)
    subsection_writer = subsectionWriter(model=model_name, api_key=api_key, api_url=api_url, database=db)

    print("--- 准备写outline ---")
    print('--- 论文检索中 ---')
    outline = outline_writer.draft_outline(topic)

    print("outline", outline)

    # Use the --rag_num CLI option instead of a hard-coded 30 so the flag
    # actually controls retrieval size.
    raw_survey, raw_survey_with_references, raw_references, refined_survey, refined_survey_with_references, refined_references = subsection_writer.write(topic=topic, outline=outline, rag_num=args.rag_num)

    # makedirs(exist_ok=True) handles nested saving paths and avoids the
    # check-then-create race that os.path.exists + os.mkdir had.
    os.makedirs(saving_path, exist_ok=True)

    # NOTE(review): topic is used verbatim as a filename — characters such
    # as '/' would break the path; confirm topics are filesystem-safe.
    with open(f'{saving_path}/{topic}.md', 'w', encoding='utf-8') as f:
        f.write(refined_survey_with_references)
    with open(f'review.md', 'w', encoding='utf-8') as f:
        f.write(refined_survey_with_references)

if __name__ == '__main__':
    start_time = time.time()

    args = paras_args()

    main(args)

    cost_time = time.time() - start_time
    # Re-read the survey that main() just wrote to ./review.md so we can
    # report generation throughput in tokens/second.
    with open('./review.md', 'r', encoding='utf-8') as f:
        review_paper = f.read()
    paper_tokens = count_tokens(review_paper)

    # Fixed user-facing typo: "mintues" -> "minutes".
    print(f"Time cost :{round(cost_time/60, 2)} minutes, generated {round(paper_tokens/cost_time, 2)} tokens/s")

    print("当前文件的哈希值为：", calculate_file_hash('./review.md'))

    # python main.py --topic "Review of the Development Trajectory of Object Detection Algorithms"