import sys
import os

# Locate the directory containing this script.
current_dir = os.path.dirname(os.path.abspath(__file__))

# Add the parent directory (the one containing the knowledge_storm package)
# to sys.path so the local checkout takes precedence over any installed copy.
sys.path.insert(0, os.path.abspath(os.path.join(current_dir, '../')))

from argparse import ArgumentParser

# NOTE(review): the original wrapped this import in try/except, printed the
# ImportError, and then unconditionally re-imported the same names below —
# the script crashed either way. Import once and let a failure surface as a
# normal traceback.
from knowledge_storm import STORMWikiRunnerArguments, STORMWikiRunner, STORMWikiLMConfigs
from knowledge_storm.lm import OpenAIModel
from knowledge_storm.rm import PaperRM
from knowledge_storm.utils import load_api_key

def main(args):
    """Run the full STORM wiki pipeline for ``args.topic``.

    Loads API credentials from ``secrets.toml``, configures every pipeline
    stage to use the same OpenAI-compatible model (chosen via the
    ``MODEL_NAME`` environment variable), retrieves references through
    ``PaperRM``, and writes all outputs under ``args.output_dir``.

    Args:
        args: Parsed command-line namespace produced by the ArgumentParser
            in ``__main__`` (topic, output dir, stage toggles, top-k
            hyperparameters, thread count, remove_duplicate).
    """
    load_api_key(toml_file_path='secrets.toml')

    lm_configs = STORMWikiLMConfigs()

    # Shared sampling settings for every OpenAI-backed LM in the pipeline.
    openai_kwargs = {
        'api_key': os.getenv("OPENAI_API_KEY"),
        'temperature': 1.0,
        'top_p': 0.9,
    }

    # Sanity-check that a key was loaded WITHOUT echoing the secret itself
    # (the original printed the raw key to stdout — a credential leak).
    print(f"API key loaded: {'yes' if openai_kwargs['api_key'] else 'NO — check secrets.toml / OPENAI_API_KEY'}")

    ModelClass = OpenAIModel

    # Model name comes from the environment; may be None if MODEL_NAME is
    # unset — TODO confirm how OpenAIModel handles a None model name.
    model_name = os.getenv("MODEL_NAME")

    # STORM is a LM system so different components can be powered by different models.
    # For a good balance between cost and quality, you can choose a cheaper/faster model for conv_simulator_lm
    # which is used to split queries, synthesize answers in the conversation. We recommend using stronger models
    # for outline_gen_lm which is responsible for organizing the collected information, and article_gen_lm
    # which is responsible for generating sections with citations.
    conv_simulator_lm = ModelClass(model=model_name, max_tokens=500, **openai_kwargs)
    question_asker_lm = ModelClass(model=model_name, max_tokens=500, **openai_kwargs)
    outline_gen_lm = ModelClass(model=model_name, max_tokens=500, **openai_kwargs)
    article_gen_lm = ModelClass(model=model_name, max_tokens=500, **openai_kwargs)
    article_polish_lm = ModelClass(model=model_name, max_tokens=500, **openai_kwargs)

    lm_configs.set_conv_simulator_lm(conv_simulator_lm)
    lm_configs.set_question_asker_lm(question_asker_lm)
    lm_configs.set_outline_gen_lm(outline_gen_lm)
    lm_configs.set_article_gen_lm(article_gen_lm)
    lm_configs.set_article_polish_lm(article_polish_lm)

    engine_args = STORMWikiRunnerArguments(
        output_dir=args.output_dir,
        max_conv_turn=args.max_conv_turn,
        max_perspective=args.max_perspective,
        search_top_k=args.search_top_k,
        retrieve_top_k=args.retrieve_top_k,
        max_thread_num=args.max_thread_num,
    )

    # STORM is a knowledge curation system which consumes information from the retrieval module.
    # Currently, the information source is the Internet and we use search engine API as the retrieval module.
    rm = PaperRM(k=20, default_top_k=30)
    runner = STORMWikiRunner(engine_args, lm_configs, rm)

    # Use the namespace passed in — the original re-parsed sys.argv through
    # the module-global `parser`, which double-parsed argv and broke any
    # programmatic call of main().
    topic = args.topic
    print(f"Topic: {topic}")
    runner.run(
        topic=topic,
        do_research=args.do_research,
        do_generate_outline=args.do_generate_outline,
        do_generate_article=args.do_generate_article,
        do_polish_article=args.do_polish_article,
        # Previously parsed but never forwarded; the STORM runner accepts it
        # to deduplicate content during the polish stage.
        remove_duplicate=args.remove_duplicate,
    )
    runner.post_run()
    runner.summary()

if __name__ == '__main__':
    parser = ArgumentParser()
    # global arguments
    parser.add_argument('--topic', type=str, default='Deep learning',
                        help='The topic to research and generate an article.')
    parser.add_argument('--output-dir', type=str, default='./results',
                        help='Directory to store the outputs.')
    parser.add_argument('--max-thread-num', type=int, default=30,
                        help='Maximum number of threads to use. The information seeking part and the article generation'
                             'part can speed up by using multiple threads. Consider reducing it if keep getting '
                             '"Exceed rate limit" error when calling LM API.')
    # Stage toggles. Every stage defaults to ON, so the original '--do-*'
    # store_true flags were no-ops (they could never turn a stage off). They
    # are kept for backward compatibility; the paired '--skip-*' flags share
    # the same dest and actually allow disabling a stage. (store_false's
    # implicit default is True, matching the existing defaults.)
    parser.add_argument('--do-research', action='store_true', default=True,
                        help='If True, simulate conversation to research the topic; otherwise, load the results.')
    parser.add_argument('--skip-research', dest='do_research', action='store_false',
                        help='Skip the research stage and load previous results instead.')
    parser.add_argument('--do-generate-outline', action='store_true', default=True,
                        help='If True, generate an outline for the topic; otherwise, load the results.')
    parser.add_argument('--skip-generate-outline', dest='do_generate_outline', action='store_false',
                        help='Skip outline generation and load previous results instead.')
    parser.add_argument('--do-generate-article', action='store_true', default=True,
                        help='If True, generate an article for the topic; otherwise, load the results.')
    parser.add_argument('--skip-generate-article', dest='do_generate_article', action='store_false',
                        help='Skip article generation and load previous results instead.')
    parser.add_argument('--do-polish-article', action='store_true', default=True,
                        help='If True, polish the article by adding a summarization section and (optionally) removing '
                             'duplicate content.')
    parser.add_argument('--skip-polish-article', dest='do_polish_article', action='store_false',
                        help='Skip the article polishing stage.')
    # hyperparameters for the pre-writing stage
    parser.add_argument('--max-conv-turn', type=int, default=3,
                        help='Maximum number of questions in conversational question asking.')
    parser.add_argument('--max-perspective', type=int, default=3,
                        help='Maximum number of perspectives to consider in perspective-guided question asking.')
    parser.add_argument('--search-top-k', type=int, default=5,
                        help='Top k search results to consider for each search query.')
    # hyperparameters for the writing stage
    parser.add_argument('--retrieve-top-k', type=int, default=10,
                        help='Top k collected references for each section title.')
    parser.add_argument('--remove-duplicate', action='store_true',
                        help='If True, remove duplicate content from the article.')

    main(parser.parse_args())