import os
import json
import openai
import configs

# Load LLM connection settings from the project config and point the
# global openai client at the configured endpoint and key.
# NOTE: these are module-level side effects — importing this module
# configures the shared openai client for the whole process.
llm_conf = configs.LLM()

openai.base_url = llm_conf.openai_base_url
openai.api_key = llm_conf.openai_api_key

def read_txt_file(path):
    """Read a text file and return its entire contents as a string.

    Args:
        path: Path to the text file.

    Returns:
        The file contents decoded as UTF-8.
    """
    # Explicit encoding avoids locale-dependent decode errors (the
    # original relied on the platform default encoding).
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content

def read_json_file(path):
    """Read a JSON file and return the parsed object.

    Args:
        path: Path to the JSON file.

    Returns:
        The deserialized JSON value (dict, list, etc.).
    """
    # Explicit encoding: JSON files are UTF-8 by spec (RFC 8259), so do
    # not depend on the platform default encoding.
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    return data

def save_output(output, save_dir, data_id):
    """Write *output* to ``<save_dir>/<data_id>.txt``.

    Args:
        output: Text to persist.
        save_dir: Directory in which to place the file.
        data_id: Basename (without extension) for the output file.
    """
    # os.path.join works whether or not save_dir has a trailing separator;
    # the original string concatenation silently produced a wrong path
    # without one. The redundant f.close() after the with-block is gone —
    # the context manager already closes the file.
    with open(os.path.join(save_dir, data_id + ".txt"), 'w', encoding='utf-8') as f:
        f.write(output)

def get_subfile(path):
    """Return the names of the regular files directly inside *path*.

    Directories and other non-file entries are skipped; names are
    returned as bare entries (no directory prefix).
    """
    files = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isfile(full):
            files.append(entry)
    return files

def run_review(mmd_file_path):
    """Stream an LLM review of the paper stored at *mmd_file_path*.

    Loads the system instruction from template.json (next to this file),
    reads the paper text, truncates it at its '## References' heading,
    and submits both to the chat-completions endpoint.

    Args:
        mmd_file_path: Path to the .mmd file containing the paper text.

    Returns:
        A streaming chat-completions response (iterator of chunks).
    """
    template_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'template.json')
    system_prompt_dict = read_json_file(template_path)
    instruction = system_prompt_dict['instruction_e']

    paper = read_txt_file(mmd_file_path)
    # Drop the references section. str.find returns -1 when the heading
    # is missing, and the original paper[:idx] then silently chopped the
    # paper's last character — only truncate on a real match.
    idx = paper.find('## References')
    if idx != -1:
        paper = paper[:idx]
    paper = paper.strip()

    messages = [
        {"role": "system", "content": instruction},
        {'role': 'user', 'content': paper},
    ]

    # stream=True: the caller consumes chunks as they arrive.
    response = openai.chat.completions.create(messages=messages,
                                              model=llm_conf.model,
                                              stream=True,
                                              temperature=0.0,
                                              max_tokens=8192)
    return response

if __name__ == '__main__':
    # Stream the review and print it in ~20-character batches to avoid
    # a print call per token.
    ans = run_review('_data/test.mmd')
    buff = ''
    for chunk in ans:
        if chunk and chunk.choices:
            delta = chunk.choices[0].delta
            if delta.content:
                buff += delta.content
                if len(buff) > 20:
                    print(buff, end='')
                    buff = ''
    # Flush the tail: the original dropped any final batch of <= 20
    # characters left in the buffer when the stream ended.
    if buff:
        print(buff, end='')
