from flask import render_template, request
from apps.Llama2 import llama2_dp

import subprocess, os

def llama2_answer(prompt_input):
    """Run the CodeLlama-13b completion script on *prompt_input* and return its stdout.

    Launches ``example_completion.py`` under ``torchrun`` (2 processes per node)
    as a blocking subprocess and returns whatever the script prints.

    :param prompt_input: prompt text forwarded to the model; comes from an HTTP
        form field, so it must be treated as untrusted input.
    :return: the standard output of the generation run, as a string.
    """
    model_dir = '/media/iscas/eac0d290-ef12-427b-ae00-608cb742dfa0/Flask/LLMserver/apps/codellama'
    # Build the command as an argv list and run it without a shell: the previous
    # os.popen() call interpolated the user-supplied prompt into a shell string,
    # which allowed shell injection and broke on prompts containing spaces.
    cmd = [
        'torchrun', '--nproc_per_node', '2',
        '%s/example_completion.py' % model_dir,
        '--prompt', prompt_input,
        '--ckpt_dir', '%s/CodeLlama-13b/' % model_dir,
        '--tokenizer_path', '%s/CodeLlama-13b/tokenizer.model' % model_dir,
        '--max_seq_len', '512',
        '--max_batch_size', '6',
    ]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, text=True)
    return result.stdout

# Routes and view functions registered on the llama2 blueprint.
@llama2_dp.route('/codellama', methods=['GET'])
def codellama():
    """Simple GET probe confirming the codellama routes are mounted."""
    message = "this is a codellama func"
    return message

@llama2_dp.route('/codellama-13b', methods=['POST', 'GET'])
def codellama_13b():
    """Render the CodeLlama-13b page; on POST, run generation for the posted prompt.

    GET renders the page with empty fields. POST reads the ``prompt`` form
    field, runs it through :func:`llama2_answer`, and renders the result.
    """
    if request.method == "POST":
        # NOTE(review): modeltype is read but never used — confirm whether
        # model selection was intended here.
        modeltype = request.form.get("modeltype")
        # Default to "" so a missing form field cannot pass None downstream
        # into the command builder.
        prompt_input = request.form.get("prompt", "")
        answer = llama2_answer(prompt_input)
    else:
        answer = ""
        prompt_input = ""

    return render_template('index.html', answer=answer, prompt=prompt_input)