from flask import render_template, request
from apps.LLM import llm_dp
from commonfunc import add_catelog, get_catelog_list, get_catelog_list_func, add_catelog_func
import uuid
import json
import openai
import time

import subprocess, os

# SECURITY FIX: the API key was hard-coded in source. Read credentials from the
# environment; keep the old literals only as a backward-compatible fallback
# (NOTE(review): rotate this leaked key and drop the fallback once deploys set the env vars).
openai.api_key = os.environ.get("OPENAI_API_KEY", "sk-ODEQhf2Ug20Ett417d45724e6a3748E78b6b733a332d8979")
openai.api_base = os.environ.get("OPENAI_API_BASE", 'https://api.xiaoai.plus/v1')

def llama3_answer(prompt_input):
    """Run a single-turn chat prompt through a local Llama-3 model via torchrun.

    The prompt is handed to an external script which writes the model's reply
    into a per-request output file; that file's content is returned.

    :param prompt_input: the user's prompt text.
    :return: the model's answer read from the output file.
    :raises RuntimeError: if the subprocess exits non-zero or produces no output file.
    """
    dialogs = [[
        {"role": "user", "content": prompt_input}
        ]]

    # Unique per-request file names so concurrent requests do not clobber each other.
    base_dir = os.path.dirname(__file__)
    code_file_path_tmp = os.path.join(base_dir, str(uuid.uuid4()) + ".txt")
    code_file_path = os.path.join(base_dir, str(uuid.uuid4()) + ".txt")

    model_dir = '/home/gh/meta-llama/llama3/llama3'
    # Command to execute; run as an argument list (no shell), so the prompt is safe to pass through.
    command = [
        'torchrun', '--nproc_per_node', '1',
        '%s/example_chat_completion_v2.py' % model_dir,
        '--ckpt_dir', '%s/Meta-Llama-3-8B/' % model_dir,
        '--tokenizer_path', '%s/Meta-Llama-3-8B/tokenizer.model' % model_dir,
        '--max_seq_len', '8192',
        '--max_batch_size', '6',
        '--prompt', '%s' % json.dumps(dialogs),  # must be serialized as a string here
        '--code_file_path', code_file_path,
        '--code_file_path_tmp', code_file_path_tmp
    ]

    # Pin the run to a single GPU (device index 3).
    env = os.environ.copy()
    env['CUDA_VISIBLE_DEVICES'] = '3'

    try:
        # Execute the command.
        result = subprocess.run(command, env=env, capture_output=True, text=True)
        # BUG FIX: the original ignored the exit status, then crashed with an
        # unrelated FileNotFoundError when the script had failed. Surface the
        # real error (stderr) instead.
        if result.returncode != 0 or not os.path.exists(code_file_path):
            raise RuntimeError("llama3 subprocess failed: %s" % result.stderr)
        with open(code_file_path, "r", encoding="utf-8") as rf:
            answer = rf.read()
    finally:
        # BUG FIX: the original leaked both temp files on every request.
        for path in (code_file_path, code_file_path_tmp):
            if os.path.exists(path):
                os.remove(path)
    return answer

def gpt_answer(prompt_input, model_type):
    """Query the OpenAI chat-completion API and return the reply text.

    :param prompt_input: a messages list, e.g. ``[{"role": "user", "content": ...}]``.
    :param model_type: model name such as ``"gpt-3.5-turbo"``.
    :return: the assistant's reply, or a fixed error string on any failure
             (best-effort behavior preserved from the original).
    """
    try:
        response = openai.ChatCompletion.create(
            model=model_type,
            messages=prompt_input,
            max_tokens=3000
        )
        reply = response["choices"][0]["message"]["content"]
    # BUG FIX: a bare `except:` also swallows SystemExit / KeyboardInterrupt;
    # catch Exception only, keeping the original fallback message unchanged.
    except Exception:
        reply = "openai 返回值出错"

    return reply

@llm_dp.route('/generate', methods=['POST', 'GET'])
def code_exec_analyze():
    """Generate an answer for a prompt with the selected model.

    Expects a POST JSON body ``{"prompt": ..., "modeltype": "Llama3-8B" | "ChatGPT-3.5"}``
    and returns a JSON string ``{"answer": ...}``.
    """
    if request.method != "POST":
        # BUG FIX: the original returned None for GET requests, which makes
        # Flask raise a 500; answer with an explicit empty payload instead.
        return json.dumps({"answer": ""})

    request_json = request.get_json()
    prompt_input = request_json.get("prompt")
    modeltype = request_json.get("modeltype")
    if modeltype == "Llama3-8B":
        answer = llama3_answer(prompt_input)
    elif modeltype == "ChatGPT-3.5":
        messages = [{"role": "user", "content": prompt_input}]
        answer = gpt_answer(messages, "gpt-3.5-turbo")
    else:
        # BUG FIX: an unknown modeltype left `answer` unbound and crashed with
        # UnboundLocalError on the json.dumps below; report it instead.
        answer = "unsupported modeltype: %s" % modeltype
    return json.dumps({"answer": answer})