from flask import render_template, request
from apps.ResearchGuide import researchGuide_dp
from commonfunc import add_catelog, get_catelog_list
from ssh_func import parse_content
import uuid
import json
import openai
import time

import subprocess, os

# NOTE(review): hard-coded API key checked into source control — this is a
# security risk. Move it to an environment variable / secrets store and
# rotate the key; anyone with repo access can use it.
openai.api_key = "sk-ODEQhf2Ug20Ett417d45724e6a3748E78b6b733a332d8979"
# OpenAI-compatible proxy endpoint used for all ChatCompletion calls below.
openai.api_base = 'https://api.xiaoai.plus/v1'

def llama3_answer(prompt_input):
    """Run the local Llama3 chat-completion script via torchrun and return its answer.

    The prompt is wrapped into the ``[[{"role": "user", ...}]]`` batch format
    that ``example_chat_completion_v2.py`` expects, serialized to JSON on the
    command line, and the generated answer is read back from a result file the
    script writes.

    Args:
        prompt_input: The user prompt text.

    Returns:
        The generated answer text read from the result file.

    Raises:
        FileNotFoundError: If the subprocess failed and never wrote the result file.
    """
    dialogs = [[
        {"role": "user", "content": prompt_input}
        ]]

    # Unique per-request scratch files: the script writes intermediate output
    # to *_tmp and the final answer to code_file_path. UUID names avoid
    # collisions between concurrent requests.
    base_dir = os.path.dirname(__file__)
    code_file_path_tmp = os.path.join(base_dir, str(uuid.uuid4()) + ".txt")
    code_file_path = os.path.join(base_dir, str(uuid.uuid4()) + ".txt")

    model_dir = '/home/gh/meta-llama/llama3/llama3'
    # Command to execute (single-process torchrun on one GPU).
    command = [
        'torchrun', '--nproc_per_node', '1',
        '%s/example_chat_completion_v2.py' % model_dir,
        '--ckpt_dir', '%s/Meta-Llama-3-8B/' % model_dir,
        '--tokenizer_path', '%s/Meta-Llama-3-8B/tokenizer.model' % model_dir,
        '--max_seq_len', '4096',
        '--max_batch_size', '6',
        '--prompt', json.dumps(dialogs),  # must be passed as a JSON string
        '--code_file_path', code_file_path,
        '--code_file_path_tmp', code_file_path_tmp
    ]

    # Pin the job to GPU 3.
    env = os.environ.copy()
    env['CUDA_VISIBLE_DEVICES'] = '3'

    try:
        # Run the generation and surface subprocess output for debugging.
        result = subprocess.run(command, env=env, capture_output=True, text=True)
        print('stdout:', result.stdout)
        print('stderr:', result.stderr)
        with open(code_file_path, "r") as rf:
            answer = rf.read()
    finally:
        # BUG FIX: the scratch files were never deleted, leaking up to two
        # files per request into the app directory.
        for path in (code_file_path, code_file_path_tmp):
            try:
                os.remove(path)
            except OSError:
                pass  # file may not exist if the subprocess failed early
    return answer

def gpt_answer(prompt_input, model_type):
    """Query the OpenAI chat-completion API and return the reply text.

    Args:
        prompt_input: The user prompt text.
        model_type: OpenAI model name, e.g. "gpt-3.5-turbo".

    Returns:
        The model's reply text, or a fixed error string if the API call fails.
    """
    messages = [
        {"role": "user", "content": prompt_input}
        ]
    try:
        response = openai.ChatCompletion.create(
            model=model_type,
            messages=messages,
            max_tokens=3000
        )
        reply = response["choices"][0]["message"]["content"]
    # BUG FIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt and
    # hides the real error entirely; catch Exception and log the cause so API
    # failures (auth, rate limit, network) are diagnosable.
    except Exception as exc:
        print('openai error:', exc)
        reply = "openai 返回值出错"

    return reply

def _add_chat_record(role, app_uuid, content):
    """Persist one chat-history entry (a User or LLM turn) for the session."""
    add_catelog(dict(
        role = role,
        uuid = app_uuid,
        content = content
    ))

def _read_json_template():
    """Return the raw contents of the Json/generate_paper.json template."""
    json_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "Json/generate_paper.json")
    with open(json_path, 'r') as rf:
        return rf.read()

@researchGuide_dp.route('/generate', methods=['POST', 'GET'])
def llama3():
    """Chat endpoint driving the paper-generation dialogue.

    GET renders the chat page for the session given by the ``uuid`` query arg.
    POST dispatches on the ``prompt`` form field: the magic inputs "否",
    "请返回JSON模版" and "是" return canned/template replies, while any other
    input is forwarded to the selected model (Llama3-8B or ChatGPT-3.5) for
    generation. Every turn is recorded via add_catelog.
    """
    if request.method == "POST":
        modeltype = request.form.get("modeltype")
        prompt_input = request.form.get("prompt")
        app_uuid = request.form.get("app_uuid")
        gen_code_only = request.form.get("code_only")
        # NOTE(review): request.get_json() is disabled, so request_json is
        # always an empty dict and the code_only branch below can never run.
        # Kept intact in case the JSON API path is re-enabled — confirm intent.
        # request_json = request.get_json()
        request_json = dict()
        print('request_json.get("code_only") =', request_json.get("code_only"))
        # BUG FIX: `answer` was unbound when a POSTed modeltype matched neither
        # model branch, raising NameError at answer.replace() below.
        answer = ""
        if request_json and request_json.get("code_only") == "true":
            if request_json.get("modeltype") == "Llama3-8B":
                answer = llama3_answer(request_json.get("prompt"))
            elif request_json.get("modeltype") == "ChatGPT-3.5":
                answer = gpt_answer(request_json.get("prompt"), "gpt-3.5-turbo")
            return json.dumps({"answer": answer})
        elif prompt_input == "否":
            _add_chat_record("User", app_uuid, prompt_input)
            answer = "您好，请填写更加写作辅助生成描述，请将下面json文件内容（未知的需求）替换为具体论文所需内容即可:\n" + _read_json_template()
            time.sleep(1)
            _add_chat_record("LLM", app_uuid, answer)
        elif prompt_input == "请返回JSON模版":
            _add_chat_record("User", app_uuid, prompt_input)
            answer = "JSON模版为:\n" + _read_json_template() + "\n 补全后，请返回json文件"
            time.sleep(1)
            _add_chat_record("LLM", app_uuid, answer)
        elif prompt_input == "是":
            _add_chat_record("User", app_uuid, prompt_input)
            answer = "生成完毕, 谢谢使用"
            time.sleep(1)
            _add_chat_record("LLM", app_uuid, answer)
        else:
            # Free-form input: treat it as the JSON paper spec and generate.
            prompt_input_new = "请根据函数JSON定义，生成符合要求的文章内容，每个段落不低于1000字，不用返回其他任何提示信息，JSON定义为：" + prompt_input
            if modeltype == "Llama3-8B":
                answer = llama3_answer(prompt_input_new)
            elif modeltype == "ChatGPT-3.5":
                answer = gpt_answer(prompt_input_new, "gpt-3.5-turbo")

            # Strip markdown code fences from the model output.
            answer = answer.replace("```", "")
            _add_chat_record("User", app_uuid, prompt_input)
            time.sleep(1)

            local_metaapp_dir_path = os.path.join(os.path.dirname(__file__) + "/", "%s"%app_uuid)
            # BUG FIX: bare try/except around makedirs hid real filesystem
            # errors; exist_ok=True covers only the intended "already exists" case.
            os.makedirs(local_metaapp_dir_path, exist_ok=True)
            file_path = local_metaapp_dir_path+ ".txt"
            answer = "您好，已经按照您的要求生成了文档，程序文件路径为：{file_path}，请问该程序是否符合要求？请回答是或否。\n".format(file_path=file_path) + answer
            _add_chat_record("LLM", app_uuid, answer)
    else:
        answer = ""
        prompt_input = ""
        app_uuid = request.args.get('uuid')
    catelog_list = get_catelog_list(app_uuid)
    return render_template('paper_index.html', answer=answer, prompt=prompt_input, app_uuid=app_uuid, catelog_list=catelog_list)