from flask import render_template, request
from apps.Llama3 import llama3_dp
from commonfunc import add_catelog, get_catelog_list, get_catelog_list_func, add_catelog_func, add_code_analysis, add_code_exec_analysis, get_catelog_code_analysis_list, get_catelog_code_exec_analysis_list
from ssh_func import parse_content, scp_download
import uuid
import json
import openai
import time

import subprocess, os

# --- OpenAI client configuration -------------------------------------------
# SECURITY: an API key was previously hard-coded here and must be considered
# leaked — rotate it.  Prefer the OPENAI_API_KEY environment variable; the
# old literal is kept only as a fallback so existing deployments keep working.
openai.api_key = os.environ.get(
    "OPENAI_API_KEY", "sk-ODEQhf2Ug20Ett417d45724e6a3748E78b6b733a332d8979"
)
# Proxy endpoint that speaks the OpenAI REST API.
openai.api_base = os.environ.get("OPENAI_API_BASE", 'https://api.xiaoai.plus/v1')

def llama3_answer(prompt_input):
    """Run a prompt through the locally hosted Llama-3 model.

    The model is invoked out-of-process via ``torchrun``; the child script
    writes its answer into a scratch file which is read back and returned.

    Args:
        prompt_input: Plain-text user prompt.

    Returns:
        The model's answer as a string.
    """
    # The chat-completion script expects a batch of dialogs: a list of
    # conversations, each a list of {"role", "content"} messages.
    dialogs = [[{"role": "user", "content": prompt_input}]]

    # Unique scratch files the child process writes its answer into.
    base_dir = os.path.dirname(__file__)
    code_file_path_tmp = os.path.join(base_dir, str(uuid.uuid4()) + ".txt")
    code_file_path = os.path.join(base_dir, str(uuid.uuid4()) + ".txt")

    # 2024-11-12: weights were moved from /home/gh to /media/gh (the old
    # dead reassignment of model_dir has been removed).
    model_dir = '/media/gh/meta-llama/llama3/llama3'

    # Command executed out-of-process.
    command = [
        'torchrun', '--nproc_per_node', '1',
        '%s/example_chat_completion_v2.py' % model_dir,
        '--ckpt_dir', '%s/Meta-Llama-3-8B/' % model_dir,
        '--tokenizer_path', '%s/Meta-Llama-3-8B/tokenizer.model' % model_dir,
        '--max_seq_len', '8192',
        '--max_batch_size', '6',
        '--prompt', json.dumps(dialogs),  # must be passed as a JSON string
        '--code_file_path', code_file_path,
        '--code_file_path_tmp', code_file_path_tmp
    ]

    # Pin the child process to GPU 3.
    env = os.environ.copy()
    env['CUDA_VISIBLE_DEVICES'] = '3'

    try:
        # Run the model; surface child output for debugging (failures show
        # up in stderr).
        result = subprocess.run(command, env=env, capture_output=True, text=True)
        print('stdout:', result.stdout)
        print('stderr:', result.stderr)
        with open(code_file_path, "r") as rf:
            answer = rf.read()
    finally:
        # BUG FIX: the scratch files used to be left behind on every call.
        for path in (code_file_path, code_file_path_tmp):
            try:
                os.remove(path)
            except OSError:
                pass  # never created (e.g. torchrun failed) or already gone
    return answer

def gpt_answer(prompt_input, model_type):
    """Call the OpenAI chat-completion API and return the reply text.

    Args:
        prompt_input: List of chat messages, e.g.
            ``[{"role": "user", "content": "..."}]``.
        model_type: Model name such as ``"gpt-3.5-turbo"`` or ``"gpt-4"``.

    Returns:
        The assistant's reply, or the fixed error string "openai 返回值出错"
        when the call fails (callers render this string directly, so it is
        kept byte-identical).
    """
    try:
        response = openai.ChatCompletion.create(
            model=model_type,
            messages=prompt_input,
            max_tokens=3000
        )
        reply = response["choices"][0]["message"]["content"]
    except Exception as exc:
        # BUG FIX: was a bare `except:` that also swallowed SystemExit /
        # KeyboardInterrupt and hid the actual error; log it for diagnosis.
        print('openai request failed:', exc)
        reply = "openai 返回值出错"

    return reply

@llama3_dp.route('/funcgenerate', methods=['GET'])
def func_generate():
    """Render the multi-function page for one metaapp.

    Downloads ``func.txt`` (one function name per line) for the metaapp
    identified by the ``uuid`` query parameter from the remote server,
    then renders the list of functions.
    """
    app_uuid = request.args.get('uuid')
    root_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    local_metaapp_dir_path = os.path.join(root_dir, "metaappfunc/%s" % app_uuid)
    # BUG FIX: was `try: os.makedirs(...) except: pass` — exist_ok covers
    # the "already exists" case without masking real errors (permissions).
    os.makedirs(local_metaapp_dir_path, exist_ok=True)

    # Fetch the metaapp's function list from the remote server.
    SSH_TARGET_PATH = '/media/truthsource/metaapp/'  # trailing slash is required
    target_json_folder_location = os.path.join(SSH_TARGET_PATH, '%s/function/func.txt' % app_uuid)
    func_file_path = os.path.join(root_dir, "metaappfunc", app_uuid, "func.txt")
    scp_download(target_json_folder_location, func_file_path)

    # BUG FIX: `functions` used to be unbound (NameError at render time)
    # when the download failed and the file did not exist.
    functions = []
    if os.path.exists(func_file_path):
        with open(func_file_path, 'r') as file:
            functions = file.read().splitlines()
    return render_template('multifunc.html', functions=functions, app_uuid=app_uuid)

@llama3_dp.route('/code/execanalysis', methods=['POST', 'GET'])
def code_exec_analyze():
    """Analyze a code *execution result* with an LLM and show the history.

    POST form fields: ``modeltype`` ("Llama3-8B" or "ChatGPT-3.5"),
    ``code`` (the execution output to analyze) and ``app_uuid``.  Both the
    user input and the model reply are appended to the per-app execution
    analysis catalog.  GET only renders the history for the ``uuid`` query
    parameter.
    """
    if request.method == "POST":
        modeltype = request.form.get("modeltype")
        code = request.form.get("code")
        app_uuid = request.form.get("app_uuid")

        # Persist the user's message to the conversation log.
        add_code_exec_analysis(dict(
            role="User",
            uuid=app_uuid,
            content=code,
        ))

        prompt_input_new = f"请对代码执行结果{code}进行分析，并给出详细的解决方案"

        # BUG FIX: `answer` was unbound (UnboundLocalError) for any model
        # type other than the two handled below.
        answer = ""
        if modeltype == "Llama3-8B":
            answer = llama3_answer(prompt_input_new)
        elif modeltype == "ChatGPT-3.5":
            prompt_input_new = [{"role": "user", "content": prompt_input_new}]
            answer = gpt_answer(prompt_input_new, "gpt-3.5-turbo")

        # Strip Markdown code fences before storing/rendering.
        answer = answer.replace("```", "")
        add_code_exec_analysis(dict(
            role="LLM",
            uuid=app_uuid,
            content=answer,
        ))
    else:
        code = ""
        answer = ""
        app_uuid = request.args.get('uuid')

    catelog_list = get_catelog_code_exec_analysis_list(app_uuid)
    return render_template('codeexec.html', answer=answer, code=code, app_uuid=app_uuid, catelog_list=catelog_list)

@llama3_dp.route('/code/analyze', methods=['POST', 'GET'])
def code_analyze():
    """Run an LLM analysis of a code snippet and show the chat history.

    POST form fields: ``modeltype`` ("Llama3-8B" or "ChatGPT-3.5"),
    ``code`` (the snippet), ``analysis_type`` (the kind of analysis to
    request) and ``app_uuid``.  Both the user input and the model reply
    are appended to the per-app analysis catalog.  GET only renders the
    history for the ``uuid`` query parameter.
    """
    if request.method == "POST":
        modeltype = request.form.get("modeltype")
        code = request.form.get("code")
        analysis_type = request.form.get("analysis_type")
        app_uuid = request.form.get("app_uuid")

        # Persist the user's message to the conversation log.
        add_code_analysis(dict(
            role="User",
            uuid=app_uuid,
            content=code,
        ))

        prompt_input_new = f"请对代码{code}进行{analysis_type}分析"

        # BUG FIX: `answer` was unbound (UnboundLocalError) for any model
        # type other than the two handled below.
        answer = ""
        if modeltype == "Llama3-8B":
            answer = llama3_answer(prompt_input_new)
        elif modeltype == "ChatGPT-3.5":
            prompt_input_new = [{"role": "user", "content": prompt_input_new}]
            answer = gpt_answer(prompt_input_new, "gpt-3.5-turbo")

        # Strip Markdown code fences before storing/rendering.
        answer = answer.replace("```", "")
        add_code_analysis(dict(
            role="LLM",
            uuid=app_uuid,
            content=answer,
        ))
    else:
        code = ""
        answer = ""
        app_uuid = request.args.get('uuid')

    catelog_list = get_catelog_code_analysis_list(app_uuid)
    return render_template('codeanaly.html', answer=answer, code=code, app_uuid=app_uuid, catelog_list=catelog_list)

@llama3_dp.route('/appgenerate', methods=['POST', 'GET'])
def app_generate():
    """Whole-app code-generation chat endpoint (C++, hard-wired to GPT-4).

    POST behaviour depends on the literal user reply:
      * "是" (yes): the user accepts the generated program; a closing
        message is logged to the conversation catalog.
      * anything else: the input is expected to be the completed JSON
        template (description + function list).  A three-step GPT-4
        pipeline then runs: (1) generate C++ code with UNCERTAIN_-prefixed
        stubs for under-specified functions, (2) extract those stubs as a
        JSON map, (3) refine the database-related code using a local
        reference document.  Each step is appended to the catalog.

    GET renders the conversation for the ``uuid`` query parameter; when the
    history is empty it seeds the chat with the JSON template to fill in.
    """
    if request.method == "POST":
        modeltype = request.form.get("modeltype")  # NOTE(review): read but never used — GPT-4 is hard-wired below
        prompt_input = request.form.get("prompt")
        app_uuid = request.form.get("app_uuid")
        code_type = "C++"  # target language for the generated application

        if prompt_input == "是":
            # User confirmed the generated program — close the conversation.
            catelog_info_user = dict(
                role = "User",
                uuid = app_uuid,
                content = prompt_input
            )
            add_catelog(catelog_info_user)
            answer = "代码生成完毕, 谢谢使用"
            time.sleep(1)  # presumably keeps catalog timestamps ordered — TODO confirm
            catelog_info_LLM = dict(
                role = "LLM",
                uuid = app_uuid,
                content = answer
            )
            add_catelog(catelog_info_LLM)
        else:
            # Log the user's (JSON template) message.
            catelog_info_LLM = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input
                )
            add_catelog(catelog_info_LLM)

            # NOTE(review): raises here if the message is not valid JSON —
            # there is no error handling for malformed input.
            json_of_input = json.loads(prompt_input)
            # Step 1: generate the full program, stubbing under-specified
            # functions with UNCERTAIN_-prefixed names.
            prompt_1 = f'''用{code_type}语言生成一个可执行代码，软件功能为:{json_of_input['description']}.主要包括:{';'.join(json_of_input['function'])}.功能拆分为子函数,
                不同函数之间通过星号进行分割,对于生成细节不明确的函数,只用在函数主体进行中文功能描述,
                不用给出具体实现细节,使用UNCERTAIN_前缀方式来命名生成细节不明确的的子函数名称,命名格式为UNCERTAIN_函数名称,只用返回可执行代码即可,不用返回其他信息.'''
            code_prompt = [{"role": "user", "content": prompt_1}]
            reply = gpt_answer(code_prompt, "gpt-4")
            # Keep the full conversation so later steps have context.
            code_prompt.append({"role": "assistant", "content": reply})
            catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = reply
                )
            add_catelog(catelog_info_LLM)
            # Step 2: extract the UNCERTAIN_ stubs and their descriptions
            # as a JSON map (name -> description).
            prompt_2 = "请将使用UNCERTAIN_前缀方式来命名的函数名称以及函数的功能描述信息提取出来，以JSON格式返回，key为函数名称，value为函数功能描述信息，代码信息如下：" + reply
            code_prompt.append({"role": "user", "content": prompt_2})
            reply = gpt_answer(code_prompt, "gpt-4")
            code_prompt.append({"role": "assistant", "content": reply})
            catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = "下边是APP中生成细节不明确的函数,现在开始查询文档说明.\n" + reply
                )
            add_catelog(catelog_info_LLM)
            # Step 3: refine the database-related code using the local
            # reference document Json/文档5.json (key "refer").
            json_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "Json/文档5.json")
            with open(json_path, 'r') as rf:
                json_of_doc = json.load(rf)
            prompt_3 = f"下边是该软件功能的数据库相关功能的参考文档，请根据数据库参考文档，更新代码中关于数据表的相关操作代码，使其功能更明确。参考文档为:{json_of_doc['refer']}"
            code_prompt.append({"role": "user", "content": prompt_3})
            reply = gpt_answer(code_prompt, "gpt-4")
            code_prompt.append({"role": "assistant", "content": reply})
            catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = "查询文档结束，最终代码如下所示：\n"+reply
                )
            add_catelog(catelog_info_LLM)
            # Ask the user to confirm ("是"/"否") in a follow-up POST.
            catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = "请问该程序是否符合要求？请回答是或否。"
                )
            add_catelog(catelog_info_LLM)

        # NOTE(review): this overwrites any answer set above (e.g. in the
        # "是" branch); the rendered page shows messages via catelog_list.
        answer = ''
    else:
        answer = ""
        prompt_input = ""
        app_uuid = request.args.get('uuid')

        # Empty history: seed the conversation with the JSON template.
        if not get_catelog_list(app_uuid):
            json_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "Json/metaappmodel.json")
            with open(json_path, 'r') as rf:
                answer = "JSON模版为:\n" + rf.read() + "\n 补全后，请返回json文件"
            catelog_info_LLM = dict(
                role = "LLM",
                uuid = app_uuid,
                content = answer
            )
            add_catelog(catelog_info_LLM)

    catelog_list = get_catelog_list(app_uuid)
    return render_template('app_index.html', answer=answer, prompt=prompt_input, app_uuid=app_uuid, catelog_list=catelog_list)

@llama3_dp.route('/generate', methods=['POST', 'GET'])
def llama3():
    """Single-function code-generation chat endpoint.

    POST drives a small state machine on the literal user reply:
      * "否" (no):            send back the JSON template asking for a more
                              detailed function description.
      * "请返回JSON模版":      return the blank JSON template.
      * "是" (yes):           close the conversation ("code generation done").
      * anything else:        treat the input as a completed function-JSON
                              definition, generate code with the selected
                              model and store the result via parse_content().

    When ``funcname`` is present the per-function catalog is used
    (add_catelog_func / get_catelog_list_func); otherwise the app-level one.

    GET renders the history for ``uuid`` (and optional ``funcname``),
    seeding the conversation with a JSON template when the history is empty.
    """
    if request.method == "POST":
        modeltype = request.form.get("modeltype")
        prompt_input = request.form.get("prompt")
        app_uuid = request.form.get("app_uuid")
        funcname = request.form.get("funcname")
        gen_code_only = request.form.get("code_only")  # NOTE(review): assigned but never used below
        # request_json = request.get_json()
        # NOTE(review): request_json is hard-coded to an empty dict, so the
        # "code_only" JSON-API branch below is unreachable (empty dict is
        # falsy).  Restore the request.get_json() call above to re-enable it.
        request_json = dict()
        print('request_json.get("code_only") =', request_json.get("code_only"))
        if request_json and request_json.get("code_only") == "true":
            # JSON API mode: return only the generated answer, without
            # touching the conversation catalog.
            if request_json.get("modeltype") == "Llama3-8B":
                answer = llama3_answer(request_json.get("prompt"))
            elif request_json.get("modeltype") == "ChatGPT-3.5":
                prompt_input = [{"role": "user", "content": request_json.get("prompt")}]
                answer = gpt_answer(prompt_input, "gpt-3.5-turbo")
            return json.dumps({"answer": answer})
        elif prompt_input == "否":
            # User rejected the result: log the reply, then answer with the
            # JSON template asking for a more detailed description.
            if funcname:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_user)
            else:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input
                )
                add_catelog(catelog_info_user)
            json_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "Json/metaapp.json")
            with open(json_path, 'r') as rf:
                answer = "您好，请填写更加详细函数生成描述，请将下面json文件内容（未知的需求）替换为具体生成函数所需内容即可:\n" + rf.read()
            # answer = str(json.loads(json_path))
            time.sleep(1)  # presumably keeps catalog timestamps ordered — TODO confirm
            if funcname:
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_LLM)
            else:
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer
                )
                add_catelog(catelog_info_LLM)
        elif prompt_input == "请返回JSON模版":
            # User explicitly asked for the blank JSON template.
            if funcname:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_user)
            else:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input
                )
                add_catelog(catelog_info_user)
            json_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "Json/metaapp.json")
            with open(json_path, 'r') as rf:
                answer = "JSON模版为:\n" + rf.read() + "\n 补全后，请返回json文件"
            # answer = str(json.loads(json_path))
            time.sleep(1)  # presumably keeps catalog timestamps ordered — TODO confirm
            if funcname:
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_LLM)
            else:
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer
                )
                add_catelog(catelog_info_LLM)
        elif prompt_input == "是":
            # User accepted the generated code — log a closing message.
            if funcname:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_user)
                answer = "代码生成完毕, 谢谢使用"
                time.sleep(1)
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_LLM)
            else:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input
                )
                add_catelog(catelog_info_user)
                answer = "代码生成完毕, 谢谢使用"
                time.sleep(1)
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer
                )
                add_catelog(catelog_info_LLM)
        else:
            # Any other input is treated as a completed function-JSON
            # definition: generate code with the selected model.
            prompt_input_new = "请根据函数JSON定义，直接生成可执行代码，只用返回可执行代码，不用返回其他任何提示信息，JSON定义为：" + prompt_input
            # NOTE(review): if modeltype matches neither branch below,
            # `answer` stays unbound and the .replace() call raises
            # UnboundLocalError.
            if modeltype == "Llama3-8B":
                answer = llama3_answer(prompt_input_new)
            elif modeltype == "ChatGPT-3.5":
                prompt_input_new = [{"role": "user", "content": prompt_input_new}]
                answer = gpt_answer(prompt_input_new, "gpt-3.5-turbo")

            # Strip Markdown code fences before storing/rendering.
            answer = answer.replace("```", "")
            if funcname:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_user)
            else:
                catelog_info_user = dict(
                    role = "User",
                    uuid = app_uuid,
                    content = prompt_input
                )
                add_catelog(catelog_info_user)
            time.sleep(1)

            # Store the generated code; presumably writes it to a file and
            # returns that file's path — TODO confirm against ssh_func.
            code_file_path_target = parse_content(app_uuid, prompt_input, answer)
            # if prompt_input.startswith("LLM"):
            answer = "您好，已经按照您的要求生成了相应程序，程序文件路径为：{code_file_path}，请问该程序是否符合要求？请回答是或否。\n".format(code_file_path=code_file_path_target)
            if funcname:
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer,
                    func_name = funcname
                )
                add_catelog_func(catelog_info_LLM)
            else:
                catelog_info_LLM = dict(
                    role = "LLM",
                    uuid = app_uuid,
                    content = answer
                )
                add_catelog(catelog_info_LLM)
        if funcname:
            catelog_list = get_catelog_list_func(app_uuid, funcname)
        else:
            catelog_list = get_catelog_list(app_uuid)
    else:
        answer = ""
        prompt_input = ""
        app_uuid = request.args.get('uuid')
        funcname = request.args.get('funcname', '')
        if funcname:
            # Empty per-function history: seed with a template whose
            # "函数名称" field is pre-filled with the function name.
            if not get_catelog_list_func(app_uuid, funcname):
                json_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "Json/metaappfunc.json")
                with open(json_path, 'r') as rf:
                    json_data = json.load(rf)
                    json_data['函数名称'] = funcname
                    json_string = json.dumps(json_data, indent=4, ensure_ascii=False)
                    # NOTE(review): the "]]" after {funcname} looks like a
                    # typo in the user-facing text (unbalanced bracket).
                    answer = f"下面开展函数[{funcname}]]的生成工作:\n" + json_string + "\n 请将上述JSON内容补全后，请返回json文件"
                    catelog_info_LLM = dict(
                        role = "LLM",
                        uuid = app_uuid,
                        content = answer,
                        func_name = funcname
                    )
                    add_catelog_func(catelog_info_LLM)
            catelog_list = get_catelog_list_func(app_uuid, funcname)
        else:
            # Empty app-level history: seed with the raw template file.
            if not get_catelog_list(app_uuid):
                json_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "Json/metaappfunc.json")
                with open(json_path, 'r') as rf:
                    answer = f"下面开展函数的生成工作:\n" + rf.read() + "\n 请将上述JSON内容补全后，请返回json文件"
                    catelog_info_LLM = dict(
                        role = "LLM",
                        uuid = app_uuid,
                        content = answer,
                    )
                    add_catelog(catelog_info_LLM)
            catelog_list = get_catelog_list(app_uuid)

    return render_template('index.html', answer=answer, prompt=prompt_input, app_uuid=app_uuid, catelog_list=catelog_list, funcname=funcname)