from flask import Flask, request, jsonify, render_template
import subprocess
import argparse

app = Flask(__name__)

# Path to the llama-cli executable and to the GGUF model file.
# Both start as None and are rebound from command-line arguments in the
# __main__ block below before the server starts serving requests.
LLAMA_CLI_PATH = None
MODEL_PATH = None

@app.route('/<name>')
def index(name):
    """Render the landing page, forwarding the URL segment as *name*."""
    context = {"name": name}
    return render_template('index.html', **context)

@app.route('/generate', methods=['POST'])
def generate():
    """Run llama-cli on the submitted prompt and return its stdout.

    Expects a JSON body: {"prompt": "...", "n_predict": <int, optional>}.
    ``n_predict`` defaults to 512 (the original hard-coded token count).

    Returns:
        200 with {"response": <stdout>} on success,
        400 with {"error": ...} when the body is missing or has no prompt,
        502 with {"error": <stderr>} when llama-cli exits non-zero.
    """
    # silent=True returns None instead of raising on a missing/invalid
    # JSON body, so we can reply with a clean 400 rather than a 500.
    payload = request.get_json(silent=True)
    if not payload or 'prompt' not in payload:
        return jsonify({"error": "JSON body with a 'prompt' field is required"}), 400

    prompt = payload['prompt']
    n_predict = int(payload.get('n_predict', 512))

    command = [
        LLAMA_CLI_PATH,
        "-m", MODEL_PATH,
        "-p", prompt,
        "-n", str(n_predict),
    ]
    # List form keeps shell=False, so the prompt is never shell-interpreted.
    app.logger.info("running llama-cli: %s", command)
    result = subprocess.run(command, capture_output=True, text=True)
    if result.returncode != 0:
        # Previously a failed run silently returned empty stdout with 200;
        # surface stderr so the client can see what went wrong.
        return jsonify({"error": result.stderr}), 502
    return jsonify({"response": result.stdout})

def parse_arguments(argv=None):
    """Parse the server's command-line options.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
              case argparse reads sys.argv[1:] — identical to the previous
              behavior. Passing an explicit list makes this unit-testable.

    Returns:
        argparse.Namespace with ``llama_cli_path`` and ``model_path``.

    Raises:
        SystemExit: if either required option is missing (argparse default).
    """
    parser = argparse.ArgumentParser(description='Flask app with llama-cli path as argument')
    parser.add_argument('--llama-cli-path', type=str, required=True, help='Path to llama-cli executable')
    parser.add_argument('--model-path', type=str, required=True, help='Path to llm model')
    # Return the parsed command-line arguments.
    return parser.parse_args(argv)

if __name__ == '__main__':
    args = parse_arguments()
    print(["parse_arguments=", args])
    # These assignments execute at module level, so they rebind the module
    # globals that generate() reads. Do not move this logic into a main()
    # function without adding `global` declarations — the route would then
    # see the original None values.
    LLAMA_CLI_PATH = args.llama_cli_path
    MODEL_PATH = args.model_path

    # 0.0.0.0 listens on all interfaces; this is Flask's development server —
    # use a production WSGI server (gunicorn/uwsgi) outside of development.
    app.run(host='0.0.0.0', port=5000)