import os
import flask
from gpt4all import GPT4All
from flask import Flask, request
from conf import config
import utils.utils as utils
import model.model as model

# WSGI application object; all routes below are registered under config.root_path.
app = Flask(__name__)


@app.route(config.root_path + '/set_model', methods=['POST'])
def set_model():
    """
    Switch the active GPT4All model.

    request body: JSON {"model": "orca-mini-3b.ggmlv3.q4_0.bin"}
    :return: JSON {"message": "ok", "model": <loaded model config>} on success,
             or JSON {"message": <error>} with HTTP 400 for bad input.
    """
    payload = request.get_json(silent=True) or {}
    model_name = payload.get('model')
    if not model_name:
        # Fail fast with a client error instead of crashing inside GPT4All (500).
        return {"message": "missing 'model' field"}, 400
    # model_name is untrusted input that ends up in a filesystem path: reject
    # anything with path separators so "../" cannot escape config.model_path.
    if os.path.basename(model_name) != model_name:
        return {"message": "invalid model name"}, 400
    # allow_download=False below means the file must already exist locally;
    # check up front so the client gets a 400 rather than a load failure.
    if not os.path.isfile(os.path.join(config.model_path, model_name)):
        return {"message": "model file not found: " + model_name}, 400
    config.model_name = model_name
    model.model_ins = GPT4All(model_name=config.model_name, model_path=config.model_path, allow_download=False)
    return {"message": 'ok', "model": model.model_ins.config}


@app.route(config.root_path + '/get_model', methods=['GET'])
def get_model():
    """
    List the model files available under config.model_path.

    :return: JSON {"models": [{"file_name": ..., "size": "<human-readable>"}, ...],
                   "current": <currently configured model name>}
    """
    models = []
    for root, dirs, files in os.walk(config.model_path):
        for file in files:
            # Bug fix: join against `root`, not config.model_path — os.walk
            # recurses into subdirectories, and the old string concatenation
            # raised FileNotFoundError for any file below the top level.
            full_path = os.path.join(root, file)
            models.append(
                {"file_name": file, "size": utils.format_file_size(os.path.getsize(full_path))})
    return {"models": models, "current": config.model_name}


@app.route(config.root_path + '/quest', methods=['POST'])
def quest():
    """
    Ask the current model a question.

    request body: JSON {"quest": "who are you",
                        "max_tokens": 200,      # optional, response limit
                        "streaming": false}     # optional
    :return: plain-text answer; or a text/event-stream of tokens when
             "streaming" is truthy; HTTP 400 when "quest" is missing.
    """
    payload = request.get_json(silent=True) or {}  # parse the body once, not per field
    quest_str = payload.get('quest')
    if not quest_str:
        # Previously a missing prompt crashed inside the model (500); reject it here.
        return {"message": "missing 'quest' field"}, 400
    max_tokens = payload.get('max_tokens') or 200
    streaming = bool(payload.get('streaming'))
    if streaming:
        def gen():
            # Server-Sent Events: one "data:" frame per generated token, then
            # an explicit finish event so clients know the stream is complete.
            for token in model.model_ins.generate(quest_str, max_tokens=max_tokens, streaming=True):
                yield f'data: {token}\n\n'
            yield 'event: finish\ndata: finish.\n\n'

        return flask.Response(gen(), mimetype="text/event-stream")
    else:
        return model.model_ins.generate(prompt=quest_str, max_tokens=max_tokens, streaming=False)


if __name__ == '__main__':
    # Development entry point only. NOTE(review): debug=True enables the
    # Werkzeug interactive debugger, which allows arbitrary code execution
    # if the server is reachable — must not be enabled in production.
    app.debug = True
    app.run()
