from sys import argv
from numpy import float32,squeeze,expand_dims
from json import load
from io import StringIO

from copy import deepcopy
from onnxruntime import InferenceSession
from flask import Flask, request
from pandas import read_csv

PORT = 5555
CHECK_POINTS = './checkpoints/'

# Allow overriding the listening port via the first CLI argument.
# argv entries are always strings, so `isinstance(argv[1], int)` could never
# be True; parse with int() and keep the default on a non-numeric argument.
if len(argv) > 1:
    try:
        PORT = int(argv[1])
    except ValueError:
        pass  # non-numeric argument: keep the default port

# Flask application object serving the model-inference endpoints.
app = Flask(__name__, instance_relative_config=True)
app.config['SECRET_KEY'] = 'model service'

# Load model metadata and ONNX inference sessions from CHECK_POINTS/models.json.
def load_model() -> tuple[dict[str, dict[str, InferenceSession]], dict[str, dict]]:
    """Load every service described in ``models.json``.

    Returns:
        ``(model_infer, model_info)`` where ``model_infer`` maps
        service_type -> {model_name: InferenceSession}, and ``model_info``
        maps service_type -> the service's metadata dict (with the 'onnx'
        file map replaced by a 'models' list of loaded model names).
    """
    model_infer: dict[str, dict[str, InferenceSession]] = {}
    model_info: dict[str, dict] = {}
    required_keys = {'input_shape', 'output_shape', 'onnx', 'columns'}
    with open(f'{CHECK_POINTS}models.json') as jsonf:
        services = load(jsonf)
    for service in services:
        # Skip malformed entries rather than aborting the whole load.
        if not required_keys.issubset(service):
            print('warning: %s with wrong format' % service['service_type'])
            continue
        service_type = service['service_type']
        model_infer[service_type] = {}
        model_names = []
        for model_name, model_file in service['onnx'].items():
            try:
                model_infer[service_type][model_name] = InferenceSession(CHECK_POINTS + model_file)
                model_names.append(model_name)
            except Exception:
                # Best-effort: one unreadable model file must not abort startup.
                print('fail to load model:%s' % model_name)

        # Replace the file map with the list of models that actually loaded.
        del service['onnx']
        service['models'] = model_names.copy()
        model_info[service_type] = service
    return model_infer, model_info

# Load everything once at startup and stash it in the Flask config.
MODEL_INFER, MODEL_INFO = load_model()
app.config.from_mapping(MODEL_INFER=MODEL_INFER, MODEL_INFO=MODEL_INFO)

# Return the loaded model metadata as a list.
def get_models() -> list[dict]:
    """Return the metadata dict of every loaded service.

    Only the values are needed: each info dict already carries its own
    'service_type' key, so the mapping keys add nothing.
    """
    return list(app.config['MODEL_INFO'].values())


# Root endpoint: returns information about every loaded model.
@app.route('/', methods = ['GET'])
def index():
    """List the metadata of all available services."""
    return get_models()

# Liveness probe: confirms the service is reachable.
@app.route("/ping")
def ping():
    """Always respond with a fixed availability string."""
    return "available!"

# Reload the model list from disk, replacing the cached sessions and metadata.
@app.route('/refresh')
def refresh():
    """Re-read models.json and rebuild all inference sessions.

    The config entries are overwritten rather than deleted first, so an
    exception inside load_model() cannot leave the app with its
    MODEL_INFER/MODEL_INFO keys removed and every endpoint broken.
    """
    model_infer, model_info = load_model()
    app.config.from_mapping(MODEL_INFER=model_infer, MODEL_INFO=model_info)
    return get_models()

# Inference endpoint: POST /<service_type>/<name> with a form field 'data'
# containing CSV text.
@app.route('/<string:service_type>/<string:name>', methods = ['POST'])
def model(service_type: str, name: str):
    """Run one ONNX model on CSV data posted in the 'data' form field.

    Returns {'ans': [...]} on success, or {'err': ...} for an unknown
    service type or model name.
    """
    # Validate the service type and model name against the loaded metadata.
    if service_type not in app.config['MODEL_INFO'].keys():
        return {'err':'unknown service_type.'}
    if name not in app.config['MODEL_INFO'][service_type]['models']:
        return {'err':'unknown model name in %s.'%service_type}
    service_info = app.config['MODEL_INFO'][service_type]
    # Pull the raw CSV payload out of the form data.
    # NOTE(review): raises KeyError (-> HTTP 500) when 'data' is absent — confirm intended.
    data = request.form.to_dict()
    data = data['data']

    # Parse the CSV, keep only the configured columns, and cast to float32
    # (presumably the dtype the ONNX models expect — confirm per model).
    data = read_csv(StringIO(data))[service_info["columns"]].values.astype(float32)

    # Reshape the flat values into the model's expected input shape.
    # input_shape appears to use 0 as the wildcard (batch) dimension: the
    # product of the non-zero dims is the element count per sample, and the
    # batch size is derived from the total element count.
    batch_shape = deepcopy(service_info['input_shape'])
    per_batch_num = 1
    for i in batch_shape:
        if 0 == i:
            continue
        per_batch_num *= i
    # assumes the batch dimension is axis 0 — TODO confirm against models.json
    batch_shape[0] = data.size // per_batch_num
    data = data.reshape(batch_shape)
    # Run the session, feeding its first input and reading its first output only.
    sess = app.config['MODEL_INFER'][service_type][name]
    ans = sess.run(
        [sess.get_outputs()[0].name],
        {sess.get_inputs()[0].name:data})[0]
    ans = squeeze(ans)
    # squeeze() on a single value yields a 0-d array; restore one dimension
    # so tolist() returns a list rather than a bare scalar.
    if len(ans.shape) == 0:
        ans = expand_dims(ans,0)
    return {"ans":ans.tolist()}

# Bind on all interfaces so the service is reachable from other hosts.
app.run(host='0.0.0.0', port=PORT)
