import tensorflow as tf
import re
import os
import json

from tools.data_gen import preprocess_img, preprocess_img_from_Url
from model.train_model import model_fn

import numpy as np

# import serial
# OPTIONAL: control usage of GPU.
# Pin TensorFlow to GPU 0 and suppress all C++-side log output below ERROR level.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Let the process grab GPU memory on demand instead of all at once,
# and cap it at 70% of the device's total memory.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.7
# NOTE(review): `sess` is created for its side effect of applying the config;
# it is not referenced again in this file — presumably Keras picks it up. TODO confirm.
sess = tf.compat.v1.Session(config=config)

# Pretrained Keras weights loaded by init_artificial_neural_network below.
h5_weights_path = './jupyterLab/output_model/best.h5'


def init_artificial_neural_network(FLAGS):
    """Build the network from FLAGS and load the pretrained weights.

    Constructs the model via ``model_fn`` and restores layer weights
    (matched by layer name) from ``h5_weights_path``.
    """
    network = model_fn(FLAGS)
    network.load_weights(h5_weights_path, by_name=True)
    return network


# 测试图片
# Predict the garbage class of an image (local path or http(s) URL).
def prediction_result_from_img(model, imgurl, FLAGS):
    """Classify one image and return its human-readable category.

    Args:
        model: a Keras model returned by ``init_artificial_neural_network``.
        imgurl: local file path, or an http/https URL, of the image.
        FLAGS: config object; only ``FLAGS.input_size`` is read here.

    Returns:
        The category string looked up in garbage_classify_rule.json for
        the predicted label.
    """
    # Load the label -> category-name mapping. The file contains Chinese
    # text, so pin the encoding rather than relying on the locale default.
    with open("./jupyterLab/data/garbage_classify_rule.json", 'r',
              encoding='utf-8') as load_f:
        load_dict = json.load(load_f)

    # Remote URLs and local paths go through different preprocessors;
    # both return `tta_num` augmented views of the image.
    if re.match(r'^https?:/{2}\w.+$', imgurl):
        test_data = preprocess_img_from_Url(imgurl, FLAGS.input_size)
    else:
        test_data = preprocess_img(imgurl, FLAGS.input_size)

    # Test-time augmentation: average (sum) the probability vectors over
    # all augmented views.
    # BUG FIX: the original `predictions = [0 * tta_num]` built the Python
    # list [0], and `predictions += prediction` *extended* that list with
    # each probability vector instead of summing element-wise, so argmax
    # ran over a 1 + tta_num*num_classes element list and returned a
    # meaningless index. Accumulate with numpy addition instead.
    tta_num = 5
    predictions = 0
    for i in range(tta_num):
        x_test = test_data[i]
        x_test = x_test[np.newaxis, :, :, :]  # add batch dimension
        prediction = model.predict(x_test)[0]
        predictions = predictions + prediction  # element-wise numpy sum
    pred_label = np.argmax(predictions, axis=0)

    print('-------深度学习垃圾分类预测结果----------')
    print(pred_label)
    print(load_dict[str(pred_label)])
    print('-------深度学习垃圾分类预测结果--------')
    return load_dict[str(pred_label)]