import numpy as np
import tensorflow.keras as K
import keras
from keras import models
import joblib


class Mymodel:
    """Single-hidden-layer feed-forward softmax classifier built on Keras.

    Wraps a ``Sequential`` model (Dense-relu -> Dense-softmax) trained with
    categorical cross-entropy. The class labels produced by :meth:`pre` are
    AQI pollution categories (see the label list there).
    """

    def __init__(self, *args):
        """Build or load the underlying Keras model.

        One positional argument: a path to a previously saved model,
        loaded as-is. Three positional arguments: ``(num_in, num_hidden,
        num_out)`` — input features, hidden units, output classes — used
        to build and compile a fresh network.
        """
        if len(args) == 1:
            self.model = models.load_model(args[0])
        else:
            num_in, num_hidden, num_out = args[0], args[1], args[2]
            # Fixed seed so weight initialization is reproducible.
            init = K.initializers.glorot_uniform(seed=1)
            self.model = K.models.Sequential()
            self.model.add(K.layers.Dense(units=num_hidden, input_dim=num_in,
                                          kernel_initializer=init,
                                          activation='relu'))
            self.model.add(K.layers.Dense(units=num_out, kernel_initializer=init,
                                          activation='softmax'))
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=K.optimizers.Adam(),
                               metrics=['accuracy'])

    def train(self, train_x, train_y, lr=0.001, batch_size=32, epochs=1, shuffle=True, verbose=1):
        """Re-compile with learning rate ``lr`` and fit on the given data.

        Returns the Keras ``History`` object from ``fit`` (previously
        computed but discarded — returning it is backward-compatible).
        """
        # Re-compiling swaps in a fresh Adam optimizer at the requested
        # learning rate; optimizer state from earlier calls is reset.
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=K.optimizers.Adam(lr),
                           metrics=['accuracy'])
        history = self.model.fit(train_x, train_y, batch_size=batch_size,
                                 epochs=epochs, shuffle=shuffle, verbose=verbose)
        print('Training finished \n')
        return history

    def test(self, test_x, test_y, verbose=0):
        """Evaluate on held-out data and print loss and accuracy.

        ``scores`` replaces the original local name ``eval``, which
        shadowed the builtin of the same name.
        """
        scores = self.model.evaluate(test_x, test_y, verbose=verbose)
        print("Evaluation on test data: loss = %0.6f accuracy = %0.2f%% \n"
              % (scores[0], scores[1] * 100))

    def pre(self, x):
        """Predict the class index for a single feature vector ``x``.

        ``x`` is reshaped to one row (any feature count — generalized
        from the previously hard-coded 7), scaled with the persisted
        ``scaler.pkl`` (assumed to be the scaler fitted on the training
        data — TODO confirm), and fed through the model. Returns the
        argmax class index as an int.
        """
        x = np.array(x, dtype=np.float32).reshape(1, -1)
        scaler = joblib.load('scaler.pkl')
        x = scaler.transform(x)
        probs = self.model.predict(x)[0]
        # Human-readable labels, index-aligned with the model output:
        # excellent / good / light / moderate / heavy / severe pollution.
        k = ['优', '良', '轻度污染', '中度污染', '重度污染', '严重污染']
        return int(np.argmax(probs))

    def save(self, filename='mymodel.h5'):
        """Persist the full model (architecture + weights) to ``filename``."""
        self.model.save(filename)

    def load_model(self, filename='mymodel.h5'):
        """Replace ``self.model`` with one loaded from ``filename``.

        Uses the same ``models`` import as ``__init__`` for consistency
        (the original reached through ``keras.models`` here).
        """
        self.model = models.load_model(filename)
