import tensorflow as tf
from tensorflow import keras
import numpy as np

import h5py
import tf_utils

class Load_data:
    """Loads the SIGNS dataset via tf_utils and serves random mini-batches."""

    def __init__(self):
        (self.train_x, self.train_y,
         self.test_x, self.test_y, self.classes) = tf_utils.load_dataset()
        # Raw shapes: (1080, 64, 64, 3) (1, 1080) (120, 64, 64, 3) (1, 120)
        # Scale pixel values into [0, 1].
        self.train_x = self.train_x.astype(np.float32) / 255.0
        self.test_x = self.test_x.astype(np.float32) / 255.0
        # Labels arrive as (1, N) row vectors; flatten them to shape (N,).
        self.train_y = self.train_y[0].astype(np.int32)
        self.test_y = self.test_y[0].astype(np.int32)
        self.train_num = self.train_x.shape[0]
        self.test_num = self.test_x.shape[0]

    def get_mini_batch(self, batch_size):
        """Return a random (images, labels) batch, sampled with replacement."""
        idx = np.random.randint(0, self.train_num, batch_size)
        return self.train_x[idx, :], self.train_y[idx]


class MyModel(keras.Model):
    """Fully-connected classifier: Flatten -> Dense(100, ReLU) -> Dense(6) -> softmax.

    `call` returns per-class probabilities, so downstream losses must use
    the default `from_logits=False`.
    """

    def __init__(self):
        super().__init__()
        self.layer1 = keras.layers.Flatten()
        self.layer3 = keras.layers.Dense(units=100, activation=tf.nn.relu)
        self.layer4 = keras.layers.Dense(units=6, activation=None)

    def call(self, inputs):
        """Forward pass; returns softmax probabilities of shape (batch, 6)."""
        hidden = self.layer3(self.layer1(inputs))
        logits = self.layer4(hidden)
        return tf.nn.softmax(logits)




if __name__ == '__main__':
    epochs = 2000
    learning_rate = 0.00008
    batch_size = 64

    data = Load_data()
    model = MyModel()
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate)

    # Train: one random mini-batch per step, for roughly `epochs` passes
    # over the training set.
    cycle_times = data.train_num // batch_size * epochs
    for i in range(cycle_times):
        X, y = data.get_mini_batch(batch_size)
        with tf.GradientTape() as tape:
            y_pred = model(X)
            # Model already outputs probabilities (softmax in call), so the
            # loss keeps its default from_logits=False.
            loss = tf.reduce_mean(
                keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred)
            )
        # Differentiate w.r.t. trainable variables only; `model.variables`
        # would also include non-trainable state, pairing None gradients
        # with it in apply_gradients.
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
        if i % 200 == 0:
            # float(loss): applying a '.5f' format spec directly to an
            # EagerTensor raises TypeError on several TF versions.
            print('第{0}次更新，此时loss={1:.5f}'.format(i, float(loss)))

    # Evaluate on the FULL test set. The original loop ran
    # `test_num // batch_size` (= 1 for 120 samples, batch 64) full batches
    # and silently dropped the last `test_num % batch_size` examples;
    # stepping start indices by batch_size covers the final partial batch too.
    accuracy_metric = keras.metrics.SparseCategoricalAccuracy()
    for start_index in range(0, data.test_num, batch_size):
        end_index = start_index + batch_size
        test_y_pred = model(data.test_x[start_index:end_index])
        accuracy_metric.update_state(
            y_true=data.test_y[start_index:end_index], y_pred=test_y_pred
        )
    print('准确度：', accuracy_metric.result())

    # Persist the trained model in TensorFlow SavedModel format.
    model.save('trained_model', save_format='tf')
    print('模型已保存。')