# This is the ModelSplit method for AlexNet; you can define your own model by referencing it.

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.optimizers import Adam
import numpy as np
import time


# A custom convolution-kernel layer definition.
class c2(tf.keras.layers.Layer):
    """Custom 5x5 convolution producing 256 output channels with explicit
    [3, 3] spatial padding on each side (batch/channel dims unpadded)."""

    def __init__(self):
        super().__init__()

    def build(self, input_shape):
        # BUG FIX: the original assigned a bare `tf.random.normal` tensor,
        # which is NOT a tf.Variable — it was invisible to the optimizer
        # (untrainable) and to checkpoint/save machinery. Register the kernel
        # properly with add_weight so it is trainable and tracked.
        self.w = self.add_weight(
            name='kernel',
            shape=[5, 5, input_shape[-1], 256],
            initializer=tf.random_normal_initializer(),
            trainable=True,
        )

    def call(self, inputs):
        # Explicit per-dimension padding: [[batch], [height], [width], [channels]].
        return tf.nn.conv2d(inputs, filters=self.w, strides=1,
                            padding=[[0, 0], [3, 3], [3, 3], [0, 0]])


class AlexnetModel():
    """AlexNet built as a keras Sequential model.

    Besides standard train/save/load, this class supports split inference
    (running only a contiguous slice of layers — useful for model
    partitioning between devices) and per-layer time/output-size profiling.
    """

    def __init__(self, nb_classes=1000, input_shape=(224, 224, 3)):
        # Your Sequential model is here (24 layers after the InputLayer).
        self.model = keras.Sequential([
            keras.layers.InputLayer(input_shape=input_shape),
            keras.layers.Conv2D(96, 11, 5),
            keras.layers.ReLU(),
            keras.layers.MaxPooling2D((3, 3), 2),
            keras.layers.BatchNormalization(),

            c2(),
            keras.layers.ReLU(),
            keras.layers.MaxPooling2D((3, 3), 2),
            keras.layers.BatchNormalization(),

            keras.layers.Conv2D(384, 3, 1, padding='same'),
            keras.layers.ReLU(),

            keras.layers.Conv2D(384, 3, 1, padding='same'),
            keras.layers.ReLU(),

            keras.layers.Conv2D(256, 3, 1, padding='same'),
            keras.layers.ReLU(),
            keras.layers.MaxPooling2D((3, 3), 2),

            keras.layers.Flatten(),
            keras.layers.Dense(4096),
            keras.layers.ReLU(),
            keras.layers.Dropout(0.25),

            keras.layers.Dense(4096),
            keras.layers.ReLU(),
            keras.layers.Dropout(0.25),

            keras.layers.Dense(nb_classes),
            keras.layers.Softmax(),
        ])

    def train(self, dataset, batch_size=32, nb_epoch=50):
        '''Compile and fit on dataset.train_* with dataset.valid_* as
        validation data. `dataset` is expected to expose train_images,
        train_labels, valid_images, valid_labels (numpy arrays).'''
        # sgd = SGD(lr=0.01, decay=1e-6,
        #           momentum=0.9, nesterov=True)  # SGD+momentum kept for reference
        # BUG FIX: `lr` is deprecated (and removed in recent Keras);
        # the supported keyword is `learning_rate`.
        adam = Adam(learning_rate=1e-4)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=adam,
                           metrics=['accuracy'])  # actual model configuration

        # BUG FIX: the `batch_size` argument was silently ignored, and
        # steps_per_epoch was set to the SAMPLE count (so each epoch ran
        # one step per sample). Let fit() derive steps from batch_size.
        self.model.fit(dataset.train_images,
                       dataset.train_labels,
                       batch_size=batch_size,
                       epochs=nb_epoch,
                       validation_data=(dataset.valid_images, dataset.valid_labels),
                       shuffle=True)

    def save_model(self, file_path='./model/alexnetmodel.h5'):
        """Persist the whole model (architecture + weights) to `file_path`."""
        self.model.save(file_path)

    def load_model(self, file_path='./model/alexnetmodel.h5'):
        """Replace self.model with the model saved at `file_path`."""
        # BUG FIX: keras.Model has no `load_model` attribute; the loader
        # lives in keras.models.
        self.model = keras.models.load_model(file_path)

    def splitpredict(self, x, startlayer, endlayer=None):
        """Run only layers [startlayer, endlayer) on `x` and return a numpy
        array. `x` is an input tensor/array shaped for layer `startlayer`
        (e.g. [None, 277, 277, 3] when starting from layer 0).

        `endlayer` now defaults to the model's full depth instead of the
        previous hard-coded 24 (identical for this architecture, but robust
        to user-defined models of a different depth)."""
        if endlayer is None:
            endlayer = len(self.model.layers)
        if startlayer < 0 or endlayer > len(self.model.layers) or startlayer > endlayer:
            raise Exception("Layer range wrong, please check")
        for i in range(startlayer, endlayer):
            x = self.model.layers[i](x)
        return x.numpy()

    def estimateTimeAndData(self, nums=100):
        """Profile the model layer by layer.

        Runs `nums` forward passes on random input and returns a pair of
        numpy arrays: (average CPU time per layer in seconds, average output
        size per layer in element count).
        """
        # BUG FIX: `nums` was declared but the loop and the averaging both
        # hard-coded 100; it is now an honest (backward-compatible) parameter.
        len_model = len(self.model.layers)
        time_layer = [0.0] * len_model
        datasize = [0] * len_model
        for _ in range(nums):
            # NOTE(review): input is 277x277 although the model declares
            # 224x224 — this works because layers are invoked individually,
            # but confirm the intended profiling resolution.
            x = tf.random.normal([10, 277, 277, 3], dtype=tf.float32)
            for d in range(len_model):
                start = time.process_time()
                x = self.model.layers[d](x)
                end = time.process_time()
                time_layer[d] += (end - start)
                datasize[d] += (tf.size(x).numpy())
        return np.array(time_layer) / nums, np.array(datasize) / nums


if __name__ == '__main__':
    # Quick self-profile: build the network, time every layer on random
    # input, and report the averaged measurements.
    sample = tf.random.normal([10, 277, 277, 3], dtype=tf.float32)
    net = AlexnetModel()
    # net.model.summary()

    mean_time, mean_size = net.estimateTimeAndData()
    print(tf.size(sample).numpy(), mean_size)
    print(sum(mean_time))
# for i in range(len(alexnet.model.layers)):
# print('The {} layer\'s time cost is {}, output data size is {}'.format(i+1, avg_time[i], avg_data[i]))

# print("test the splitpredict")
# index = 13
# x = alexnet.splitpredict(x, 0, index)
# print('执行到第{}层的结果:{}'.format(index, x.size))
# x = alexnet.splitpredict(x, index)
# print('继续执行后续层结果:{}'.format(x))
# alexnet.summary()

# 模型每层执行时间和输出数据大小
# time_layer = []
# datasize = []
# for d in alexnet.layers:
#     start = time.process_time()
#     x = d(x)
#     end = time.process_time()
#     time_layer.append(end - start)
#     datasize.append(tf.size(x).numpy())
# print(time_layer)
# print(datasize)

# # 执行到指定层。
# index = 13
# for i in range(index):
#     x = alexnet.layers[i](x)
# print('执行到第{}层的结果:{}'.format(index, x.numpy().size))

# # 执行剩下的所有层
# for i in range(index, len(alexnet.layers)):
#     x = alexnet.layers[i](x)
# print('执行到第{}层的结果:{}'.format(len(alexnet.layers), x.numpy().size))
