import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics
import os

VER = 'v1.0'

# Derive per-script save/log directories from this file's own location,
# namespaced by the script name and version string.
BASE_DIR = os.path.dirname(__file__)
FILE_NAME = os.path.basename(__file__)
SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
LOG_DIR = os.path.join(BASE_DIR, '_log', FILE_NAME, VER)


class ConvCell(keras.Model):
    """Conv2D -> BatchNorm -> ReLU building block with a bias-free convolution."""

    def __init__(self, filters, ksize=(3, 3), strides=(1, 1), padding='same', **kwargs):
        super().__init__(**kwargs)
        # BatchNorm provides the affine shift, so the conv carries no bias.
        self.conv = layers.Conv2D(filters, ksize, strides, padding, use_bias=False)
        self.bn = layers.BatchNormalization()
        self.relu = layers.ReLU()

    def call(self, inputs, training=None, mask=None):
        """Apply convolution, batch normalization, and ReLU in sequence."""
        out = self.conv(inputs, training=training)
        out = self.bn(out, training=training)
        return self.relu(out, training=training)


# VGG-16 feature-extractor layout: integers are Conv2D filter counts,
# 'm' marks a 2x2 stride-2 max-pool, one group per VGG stage.
cfg = [64, 64, 'm',
       128, 128, 'm',
       256, 256, 256, 'm',
       512, 512, 512, 'm',
       512, 512, 512, 'm']
# Fully-connected head widths; the final entry is the class count.
fc_cfg = [4096, 4096, 1000]


class Vgg16(keras.Model):
    """VGG-16: a stack of ConvCell/MaxPool feature layers plus an FC head.

    Args:
        cfg: sequence of conv filter counts, where the string 'm' marks a
            2x2 stride-2 max-pool.
        fc_cfg: widths of the fully-connected head; the last entry is the
            output (class-count) layer, which receives a softmax activation.
    """

    def __init__(self, cfg, fc_cfg, **kwargs):
        super().__init__(**kwargs)
        # Renamed from `list` to avoid shadowing the builtin.
        conv_layers = []
        for spec in cfg:
            if spec == 'm':
                conv_layers.append(layers.MaxPool2D((2, 2), (2, 2), 'same'))
            else:
                conv_layers.append(ConvCell(spec))
        self.convs = keras.Sequential(conv_layers)
        # Create Flatten once here rather than a fresh instance per call().
        self.flatten = layers.Flatten()
        fc_layers = []
        for i, units in enumerate(fc_cfg):
            # BUG FIX: the original tested `i == len(list) - 1` against the
            # partially-built layer list, i.e. `i == i - 1`, which is never
            # true — so the final layer never got softmax. Compare against
            # the config length instead.
            if i == len(fc_cfg) - 1:
                act = activations.softmax
            else:
                act = activations.relu
            fc_layers.append(layers.Dense(units, activation=act))
        self.fcs = keras.Sequential(fc_layers)

    def call(self, inputs, training=None, mask=None):
        """Run features, flatten, then the classifier head; returns softmax probs."""
        x = self.convs(inputs, training=training)
        x = self.flatten(x)
        x = self.fcs(x, training=training)
        return x


# Smoke test: build the network for 224x224 RGB input, print its summary,
# and run a dummy batch through it.
model = Vgg16(cfg, fc_cfg)
model.build(input_shape=(None, 224, 224, 3))
model.summary()

# Dummy batch of 4 images; expected output shape is (4, fc_cfg[-1]).
x = tf.zeros((4, 224, 224, 3), dtype=tf.float32)
pred = model(x)
print('pred:', tf.shape(pred))
