import os

import tensorflow as tf
from tensorflow.python.keras import Input, Model
from tensorflow.python.keras.applications.densenet import DenseNet169, DenseNet121
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.xception import Xception
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.applications.resnet import ResNet50, ResNet152
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, Rescaling, BatchNormalization, AvgPool2D
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Flatten, GlobalMaxPooling2D, \
    MaxPooling2D, Conv2D
from tensorflow.python.keras import regularizers, Input, Model

import tensorflow as tf
from tensorflow.python.keras import Input, Model
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.applications.resnet import ResNet152

from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, GlobalMaxPooling2D, Concatenate, Dropout
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, Rescaling
# from tensorflow.python.layers.pooling import AvgPool2D

from util import read_data, get_data, get_data_new
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, GlobalMaxPooling2D, Concatenate, Dropout
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.callbacks import EarlyStopping

from util import read_data, get_data, get_data_new

# Let tf.data pick prefetch buffer sizes dynamically at runtime.
AUTOTUNE = tf.data.experimental.AUTOTUNE
def CoordAtt(x, reduction = 32):
    """Apply a Coordinate Attention block to a 4-D feature map.

    Pools the input along each spatial axis separately, encodes the two
    direction-aware descriptors with a shared 1x1 conv, then produces a
    per-row and a per-column sigmoid attention map that rescale ``x``.

    Args:
        x: 4-D tensor of shape (batch, h, w, c), channels-last. The
            static h, w, c must be known at graph-construction time.
        reduction: channel-reduction ratio for the bottleneck conv; the
            bottleneck width is ``max(8, c // reduction)``.

    Returns:
        Tensor with the same shape as ``x``, rescaled by the attention maps.

    Note:
        The Conv2D layers use fixed names ('ca_conv1'..'ca_conv3'), so the
        block can only be instantiated once per model.
    """

    def coord_act(t):
        # hard-swish: t * relu6(t + 3) / 6
        return t * (tf.nn.relu6(t + 3) / 6)

    # Batch dim is unused (and may be None for a symbolic tensor).
    _, h, w, c = x.get_shape().as_list()

    x_h = AvgPool2D(pool_size=(1, w), strides = 1)(x)   # (b, h, 1, c): pooled over width
    x_w = AvgPool2D(pool_size=(h, 1), strides = 1)(x)   # (b, 1, w, c): pooled over height
    x_w = tf.transpose(x_w, [0, 2, 1, 3])               # (b, w, 1, c) so both stack on axis 1

    y = tf.concat([x_h, x_w], axis=1)                   # (b, h + w, 1, c)
    mip = max(8, c // reduction)
    y = Conv2D(mip, (1, 1), strides=1, activation=coord_act, name='ca_conv1')(y)

    # Split back into the height part (h rows) and the width part (w rows).
    # BUG FIX: the original used num_or_size_splits=2 (two *equal* halves),
    # which is wrong — and fails to build — whenever h != w.
    x_h, x_w = tf.split(y, num_or_size_splits=[h, w], axis=1)
    x_w = tf.transpose(x_w, [0, 2, 1, 3])
    a_h = Conv2D(c, (1, 1), strides=1, activation=tf.nn.sigmoid, name='ca_conv2')(x_h)
    a_w = Conv2D(c, (1, 1), strides=1, activation=tf.nn.sigmoid, name='ca_conv3')(x_w)

    # Broadcast-multiply: a_h is (b, h, 1, c), a_w is (b, 1, w, c).
    return x * a_h * a_w


if __name__ == '__main__':
    train_ds, val_ds = get_data()

    # Input pipeline: cache decoded images, shuffle the training set, and
    # prefetch so the GPU is not starved.
    train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size = AUTOTUNE)
    val_ds = val_ds.cache().prefetch(buffer_size = AUTOTUNE)

    # Scale pixels from [0, 255] to [0, 1].
    # BUG FIX: the original built a normalized dataset but then trained on
    # the raw one; apply the rescaling to both train and validation splits.
    normalization_layer = Rescaling(1. / 255)
    train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
    val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))

    # Two ImageNet backbones sharing one input; their pooled features are
    # concatenated and refined with coordinate attention.
    input_layer = Input(shape = (300, 300, 3))
    resnet = ResNet152(include_top = False, input_tensor = input_layer,
                       input_shape = (300, 300, 3))
    xception = Xception(include_top = False, input_tensor = input_layer,
                        input_shape = (300, 300, 3))

    # BUG FIX: the original set `.trainable = False` on the concatenated
    # *tensor*, which is a silent no-op. Freeze the backbone models so only
    # the classification head is trained.
    resnet.trainable = False
    xception.trainable = False

    top1_model = MaxPooling2D(data_format = 'channels_last')(resnet.output)
    top2_model = MaxPooling2D(data_format = 'channels_last')(xception.output)
    concatenate_model = Concatenate(axis = 1)([top1_model, top2_model])

    out = CoordAtt(concatenate_model)
    out = Flatten()(out)
    top_model = Dense(units = 512, activation = "relu")(out)
    top_model = BatchNormalization()(top_model)
    top_model = Dense(units = 256, activation = "relu")(top_model)
    top_model = BatchNormalization()(top_model)
    # Two-way softmax with integer labels (sparse categorical loss below).
    top_model = Dense(units = 2, activation = "softmax")(top_model)
    model = Model(inputs = input_layer, outputs = top_model)

    model.compile(optimizer = 'adam',
                  loss = 'sparse_categorical_crossentropy',
                  metrics = ['accuracy'])
    early_stopping = EarlyStopping(
        monitor = 'val_accuracy',
        verbose = 1,
        patience = 40,
        restore_best_weights = True
    )
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(min_lr=0.00001,
                                                     factor=0.2)

    # Inverse-frequency class weights so the loss treats both classes as
    # equally important despite imbalance in original_data/.
    num_0 = len(os.listdir('original_data/0'))
    num_1 = len(os.listdir('original_data/1'))
    total = num_0 + num_1
    weight_for_0 = total / num_0 / 2.0
    weight_for_1 = total / num_1 / 2.0
    class_weight = {0: weight_for_0, 1: weight_for_1}
    print(class_weight)

    # 2000 epochs is an upper bound; EarlyStopping (patience=40) ends
    # training long before that in practice — be patient.
    # BUG FIX: the class weights above were computed but never passed to fit.
    history = model.fit(train_ds, epochs=2000, callbacks=[early_stopping, reduce_lr],
                        validation_data=val_ds, class_weight=class_weight)
    # NOTE(review): filename says ResNet50 but the model is ResNet152 +
    # Xception; kept as-is for downstream compatibility — consider renaming.
    model.save('ResNet50.h5')
