import tensorflow as tf
import os
from keras.optimizers import Adam
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, Dropout, BatchNormalization, GlobalAveragePooling2D
from keras.models import Model
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras import regularizers


# GPU device and memory configuration (TF1-compat style session setup).
# Pin TensorFlow to the first GPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.compat.v1.ConfigProto()
# Allocate GPU memory on demand instead of grabbing it all at startup.
config.gpu_options.allow_growth = True
# Cap this process at 70% of the GPU's total memory.
config.gpu_options.per_process_gpu_memory_fraction = 0.7
sess = tf.compat.v1.Session(config=config)


def add_new_last_layer(base_model, num_classes):
    """Attach a new classification head to a headless backbone.

    Head architecture: GlobalAveragePooling2D -> Dropout(0.5) ->
    Dense(512, relu, L2=1e-4) -> BatchNormalization -> Dropout(0.5) ->
    Dense(num_classes, softmax).

    Args:
        base_model: Keras model whose ``output`` is a 4D feature map
            (e.g. ResNet50 built with ``include_top=False``).
        num_classes: number of target classes for the softmax layer.

    Returns:
        A new ``Model`` mapping ``base_model.input`` to the softmax output.
    """
    x = base_model.output
    # Collapse spatial dimensions: (batch, H, W, C) -> (batch, C).
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    # Dropout zeroes 50% of the units per training step to curb overfitting.
    x = Dropout(0.5, name='dropout1')(x)
    # Fully connected layer with L2 weight decay, then batch norm and a
    # second dropout before the classifier.
    x = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.0001), name='fc2')(x)
    x = BatchNormalization(name='bn_fc_01')(x)
    x = Dropout(0.5, name='dropout2')(x)
    x = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=x)
    return model


# 模型微调
def setup_to_finetune(FLAGS, model, layer_number=149):
    """Freeze the lower layers and recompile the model for fine-tuning.

    Layers with index < ``layer_number`` are frozen; all layers from
    ``layer_number`` onward become trainable. The default 149 presumably
    targets the last residual stage of ResNet50 — TODO confirm against
    the assembled model's actual layer count.

    Args:
        FLAGS: config object; only ``FLAGS.learning_rate`` is read here.
        model: the compiled transfer-learning model from ``model_fn``.
        layer_number: index of the first layer to unfreeze.
    """
    for layer in model.layers[:layer_number]:
        layer.trainable = False
    for layer in model.layers[layer_number:]:
        layer.trainable = True
    # NOTE(review): `lr` and `decay` are legacy Keras-2 Adam arguments;
    # newer Keras renamed `lr` to `learning_rate` and removed `decay`.
    # Kept as-is to match the Keras version this file targets.
    adam = Adam(lr=FLAGS.learning_rate, decay=0.0005)
    # Recompile so the changed trainable flags take effect.
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])


# 模型初始化设置
def model_fn(FLAGS):
    # K.set_learning_phase(0)
    # 引入初始化resnet50模型
    base_model = ResNet50(weights="imagenet",
                          include_top=False,
                          pooling=None,
                          input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                          classes=FLAGS.num_classes)
    for layer in base_model.layers:
        layer.trainable = False
    model = add_new_last_layer(base_model, FLAGS.num_classes)
    model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])
    return model


def train_model(FLAGS, train_sequence, validation_sequence):
    """Run two-phase transfer learning and return both training histories.

    Phase 1 trains only the new head for ``FLAGS.max_epochs`` epochs with
    the backbone frozen. Phase 2 unfreezes the top of the backbone
    (``setup_to_finetune``) and fine-tunes for up to ``2 * max_epochs``
    epochs with checkpointing, LR reduction and early stopping.

    Args:
        FLAGS: config object; reads ``max_epochs`` (and, transitively,
            ``input_size``, ``num_classes``, ``learning_rate``).
        train_sequence: Keras Sequence/generator of training batches.
        validation_sequence: Keras Sequence/generator of validation batches.

    Returns:
        Tuple ``(history_phase1, history_phase2)``.
    """
    model = model_fn(FLAGS)
    # Phase 1: train the randomly initialized head on frozen features.
    history_tl_50 = model.fit_generator(
        train_sequence,
        steps_per_epoch=len(train_sequence),
        epochs=FLAGS.max_epochs,
        verbose=1,
        validation_data=validation_sequence,
        max_queue_size=10,
        shuffle=True
    )
    # Phase 2: unfreeze the upper backbone layers and fine-tune.
    setup_to_finetune(FLAGS, model)
    # ModelCheckpoint fails at save time if the target directory is
    # missing, so make sure it exists before training starts.
    os.makedirs('output_model', exist_ok=True)
    # NOTE(review): ReduceLROnPlateau and EarlyStopping share patience=10,
    # so training may stop on the same epoch the LR would first be reduced
    # — confirm this is intended.
    history_tl = model.fit_generator(
        train_sequence,
        steps_per_epoch=len(train_sequence),
        epochs=FLAGS.max_epochs * 2,
        verbose=1,
        callbacks=[
            ModelCheckpoint('output_model/best.h5',
                            monitor='val_loss', save_best_only=True, mode='min'),
            ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                              patience=10, mode='min'),
            EarlyStopping(monitor='val_loss', patience=10),
        ],
        validation_data=validation_sequence,
        max_queue_size=10,
        shuffle=True
    )
    print('training done!')
    return history_tl_50, history_tl
