import os

import numpy as np
import pandas as pd  # used by the results-export code at the bottom of the script (was missing)
import tensorflow as tf

import keras_metrics as km
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score

# Public tf.keras imports.  `InputSpec` and `K` keep these bindings; the other
# names imported here are deliberately re-imported from tensorflow.python.keras
# below, so the python.keras implementations win — exactly as in the original
# import order.
from tensorflow.keras import backend as K, Input, Model
from tensorflow.keras.applications import ResNet152, Xception
from tensorflow.keras.layers import InputSpec, Concatenate, Dense, Dropout, GlobalAveragePooling2D, Reshape, Conv2D, \
    Flatten, GlobalMaxPooling2D

# NOTE(review): `tensorflow.python.keras.api.keras` is a private path that may
# not exist in newer TF releases — confirm against the pinned TF version.
from tensorflow.python.keras.api.keras import regularizers
from tensorflow.python.keras import regularizers, Input, Model
from tensorflow.python.keras.applications.densenet import DenseNet169, DenseNet121
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.resnet import ResNet50, ResNet152
from tensorflow.python.keras.applications.xception import Xception
from tensorflow.python.keras.callbacks import EarlyStopping
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.layers import Activation
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Flatten, GlobalMaxPooling2D, \
    MaxPooling2D, Conv2D
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, GlobalMaxPooling2D, Concatenate, Dropout
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.utils import conv_utils

# from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, Rescaling
from util import read_data, get_data, get_data_new, get_data_newfour
class Metrics(Callback):
    """Keras callback that records sklearn F1/recall/precision on the
    validation data at the end of every epoch.

    NOTE(review): this relies on ``self.validation_data``, which Keras only
    populates in older TF/Keras versions — confirm it is set under the TF
    version this project pins, otherwise ``on_epoch_end`` will fail.
    """

    def on_train_begin(self, logs=None):
        # Per-epoch metric histories (one entry appended per epoch).
        # `logs=None` instead of the original mutable default `logs={}`.
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []

    def on_epoch_end(self, epoch, logs=None):
        # Round probabilistic outputs to hard 0/1 label indicators so the
        # sklearn metrics can compare them against the one-hot targets.
        val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
        val_targ = self.validation_data[1]

        _val_f1 = f1_score(val_targ, val_predict, average='micro')
        # average=None returns one score per class rather than an aggregate.
        _val_recall = recall_score(val_targ, val_predict, average=None)
        _val_precision = precision_score(val_targ, val_predict, average=None)
        self.val_f1s.append(_val_f1)
        self.val_recalls.append(_val_recall)
        self.val_precisions.append(_val_precision)

        print("— val_f1: %f "%_val_f1)

# Kept for the (currently commented-out) tf.data pipeline tuning below.
AUTOTUNE = tf.data.experimental.AUTOTUNE

if __name__ == '__main__':
    # ------------------------------------------------------------------
    # Data
    # ------------------------------------------------------------------
    train_ds, val_ds = get_data_newfour()

    # train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size = AUTOTUNE)
    # val_ds = val_ds.cache().prefetch(buffer_size = AUTOTUNE)
    # normalization_layer = Rescaling(1. / 255)
    # normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
    # image_batch, labels_batch = next(iter(normalized_ds))

    # ------------------------------------------------------------------
    # Two-backbone feature extractor: ResNet152 and Xception share one input.
    # ------------------------------------------------------------------
    input_layer = Input(shape = (300, 300, 3))
    resNet = ResNet152(include_top=False, input_tensor=input_layer,
                       input_shape=(300, 300, 3))
    xception = Xception(include_top=False, input_tensor=input_layer,
                        input_shape=(300, 300, 3))
    top1_model = MaxPooling2D(data_format = 'channels_last')(resNet.output)
    top2_model = MaxPooling2D(data_format = 'channels_last')(xception.output)
    # Concatenate along the height axis (axis=1); both backbones end with the
    # same channel count, so the channel dimension is unchanged.
    concatenate_model = Concatenate(axis = 1)([top1_model, top2_model])
    # NOTE(review): `concatenate_model` is a tensor, not a layer/model, so
    # setting `.trainable` here is a no-op.  If the intent was to freeze the
    # backbones, that would be `resNet.trainable = False` and
    # `xception.trainable = False` — left unchanged to preserve the current
    # training behaviour.
    concatenate_model.trainable = False
    # h1 = MaxPooling2D(pool_size = 2)(concatenate_model)
    h1 = BatchNormalization()(concatenate_model)

    # ------------------------------------------------------------------
    # Squeeze-and-Excitation style channel attention: average- and max-pooled
    # channel descriptors each pass through a 1x1-conv bottleneck, are summed,
    # squashed with a sigmoid, and used to re-weight h1.
    # ------------------------------------------------------------------
    channels = h1.shape[-1]  # 2048 for these two backbones (was hard-coded)
    reduction = 16           # SE bottleneck ratio

    # Squeeze branch: global average pooling.
    hs = GlobalAveragePooling2D()(h1)
    hs = Reshape((1, 1, hs.shape[1]))(hs)
    hs = Conv2D(channels // reduction, kernel_size=1, strides=1, padding="same",
                kernel_regularizer=regularizers.l2(1e-4),
                use_bias=True, activation="relu")(hs)
    hs = Conv2D(channels, kernel_size=1, strides=1,
                padding="same",
                kernel_regularizer=regularizers.l2(1e-4),
                use_bias=True)(hs)
    # Squeeze branch: global max pooling.
    hb = GlobalMaxPooling2D()(h1)
    # hb = GlobalAveragePooling2D()(h1)
    hb = Reshape((1, 1, hb.shape[1]))(hb)
    hb = Conv2D(channels // reduction, kernel_size=1, strides=1, padding="same",
                kernel_regularizer=regularizers.l2(1e-4),
                use_bias=True, activation="relu")(hb)
    hb = Conv2D(channels, kernel_size=1, strides=1, padding="same",
                kernel_regularizer=regularizers.l2(1e-4),
                use_bias=True)(hb)
    out = hs + hb              # fuse max- and average-pooled descriptors
    out = tf.nn.sigmoid(out)   # channel attention weights in (0, 1)
    out = out * h1             # re-weight the feature map
    out = Flatten()(out)

    # Classification head: two dense blocks, then a 4-way softmax.
    top_model = Dense(units = 512, activation = "relu")(out)
    top_model = BatchNormalization()(top_model)
    top_model = Dense(units = 256, activation = "relu")(top_model)
    top_model = BatchNormalization()(top_model)
    top_model = Dense(units = 4, activation = "softmax")(top_model)
    model = Model(inputs = input_layer, outputs = top_model)

    model.compile(optimizer = 'adam',
                  loss = 'categorical_crossentropy',
                  metrics = ['accuracy',km.f1_score(),km.recall(),km.precision()])

    early_stopping = EarlyStopping(
        monitor = 'val_loss',
        verbose = 1,
        patience = 40,
        restore_best_weights = True
    )
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(min_lr=0.00001,
                                                     factor=0.2)

    # ------------------------------------------------------------------
    # Inverse-frequency class weights over the four class folders.
    # weight_i = total / count_i / num_classes
    # ------------------------------------------------------------------
    class_counts = [len(os.listdir('train_data_all/%d' % i)) for i in range(4)]
    total = sum(class_counts)
    class_weight = {i: total / count / 4.0 for i, count in enumerate(class_counts)}
    print(class_weight)

    # Up to 2000 epochs; EarlyStopping (patience=40) usually halts far sooner.
    history = model.fit(train_ds, epochs=2000, callbacks=[early_stopping, reduce_lr],validation_data=val_ds)

    # ------------------------------------------------------------------
    # Validation predictions: collect per-sample argmax predictions/targets.
    # NOTE(review): the hard-coded 6 assumes val_ds is indexable and holds
    # exactly 6 (x, y) batches — confirm against get_data_newfour().
    # ------------------------------------------------------------------
    predict = []
    val_targ = []
    for i in range(6):
        batch_x, batch_y = val_ds[i][0], val_ds[i][1]
        res = model.predict(batch_x)
        # extend with argmax indices (was a side-effecting list comprehension)
        predict.extend(np.argmax(res, axis=1))
        val_targ.extend(np.argmax(batch_y, axis=1))

    print(len(predict))
    print(len(val_targ))
    print(predict)
    print(val_targ)

    _val_f1 = f1_score(val_targ, predict,average='micro')
    # average=None -> one score per class.
    _val_recall = recall_score(val_targ, predict,average=None)
    _val_precision = precision_score(val_targ, predict,average=None)
    print('_val_f1',_val_f1)
    print('_val_recall',_val_recall[0])        # class-0 recall only
    print('_val_precision',_val_precision[0])  # class-0 precision only

    # ------------------------------------------------------------------
    # Persist predictions, targets and training history for offline analysis.
    # (`pd` comes from the module-level `import pandas as pd`.)
    # ------------------------------------------------------------------
    hist_df = pd.DataFrame(history.history)

    y_pre_file = 'SE_ResNet152+Xception_predict.csv'
    y_rel_file = 'SE_ResNet152+Xception_reltag.csv'
    test1=pd.DataFrame(data=predict)
    test2=pd.DataFrame(data=val_targ)
    test1.to_csv(y_pre_file, encoding= 'utf-8')
    test2.to_csv(y_rel_file, encoding= 'utf-8')

    hist_csv_file = 'SE_ResNet152+Xception_history.csv'
    with open(hist_csv_file, mode='w') as f:
        hist_df.to_csv(f)
    model.save('SE_ResNet152+Xception.h5')
