import tensorflow as tf
from tensorflow.python.keras import Input, Model
from tensorflow.python.keras.applications.densenet import DenseNet169, DenseNet121
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.xception import Xception
from tensorflow.python.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.python.keras.applications.resnet import ResNet50, ResNet152
from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, Rescaling, BatchNormalization

from tensorflow.python.keras.layers import GlobalAveragePooling2D, Dense, GlobalMaxPooling2D, Concatenate, Dropout
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.callbacks import EarlyStopping
import os
import pandas as pd
from util import get_data_newfour
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.callbacks import EarlyStopping
import keras_metrics as km

import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import f1_score, precision_score, recall_score


class Metrics(Callback):
    """Keras callback computing sklearn F1 / recall / precision on the
    validation data at the end of every epoch.

    NOTE(review): this relies on ``self.validation_data``, which tf.keras
    no longer populates on callbacks in TF2 — confirm the attribute is set
    (e.g. assigned manually after construction) before trusting these values.
    """

    def on_train_begin(self, logs=None):
        # Per-epoch accumulators. (Fixed: the original used the mutable
        # default ``logs={}``, which is shared across calls.)
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []

    def on_epoch_end(self, epoch, logs=None):
        # Round the model's probability outputs to hard 0/1 predictions.
        val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
        val_targ = self.validation_data[1]

        # Micro-averaged F1; recall/precision per class (average=None).
        _val_f1 = f1_score(val_targ, val_predict, average='micro')
        _val_recall = recall_score(val_targ, val_predict, average=None)
        _val_precision = precision_score(val_targ, val_predict, average=None)
        self.val_f1s.append(_val_f1)
        self.val_recalls.append(_val_recall)
        self.val_precisions.append(_val_precision)

        print("— val_f1: %f " % _val_f1)

AUTOTUNE = tf.data.experimental.AUTOTUNE

if __name__ == '__main__':
    # --- Data -------------------------------------------------------------
    train_ds, val_ds = get_data_newfour()

    # --- Model: ResNet152 + Xception backbones sharing one input, global
    # max-pooled and concatenated, followed by a small dense head ----------
    input_layer = Input(shape=(300, 300, 3))
    resnet = ResNet152(include_top=False, input_tensor=input_layer,
                       input_shape=(300, 300, 3))
    xception = Xception(include_top=False, input_tensor=input_layer,
                        input_shape=(300, 300, 3))
    # Freeze both pretrained backbones. (BUGFIX: the original set
    # ``.trainable = False`` on the Concatenate output *tensor*, which is a
    # no-op — nothing was actually frozen.)
    resnet.trainable = False
    xception.trainable = False

    pooled_resnet = GlobalMaxPooling2D(data_format='channels_last')(resnet.output)
    pooled_xception = GlobalMaxPooling2D(data_format='channels_last')(xception.output)
    merged = Concatenate(axis=1)([pooled_resnet, pooled_xception])

    top_model = Dense(units=512, activation="relu")(merged)
    top_model = BatchNormalization()(top_model)
    top_model = Dense(units=256, activation="relu")(top_model)
    top_model = BatchNormalization()(top_model)
    # 4-way softmax classification head.
    top_model = Dense(units=4, activation="softmax")(top_model)
    model = Model(inputs=input_layer, outputs=top_model)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', km.f1_score(), km.recall(), km.precision()])

    early_stopping = EarlyStopping(
        monitor='val_loss',
        verbose=1,
        patience=40,
        restore_best_weights=True,
    )
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(min_lr=0.00001,
                                                     factor=0.2)

    # --- Inverse-frequency class weights to counter class imbalance ------
    counts = [len(os.listdir('train_data_all/%d' % c)) for c in range(4)]
    total = sum(counts)
    class_weight = {c: total / n / 4.0 for c, n in enumerate(counts)}
    print(class_weight)

    # Up to 2000 epochs; EarlyStopping (patience=40) normally stops sooner.
    # BUGFIX: ``class_weight`` was computed above but never passed to fit().
    history = model.fit(train_ds, epochs=2000,
                        callbacks=[early_stopping, reduce_lr],
                        validation_data=val_ds,
                        class_weight=class_weight)

    # --- Evaluate on the validation set -----------------------------------
    # val_ds is indexed per batch (val_ds[i] -> (x, y)), so it is assumed to
    # be an indexable batch sequence; the original hard-coded 33 batches
    # (522 samples / batch size 16) — use len(val_ds) instead.
    predictions = []
    targets = []
    for i in range(len(val_ds)):
        batch_x, batch_y = val_ds[i][0], val_ds[i][1]
        probs = model.predict(batch_x)
        # argmax turns one-hot / probability vectors into class indices.
        predictions.extend(int(np.argmax(p)) for p in probs)
        targets.extend(int(np.argmax(label)) for label in batch_y)

    print(len(predictions))
    print(len(targets))
    print(predictions)
    print(targets)

    _val_f1 = f1_score(targets, predictions, average='micro')
    _val_recall = recall_score(targets, predictions, average=None)       # per class
    _val_precision = precision_score(targets, predictions, average=None)  # per class
    print('_val_f1', _val_f1)
    print('_val_recall', _val_recall[0])
    print('_val_precision', _val_precision[0])

    # --- Persist predictions, targets, training history and the model ----
    pd.DataFrame(data=predictions).to_csv('ResNet152+Xception_predict.csv', encoding='utf-8')
    pd.DataFrame(data=targets).to_csv('ResNet152+Xception_reltag.csv', encoding='utf-8')

    hist_df = pd.DataFrame(history.history)
    hist_csv_file = 'ResNet152+Xception_history.csv'
    with open(hist_csv_file, mode='w') as f:
        hist_df.to_csv(f)
    model.save('ResNet152+Xception.h5')