import os
import pandas as pd

import tensorflow as tf

from util import get_data_newfour
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.callbacks import EarlyStopping
import keras_metrics as km

import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import f1_score, precision_score, recall_score

from tensorflow.python.keras.layers import Dense,MaxPooling2D, Flatten, BatchNormalization
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2

 
class Metrics(Callback):
    """Keras callback recording validation micro-F1, per-class recall and
    per-class precision at the end of every epoch.

    History is kept in ``self.val_f1s`` / ``self.val_recalls`` /
    ``self.val_precisions`` (one entry per epoch).

    NOTE(review): reads ``self.validation_data``, which standalone Keras
    populates during ``fit`` but tf.keras 2.x no longer sets — confirm
    against the Keras version actually used before wiring this callback in.
    """

    def on_train_begin(self, logs=None):
        # Fix: use None instead of a mutable default dict for `logs`.
        # Reset the metric history at the start of each training run.
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []

    def on_epoch_end(self, epoch, logs=None):
        # Round the model's probabilistic outputs to hard 0/1 predictions.
        val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
        val_targ = self.validation_data[1]

        _val_f1 = f1_score(val_targ, val_predict, average='micro')
        # average=None -> one recall/precision value per class.
        _val_recall = recall_score(val_targ, val_predict, average=None)
        _val_precision = precision_score(val_targ, val_predict, average=None)
        self.val_f1s.append(_val_f1)
        self.val_recalls.append(_val_recall)
        self.val_precisions.append(_val_precision)

        print("— val_f1: %f " % _val_f1)


# tf.data prefetch/parallelism tuning constant.
# NOTE(review): appears unused in this file — verify whether any caller
# relies on it before removing.
AUTOTUNE = tf.data.experimental.AUTOTUNE

if __name__ == '__main__':
    train_ds, val_ds = get_data_newfour()

    # MobileNetV2 backbone (ImageNet weights, no classification head).
    # The original comment said "DenseNet169" — that was wrong.
    mobile_net = MobileNetV2(input_shape=(300, 300, 3), include_top=False)
    # Freeze the backbone; only the dense head below is trained.
    mobile_net.trainable = False

    model = Sequential([
        mobile_net,
        MaxPooling2D(2, 2),
        Flatten(),
        Dense(1000, activation='relu'),
        BatchNormalization(),
        Dense(200, activation='relu'),
        BatchNormalization(),
        Dense(4, activation='softmax')])

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', km.f1_score(), km.recall(), km.precision()])

    early_stopping = EarlyStopping(
        monitor='val_loss',
        verbose=1,
        patience=40,
        restore_best_weights=True,
    )
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(min_lr=0.00001,
                                                     factor=0.2)

    # Inverse-frequency class weights to counter class imbalance:
    # weight_c = total / (n_classes * count_c)
    counts = [len(os.listdir('train_data_all/%d' % c)) for c in range(4)]
    total = sum(counts)
    class_weight = {c: total / counts[c] / 4.0 for c in range(4)}
    print(class_weight)

    # BUG FIX: class_weight was computed but never passed to fit(), so the
    # imbalance correction had no effect; it is now forwarded.
    # epochs=2000 is an upper bound — EarlyStopping (patience=40) usually
    # halts training much earlier.
    history = model.fit(train_ds, epochs=2000,
                        callbacks=[early_stopping, reduce_lr],
                        validation_data=val_ds,
                        class_weight=class_weight)

    # Collect hard predictions and true labels over the validation batches.
    # NOTE(review): assumes val_ds is indexable (e.g. a keras Sequence of
    # (x, y) batches) with exactly 6 batches — TODO replace the hard-coded 6
    # with len(val_ds) once confirmed.
    predict = []
    val_targ = []
    for i in range(6):
        batch_x, batch_y = val_ds[i][0], val_ds[i][1]
        res = model.predict(batch_x)
        # argmax turns one-hot / softmax rows into class indices.
        predict.extend(np.argmax(r) for r in res)
        val_targ.extend(np.argmax(label) for label in batch_y)

    print(len(predict))
    print(len(val_targ))
    print(predict)
    print(val_targ)

    _val_f1 = f1_score(val_targ, predict, average='micro')
    _val_recall = recall_score(val_targ, predict, average=None)
    _val_precision = precision_score(val_targ, predict, average=None)
    print('_val_f1', _val_f1)
    print('_val_recall', _val_recall[0])
    print('_val_precision', _val_precision[0])

    # Persist predictions, true labels, training history and the model.
    hist_df = pd.DataFrame(history.history)

    y_pre_file = 'base2_MobileNet_predict.csv'
    y_rel_file = 'base2_Mobile_reltag.csv'
    pd.DataFrame(data=predict).to_csv(y_pre_file, encoding='utf-8')
    pd.DataFrame(data=val_targ).to_csv(y_rel_file, encoding='utf-8')

    hist_csv_file = 'base2_Mobile_history.csv'
    hist_df.to_csv(hist_csv_file)
    model.save('base2_MobileNet.h5')
