# coding:utf-8
# __author__ = yuan
# __time__ = 2020/4/23
# __file__ = migrate_train
# __desc__ = transfer-learning training script (ResNet50 base + focal loss)
# NOTE: a second header block (from `ktens`, 2020/2/29) was duplicated here;
# merged into this single header.
import glob
import os
import time
from data import _get_data_list
import tensorflow as tf
import keras.backend as K
from scipy import misc

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from pathlib import Path
import warnings
import numpy as np
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter("ignore",category=Warning)
from tensorflow import keras
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense,GlobalAveragePooling2D
from keras.models import Model

os.environ['CUDA_VISIBLE_DEVICES']='0'

# --- Data pipeline & hyper-parameter configuration (module-level side effects) ---
# NOTE(review): featurewise_std_normalization=True requires a prior call to
# datagen.fit(sample_images) to compute the dataset statistics; no such call
# appears in this file, so Keras will warn and skip the normalization — confirm.
datagen = ImageDataGenerator(
    rotation_range=10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    brightness_range=(0.1,0.5),
    featurewise_std_normalization=True,
)
# Validation generator: no augmentation, only the (intended) normalization.
testgen = ImageDataGenerator(
    featurewise_std_normalization=True,
)
train_dir = r"/data/soft/javad/COCO/convd/train"
val_dir = r"/data/soft/javad/COCO/convd/val"
HEIGHT=900            # target image height fed to the network
WIDTH = 900           # target image width
FC_size = 1024        # units in the appended fully-connected layer
nclasses = 2          # number of output classes
nb_epoch = 1          # training epochs
gamma=2               # focal-loss focusing parameter (see binary_focal_loss)
batch_size=24         # training batch size
val_batch=4           # validation batch size
alpha=.25             # focal-loss class-balance weight
# NOTE(review): these generators scan the directories at import time.
train_gen = datagen.flow_from_directory(train_dir,target_size=(HEIGHT,WIDTH),batch_size=batch_size)
val_gen = testgen.flow_from_directory(val_dir,target_size=(HEIGHT,WIDTH),batch_size=val_batch)
output_model_file = "./weight/convd.h5"
# Base model for transfer learning: ResNet50 pretrained on ImageNet, with the
# fully-connected classification head removed (include_top=False).
base_model = ResNet50(weights='imagenet',include_top=False)


def binary_focal_loss(gamma=2, alpha=0.25):
    """Build a Keras loss function implementing binary focal loss.

    focal_loss(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t)
    where p = sigmoid(x) and p_t = p for positive labels, 1 - p otherwise.

    Reference:
        https://arxiv.org/pdf/1708.02002.pdf
    Usage:
        model.compile(loss=[binary_focal_loss(alpha=.25, gamma=2)],
                      metrics=["accuracy"], optimizer=adam)
    """
    alpha_const = tf.constant(alpha, dtype=tf.float32)
    gamma_const = tf.constant(gamma, dtype=tf.float32)

    def binary_focal_loss_fixed(y_true, y_pred):
        """y_true must have shape (None, 1); y_pred is post-sigmoid."""
        y_true = tf.cast(y_true, tf.float32)
        ones = K.ones_like(y_true)
        # Per-sample class-balance weight: alpha for positives, 1-alpha for negatives.
        alpha_t = y_true * alpha_const + (ones - y_true) * (1 - alpha_const)
        # Probability assigned to the true class; epsilon keeps log() finite.
        p_t = y_true * y_pred + (ones - y_true) * (ones - y_pred) + K.epsilon()
        focal = -alpha_t * K.pow(ones - p_t, gamma_const) * K.log(p_t)
        return K.mean(focal)

    return binary_focal_loss_fixed


def get_nb_files(directory):
    """Return the total number of files under *directory*, searched recursively.

    Args:
        directory: path to scan (str or PathLike).

    Returns:
        int: count of regular files in the tree; 0 if the directory
        does not exist. Directories themselves are not counted.
    """
    if not Path(directory).exists():
        return 0
    # os.walk yields (root, dirs, files) per directory; summing the file-list
    # lengths replaces the original manual counter (and its redundant
    # `if files:` guard — len([]) is 0 anyway).
    return sum(len(files) for _, _, files in os.walk(directory))

def add_new_last_layer(base_model, nb_classes):
    """Attach a fresh classification head to *base_model*.

    Global-average-pools the convolutional features, feeds them through a
    ReLU fully-connected layer of FC_size units, and ends with a sigmoid
    output layer of nb_classes units.

    Returns:
        A Model mapping base_model's input to the new predictions.
    """
    pooled = GlobalAveragePooling2D()(base_model.output)
    hidden = Dense(FC_size, activation='relu')(pooled)
    predictions = Dense(nb_classes, activation='sigmoid')(hidden)
    return Model(inputs=base_model.input, outputs=predictions)

def setup_to_transfer_learn(model: Model, base_model):
    """Compile *model* with RMSprop and the binary focal loss.

    NOTE(review): despite the transfer-learning name, the base model's layers
    are NOT frozen — the freeze loop was left commented out in the original,
    so the entire network is fine-tuned. Confirm this is intended before
    long training runs; the misleading "Freeze all layers" docstring and the
    dead commented-out code have been removed.
    """
    model.compile(optimizer="rmsprop",
                  loss=binary_focal_loss(gamma, alpha),
                  metrics=['accuracy'])

def Plot_echart(history):
    # Render the training history as an ECharts line chart configuration.
    # NOTE(review): `line` is never imported anywhere in this file (presumably
    # `from pyecharts import line` was intended) — calling this function raises
    # NameError. Confirm and add the missing import before use.
    # NOTE(review): assumes the Keras History keys 'acc'/'val_acc'; newer Keras
    # versions record 'accuracy'/'val_accuracy' — verify against the Keras version.
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    # The configured chart is built but never rendered to a file or returned;
    # t_line only holds show_config()'s result.
    t_line=(line.Line(title="train result")
            .add("accuracy",epochs,acc,line_color='green')
            .add("loss",epochs,loss,line_color='red')
            .add("val_accuracy",epochs,val_acc,line_color='blue')
            .add("val_loss",epochs,val_loss,line_color='pink')
            .show_config())


def plot_training(history, out_path="training_loss.png"):
  """Plot training/validation accuracy and loss curves and save the figure.

  Args:
      history: Keras History object returned by model.fit*.
      out_path: file the loss figure is written to. The original called
          plt.savefig() with no argument, which raises TypeError (fname is
          required), so a default filename is supplied here.
  """
  # 'acc'/'val_acc' are the key names in older Keras; newer versions use
  # 'accuracy'/'val_accuracy' — fall back so either works.
  hist = history.history
  acc = hist.get('acc', hist.get('accuracy'))
  val_acc = hist.get('val_acc', hist.get('val_accuracy'))
  loss = hist['loss']
  val_loss = hist['val_loss']
  epochs = range(len(acc))

  plt.plot(epochs, acc, 'r.')
  plt.plot(epochs, val_acc, 'r')
  plt.title('Training and validation accuracy')

  plt.figure()
  plt.plot(epochs, loss, 'r.')
  plt.plot(epochs, val_loss, 'r-')
  plt.title('Training and validation loss')
  plt.savefig(out_path)  # fix: savefig requires a filename argument


def tl_train():
    """Build the transfer model, train it on the directory generators,
    save the weights to output_model_file, and plot the history."""
    net = add_new_last_layer(base_model, nclasses)
    setup_to_transfer_learn(net, base_model)

    # Steps per epoch derived from the on-disk file counts.
    steps_train = get_nb_files(train_dir) // batch_size
    steps_val = get_nb_files(val_dir) // val_batch

    history = net.fit_generator(
        train_gen,
        epochs=nb_epoch,
        steps_per_epoch=steps_train,
        validation_data=val_gen,
        validation_steps=steps_val,
        class_weight='auto'
    )
    net.save(output_model_file)
    plot_training(history)
# tl_train()
def test():
    """Run the saved model over a directory of labelled images and print
    each prediction next to its ground-truth label.

    Returns:
        (preds, reals): predicted scores and the corresponding true labels.
    """
    testdir = ""  # TODO(review): test directory was left empty in the original
    from keras.models import load_model
    model = load_model(output_model_file)
    images, labels = _get_data_list(testdir)
    preds = []
    reals = []
    for imp, label in zip(images, labels):
        # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.3;
        # imageio.imread is the documented drop-in replacement — confirm the
        # pinned SciPy version before relying on this.
        img = misc.imread(imp)
        # fix: expand_dims requires an axis; add the leading batch dimension.
        img = np.expand_dims(img, axis=0)
        pred: np.ndarray = model.predict(img)
        pred = np.ravel(pred)[0]
        preds.append(pred)
        reals.append(label)
        print(f"{imp} 预测结果:\n"
              f"预测: {pred}  -- 真实: {label}")
    return preds, reals

