'''
Author: Angyi
Date: 2020-11-19 09:45:51
LastEditTime: 2020-11-19 14:12:04
LastEditors: Please set LastEditors
Description: Eddy training
FilePath: /Eddy/train.py
'''

import numpy as np
import os 
import xarray as xr
import matplotlib.pyplot as plt
from keras.models import Model, load_model
from keras.layers import Activation, Reshape, Permute, Lambda
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, UpSampling2D, AlphaDropout, concatenate, Conv2DTranspose
from keras.layers import BatchNormalization, LeakyReLU, add
from keras.utils import np_utils
from keras.optimizers import Adam
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau,RemoteMonitor,LambdaCallback,Callback,CSVLogger


# Laplace-style smoothing term added to both numerator and denominator of the
# Dice coefficients below, so the ratio is defined even when a class is absent.
smooth = 1.  # to avoid zero division

def model():
    """Build an EddyNet-style U-Net for per-pixel eddy classification.

    Input is a single-channel 168x168 SSH map; output is reshaped to
    (height * width, nbClass) with a softmax over the 3 classes
    (non-eddy / anticyclonic / cyclonic).

    Returns
    -------
    keras.models.Model
        The uncompiled segmentation network.
    """
    width, height = 168, 168
    n_classes = 3
    n_filters = 16
    kernel = 3
    # Dropout rate per encoder/decoder level (shallow -> deep).
    drop_rates = (0.2, 0.3, 0.4)

    # ------------------------------------ input
    img_input = Input(shape=(height, width, 1))

    # ------------------------------------ encoder
    # Each level: conv -> conv -> dropout, keeping the pre-pool tensor
    # as the skip connection for the matching decoder level.
    skips = []
    x = img_input
    for rate in drop_rates:
        x = ConvBNActi(n_filters, kernel, x)
        x = ConvBNActi(n_filters, kernel, x)
        x = Dropout(rate)(x)
        skips.append(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

    # ------------------------------------ bottleneck
    x = ConvBNActi(n_filters, kernel, x)
    x = ConvBNActi(n_filters, kernel, x)
    x = Dropout(0.5)(x)

    # ------------------------------------ decoder
    # Mirror of the encoder: upsample, concatenate the skip tensor,
    # then conv -> conv -> dropout with the same per-level rate.
    for rate, skip in zip(reversed(drop_rates), reversed(skips)):
        x = concatenate([UpSampling2D((2, 2))(x), skip])
        x = ConvBNActi(n_filters, kernel, x)
        x = ConvBNActi(n_filters, kernel, x)
        x = Dropout(rate)(x)

    # ------------------------------------ segmentation head
    x = Conv2D(n_classes, (1, 1), padding="valid", use_bias=False)(x)
    x = Reshape((height * width, n_classes))(x)
    x = Activation("softmax")(x)
    return Model(img_input, x)



def dice_coef_anti(y_true, y_pred):
    """Smoothed Dice coefficient for the anticyclonic channel (index 1)."""
    truth = y_true[:, :, 1]
    pred = y_pred[:, :, 1]
    overlap = K.sum(truth * pred)
    denom = K.sum(truth) + K.sum(pred)
    return (2 * overlap + smooth) / (denom + smooth)

def dice_coef_cyc(y_true, y_pred):
    """Smoothed Dice coefficient for the cyclonic channel (index 2)."""
    truth = y_true[:, :, 2]
    pred = y_pred[:, :, 2]
    overlap = K.sum(truth * pred)
    denom = K.sum(truth) + K.sum(pred)
    return (2 * overlap + smooth) / (denom + smooth)

def dice_coef_nn(y_true, y_pred):
    """Smoothed Dice coefficient for the non-eddy channel (index 0)."""
    truth = y_true[:, :, 0]
    pred = y_pred[:, :, 0]
    overlap = K.sum(truth * pred)
    denom = K.sum(truth) + K.sum(pred)
    return (2 * overlap + smooth) / (denom + smooth)
    
def mean_dice_coef(y_true, y_pred):
    """Unweighted mean of the three per-class Dice coefficients."""
    total = dice_coef_anti(y_true, y_pred)
    total = total + dice_coef_cyc(y_true, y_pred)
    total = total + dice_coef_nn(y_true, y_pred)
    return total / 3.

def weighted_mean_dice_coef(y_true, y_pred):
    """Class-weighted mean Dice; weights sum to 1 and emphasize the two
    eddy classes over the dominant non-eddy background."""
    w_anti, w_cyc, w_nn = 0.36, 0.62, 0.02
    return (w_anti * dice_coef_anti(y_true, y_pred)
            + w_cyc * dice_coef_cyc(y_true, y_pred)
            + w_nn * dice_coef_nn(y_true, y_pred))
  
def dice_coef_loss(y_true, y_pred):
    """Training loss: one minus the weighted mean Dice coefficient,
    so maximizing overlap minimizes the loss."""
    score = weighted_mean_dice_coef(y_true, y_pred)
    return 1 - score


def ConvBNActi(nf, ker, inputs):
    """Conv2D (bias-free, he_normal init) -> BatchNorm -> ReLU.

    Bias is omitted because BatchNormalization supplies the shift term.
    """
    x = Conv2D(nf, ker, padding="same",
               kernel_initializer='he_normal', use_bias=False)(inputs)
    x = BatchNormalization()(x)
    return Activation('relu')(x)
  
def ConvTranspBNActi(nf, ker, inputs):
    """Strided Conv2DTranspose (2x upsampling) -> BatchNorm -> ReLU.

    Bias is omitted because BatchNormalization supplies the shift term.
    """
    x = Conv2DTranspose(nf, ker, strides=(2, 2), padding="same",
                        kernel_initializer='he_normal', use_bias=False)(inputs)
    x = BatchNormalization()(x)
    return Activation('relu')(x)






def train(epoch):
    """Fine-tune a pretrained EddyNet on local SSH-anomaly data.

    Side effects: reads training arrays from ``model_data/train/``, loads the
    pretrained network from ``model/base.h5``, writes the best checkpoint
    (lowest ``val_loss``) to ``model/eddy.h5`` and the per-epoch history to
    ``eddy_his.csv``.

    Parameters
    ----------
    epoch : int
        Number of training epochs.
    """
    # SSH anomaly input: (samples, H, W) -> (samples, H, W, 1) channel axis.
    SSH_train = np.expand_dims(np.load('model_data/train/ssha_train.npy'), 3)
    # The network cannot digest NaNs (e.g. land mask); zero them out.
    SSH_train = np.nan_to_num(SSH_train)

    Seg_train = np.expand_dims(np.load('model_data/train/seg_train.npy'), 3)

    # One-hot encode the labels and flatten the spatial dimensions to match
    # the model's (H*W, 3) softmax output.  Derive the sample count and
    # spatial size from the data instead of hard-coding (300, 168*168), so
    # any dataset size works.
    n_samples, height, width = Seg_train.shape[:3]
    Seg_train_categor = np_utils.to_categorical(
        np.reshape(Seg_train[:, :, :, 0], (n_samples, height * width)), 3)

    # eddynet = model()
    # Transfer learning: start from the EddyNet pretrained on another region.
    eddynet = load_model('model/base.h5',
                         custom_objects={'dice_coef_loss': dice_coef_loss,
                                         'mean_dice_coef': mean_dice_coef,
                                         'weighted_mean_dice_coef': weighted_mean_dice_coef})

    eddynet.compile(optimizer=Adam(lr=1e-3), loss=dice_coef_loss,
                    metrics=['categorical_accuracy', mean_dice_coef, weighted_mean_dice_coef])

    # Keep only the best model (lowest validation loss) on disk.
    modelcheck = ModelCheckpoint('model/eddy.h5', monitor='val_loss', verbose=2,
                                 save_best_only=True, save_weights_only=False)
    callback_to_csv = CSVLogger('eddy_his.csv', separator=',', append=False)

    # Halve the learning rate whenever val_loss plateaus for 20 epochs.
    reducecall = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=20,
                                   verbose=2, mode='auto',
                                   min_delta=1e-30, min_lr=1e-30)

    eddynet.fit(SSH_train, Seg_train_categor,
                epochs=epoch,
                batch_size=32,
                shuffle=False,
                verbose=1,
                callbacks=[modelcheck, reducecall, callback_to_csv],
                validation_split=0.1)


if __name__ == "__main__":
    
    train(125)
