import numpy as np 
import os
import pickle
from tqdm import tqdm
from net import *
from matplotlib import pyplot as plt 
from keras.optimizers import *
from keras import optimizers
import keras
from keras.models import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard
from keras.utils.multi_gpu_utils import multi_gpu_model
from keras import backend as K
import tensorflow as tf
import matplotlib.pyplot as plt
from utils import DataGenerator
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# from imshow_data import imshow_4D

# Hyper-parameters forwarded to DataGenerator: 9 samples per batch, each a
# 64x64x64 volume with 32 channels, reshuffled every epoch.
params = {'batch_size':9,
        'dim':(64,64,64),
        'n_channels':32,
        'shuffle': True}
# File-name prefixes for data volumes and their Gaussian-smoothed labels.
# NOTE(review): train and validation prefixes are identical — the split is
# done purely via the ID ranges below; verify the ranges do not overlap
# (train starts at 10, valid_ID0 covers 0-19, so IDs 10-19 appear in both).
TrainDataPathT = "./input/data_"
TrainLabelPathT = "./input/label_Gauss_"
ValidDataPathT = "./input/data_"
ValidLabelPathT = "./input/label_Gauss_"

# Training sample IDs: three active ranges (ID3/ID4 currently disabled).
train_ID0 = np.arange(10,100,1)
train_ID1 = np.arange(100,180,1)
train_ID2 = np.arange(420,500,1)
train_ID3 = np.arange(620,700,1)
train_ID4 = np.arange(1800,1900,1)
train_ID = np.append(train_ID0, train_ID1)
train_ID = np.append(train_ID, train_ID2)
# train_ID = np.append(train_ID, train_ID3)
# train_ID = np.append(train_ID, train_ID4)

# Validation sample IDs: three active ranges (ID3/ID4 currently disabled).
valid_ID0 = np.arange(0,20,1)
valid_ID1 = np.arange(190,200,1)
valid_ID2 = np.arange(400,420,1)
valid_ID3 = np.arange(600,620,1)
valid_ID4 = np.arange(990,1000,1)
valid_ID = np.append(valid_ID0, valid_ID1)
valid_ID = np.append(valid_ID, valid_ID2)
# valid_ID = np.append(valid_ID, valid_ID3)
# valid_ID = np.append(valid_ID, valid_ID4)

# Generators yield batches lazily so the full dataset never sits in memory.
train_generator = DataGenerator(tpath=TrainDataPathT,lpath=TrainLabelPathT,
                                data_IDs=train_ID,**params)
valid_generator = DataGenerator(tpath=ValidDataPathT,lpath=ValidLabelPathT,
                                data_IDs=valid_ID,**params)

#train
# Build the 3D U-Net defined in net.py.
model = unet()
print(model)

def scheduler(epoch):
    """Learning-rate schedule for LearningRateScheduler.

    Halves the optimizer's learning rate once every 50 epochs (epoch 0 is
    left untouched) and returns the rate to use for the current epoch.
    """
    is_decay_epoch = epoch != 0 and epoch % 50 == 0
    if is_decay_epoch:
        halved = K.get_value(model.optimizer.lr) * 0.5
        K.set_value(model.optimizer.lr, halved)
        print("lr changed to {}".format(halved))
    return K.get_value(model.optimizer.lr)

# Wrap the schedule function so Keras calls it at the start of every epoch.
# NOTE(review): reduce_lr is defined but currently commented out of the
# callbacks list passed to fit_generator below.
reduce_lr = LearningRateScheduler(scheduler)

# Save a checkpoint every 10 epochs, epoch number embedded in the filename.
# NOTE(review): monitor='val_acc'/mode='max' are inert here because
# save_best_only=False saves unconditionally; also the metric may be named
# 'val_accuracy' depending on the Keras version — confirm if best-only
# saving is ever re-enabled.
checkpoint = ModelCheckpoint(filepath = "model/pattern_rand_labelGauss_DLR_LossMSE-E{epoch:02d}.hdf5", monitor='val_acc',
        verbose=1, save_best_only=False, mode='max', period=10)

def loss_abs_func(y_label, y_pred):
    """Custom loss combining a scaled L2 error norm with a peak-value term.

    Computed as 0.01 * ||y_label - y_pred||_2
             +  (max(y_label) - max(y_pred))^2,
    i.e. the overall reconstruction error plus a penalty on mismatched
    global maxima of label and prediction.
    """
    l2_norm = K.sqrt(K.sum(K.square(y_label - y_pred)))
    peak_error = K.square(K.max(y_label) - K.max(y_pred))
    return 0.01 * l2_norm + 1 * peak_error

def smooth_L1_loss(y_true, y_pred):
    """Element-wise smooth-L1 (Huber, delta=1) loss.

    Quadratic (0.5 * err^2) where |err| < 1, linear (|err| - 0.5) beyond,
    so large residuals are penalized less aggressively than plain L2.
    """
    abs_err = K.abs(y_true - y_pred)
    in_quadratic_zone = K.cast(K.less(abs_err, 1.0), "float32")
    quadratic = 0.5 * abs_err ** 2
    linear = abs_err - 0.5
    return in_quadratic_zone * quadratic + (1 - in_quadratic_zone) * linear

def focal_loss(y_true, y_pred):
    """Binary focal loss (summed over all elements).

    Down-weights well-classified elements via the (1 - p_t)^gamma modulating
    factor so training concentrates on hard examples; alpha balances the
    positive/negative classes.

    FIX: predictions are clipped away from exactly 0 and 1 before K.log —
    the original produced inf/NaN losses and gradients whenever the network
    saturated.  Two earlier abandoned implementations (numpy-based) were
    removed as dead code.
    """
    gamma = 2.0
    alpha = 0.25
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    # p at positive positions (y_true == 1); 1 elsewhere so those positions
    # contribute nothing (log 1 = 0, modulator (1-1)^gamma = 0).
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    # p at negative positions (y_true < 1); 0 elsewhere for the same reason.
    pt_0 = tf.where(tf.less(y_true, 1), y_pred, tf.zeros_like(y_pred))
    return (-K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))
            - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0)))

# Adam with a reduced initial learning rate (halved every 50 epochs by the
# scheduler above, when reduce_lr is enabled in the callbacks list).
rms = optimizers.Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# FIX: the original passed optimizer='adam', which silently discarded the
# configured `rms` optimizer (its lr=0.0002) in favor of Keras defaults
# (lr=0.001).  Also, `metrics` must be a list of metric names in this
# Keras API, not a bare string.
model.compile(optimizer=rms, loss='mse', metrics=['accuracy'])
history = model.fit_generator(generator=train_generator,
                              validation_data=valid_generator,
                              epochs=200,
                              callbacks=[checkpoint],  # add reduce_lr here to enable LR decay
                              verbose=1)

# Persist the per-epoch metrics dict (pickled binary despite the .txt
# extension) so training curves can be re-plotted later.
with open('log_rand_labelGauss_DLR_LossMSE-E200.txt', 'wb') as file_txt:
    pickle.dump(history.history, file_txt)

