# Simplified unet for fault segmentation
# The original u-net architecture is more complicated than necessary 
# for our task of fault segmentation.
# We significantly reduce the number of layers and features at each 
# layer to save GPU memory and computation but still preserve high 
# performance in fault segmentation.

import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import tensorflow as tf
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K


def IEE(inputs, filters):
    """Input-enhancement block applied to encoder skip connections (GIE).

    Reduces the input along axis 1 with both mean and max (keeping the
    reduced dim), sums the two descriptors, rescales the input by that
    sum, and passes the result through a softmax activation.

    NOTE(review): ``filters`` is accepted for call-site symmetry but is
    never used. Reducing over axis=1 (the first spatial axis for a
    channels-last 5D tensor) is unusual for attention -- confirm the
    intended axis.
    """
    mean_desc = tf.reduce_mean(inputs, axis=1, keepdims=True)
    max_desc = tf.reduce_max(inputs, axis=1, keepdims=True)
    descriptor = Add()([max_desc, mean_desc])
    rescaled = Multiply()([inputs, descriptor])
    return Activation('softmax')(rescaled)


def IES(inputs, filters):
    """Input-enhancement block for the bottleneck (IEE without softmax).

    Sums the axis-1 mean and max reductions of the input (dims kept)
    and multiplies the input by the combined descriptor.

    NOTE(review): ``filters`` is accepted for call-site symmetry but is
    never used.
    """
    mean_desc = tf.reduce_mean(inputs, axis=1, keepdims=True)
    max_desc = tf.reduce_max(inputs, axis=1, keepdims=True)
    descriptor = Add()([max_desc, mean_desc])
    return Multiply()([inputs, descriptor])


def unet3(pretrained_weights=None, input_size=(None, None, None, 1)):
    '''
    Three-level 3D U-Net for fault segmentation.

    Fix: ``pretrained_weights`` was previously accepted but silently
    ignored; it is now loaded into the model when provided.

    :param pretrained_weights: optional path to a Keras weights file.
    :param input_size: channels-last input shape; None dims allow
        variable sizes (must be divisible by 8 for the 3 poolings).
    :return: uncompiled Keras Model with a 1-channel sigmoid output.
    '''
    inputs = Input(input_size)

    # Encoder: two 3x3x3 convs per level, then 2x max-pooling.
    conv1 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)

    conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)

    conv3 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)

    # Bottleneck.
    conv4 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(pool3)
    conv4 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv4)

    # Decoder: upsample, concat with the matching encoder level, two convs.
    up5 = concatenate([UpSampling3D(size=(2, 2, 2))(conv4), conv3], axis=-1)
    conv5 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up5)
    conv5 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv5)

    up6 = concatenate([UpSampling3D(size=(2, 2, 2))(conv5), conv2], axis=-1)
    conv6 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv6)

    up7 = concatenate([UpSampling3D(size=(2, 2, 2))(conv6), conv1], axis=-1)
    conv7 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(conv7)

    # Per-voxel fault probability.
    conv8 = Conv3D(1, (1, 1, 1), activation='sigmoid')(conv7)

    model = Model(inputs=[inputs], outputs=[conv8], name='unet3')
    model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    # Compilation (e.g. Adam + cross_entropy_balanced) is left to the caller.
    return model


def GIE_NET(pretrained_weights=None, input_size=(None, None, None, 1)):
    '''
    Three-level U-Net variant: each pooling also carries the level's
    input forward (dense-style), the bottleneck is gated by IES, and
    each skip connection is gated by IEE.

    Fix: ``pretrained_weights`` was previously accepted but silently
    ignored; it is now loaded into the model when provided.

    :param pretrained_weights: optional path to a Keras weights file.
    :param input_size: channels-last input shape; None dims allow
        variable sizes (must be divisible by 8 for the 3 poolings).
    :return: uncompiled Keras Model with a 1-channel sigmoid output.
    '''
    inputs = Input(input_size)

    # Encoder: two convs, then downsample the conv output together with
    # the level's own input (concatenated along channels).
    conv1 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(conv1)
    maxpool1 = MaxPooling3D(pool_size=(2, 2, 2))(concatenate([conv1, inputs], axis=-1))  # downsampling

    conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(maxpool1)
    conv2 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv2)
    maxpool2 = MaxPooling3D(pool_size=(2, 2, 2))(concatenate([conv2, maxpool1], axis=-1))  # downsampling

    conv3 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(maxpool2)
    conv3 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv3)
    maxpool3 = MaxPooling3D(pool_size=(2, 2, 2))(concatenate([conv3, maxpool2], axis=-1))  # downsampling

    # Bottleneck with IES enhancement.
    conv4 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(maxpool3)
    conv4 = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv4)
    conv4 = IES(conv4, 128)

    # Decoder: upsample and concat with the IEE-enhanced skip tensor.
    up5 = concatenate([UpSampling3D(size=(2, 2, 2))(conv4), IEE(conv3, 64)], axis=-1)
    conv5 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up5)
    conv5 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv5)

    up6 = concatenate([UpSampling3D(size=(2, 2, 2))(conv5), IEE(conv2, 32)], axis=-1)
    conv6 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up6)
    conv6 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv6)

    up7 = concatenate([UpSampling3D(size=(2, 2, 2))(conv6), IEE(conv1, 16)], axis=-1)
    conv7 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(conv7)

    # Per-voxel fault probability.
    conv8 = Conv3D(1, (1, 1, 1), activation='sigmoid')(conv7)

    # NOTE(review): model name 'snet3' kept as-is in case callers look the
    # model up by name, despite the function being called GIE_NET.
    model = Model(inputs=[inputs], outputs=[conv8], name='snet3')
    model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    # Compilation (e.g. Adam + cross_entropy_balanced) is left to the caller.
    return model


def vnet1(input_size=(None, None, None, 1)):
    '''
    V-Net-style 3D segmentation network with PReLU activations,
    strided-convolution downsampling and transposed-convolution
    upsampling.

    Fix: ``input_size`` was previously ignored in favor of a hard-coded
    (128, 128, 128, 1) input shape. It is now honored, so the model
    accepts variable input sizes (the default allows any spatial size
    divisible by 8); any input that worked before still works.

    :param input_size: channels-last input shape.
    :return: uncompiled Keras Model with a 1-channel sigmoid output.
    '''
    inputs = Input(shape=input_size, name='vnet_input')
    conv1 = Conv3D(16, 5, padding='same', name='conv1')(inputs)  # convolution
    conv1 = PReLU(name='conv1_prelu')(conv1)  # activation

    concat1 = Concatenate(name='concat1')([inputs, conv1])  # residual-style concat
    down_conv1 = Conv3D(32, 2, 2, name='down_conv1')(concat1)  # strided conv downsampling
    down_conv1 = PReLU(name='down_conv1_prelu')(down_conv1)  # activation

    conv2_1 = Conv3D(32, 5, padding='same', name='conv2_1')(down_conv1)  # two conv+PReLU stages
    conv2_1 = PReLU(name='conv2_1_prelu')(conv2_1)
    conv2_2 = Conv3D(32, 5, padding='same', name='conv2_2')(conv2_1)
    conv2_2 = PReLU(name='conv2_2_prelu')(conv2_2)

    concat2 = Concatenate(name='concat2')([down_conv1, conv2_2])
    down_conv2 = Conv3D(64, 2, 2, name='down_conv2')(concat2)
    down_conv2 = PReLU(name='down_conv2_prelu')(down_conv2)

    conv3_1 = Conv3D(64, 5, padding='same', name='conv3_1')(down_conv2)
    conv3_1 = PReLU(name='conv3_1_prelu')(conv3_1)
    conv3_2 = Conv3D(64, 5, padding='same', name='conv3_2')(conv3_1)
    conv3_2 = PReLU(name='conv3_2_prelu')(conv3_2)
    conv3_3 = Conv3D(64, 5, padding='same', name='conv3_3')(conv3_2)
    conv3_3 = PReLU(name='conv3_3_prelu')(conv3_3)

    concat3 = Concatenate(name='concat3')([conv3_3, down_conv2])
    down_conv3 = Conv3D(128, 2, 2, name='down_conv3')(concat3)
    down_conv3 = PReLU(name='down_conv3_prelu')(down_conv3)

    conv4_1 = Conv3D(128, 5, padding='same', name='conv4_1')(down_conv3)
    conv4_1 = PReLU(name='conv4_1_prelu')(conv4_1)
    conv4_2 = Conv3D(128, 5, padding='same', name='conv4_2')(conv4_1)
    conv4_2 = PReLU(name='conv4_2_prelu')(conv4_2)
    conv4_3 = Conv3D(128, 5, padding='same', name='conv4_3')(conv4_2)
    conv4_3 = PReLU(name='conv4_3_prelu')(conv4_3)

    concat4 = Concatenate(name='concat4')([down_conv3, conv4_3])
    up_conv4 = Conv3DTranspose(128, 2, 2, name='up_conv4')(concat4)  # transposed-conv upsampling
    up_conv4 = PReLU(name='up_conv4_prelu')(up_conv4)

    conv5 = Concatenate(name='conv5')([up_conv4, concat3])
    conv5_1 = Conv3D(128, 5, padding='same', name='conv5_1')(conv5)
    conv5_1 = PReLU(name='conv5_1_prelu')(conv5_1)
    conv5_2 = Conv3D(128, 5, padding='same', name='conv5_2')(conv5_1)
    conv5_2 = PReLU(name='conv5_2_prelu')(conv5_2)
    conv5_3 = Conv3D(128, 5, padding='same', name='conv5_3')(conv5_2)
    conv5_3 = PReLU(name='conv5_3_prelu')(conv5_3)

    concat5 = Concatenate(name='concat5')([up_conv4, conv5_3])
    up_conv5 = Conv3DTranspose(64, 2, 2, name='up_conv5')(concat5)
    up_conv5 = PReLU(name='up_conv5_prelu')(up_conv5)

    conv6 = Concatenate(name='conv6')([up_conv5, concat2])
    # two conv+PReLU stages
    for i in range(1, 3):
        conv6 = Conv3D(64, 5, padding='same', name=f'conv6_{i}')(conv6)
        conv6 = PReLU(name=f'conv6_{i}_prelu')(conv6)

    concat6 = Concatenate(name='concat6')([conv6, up_conv5])
    up_conv6 = Conv3DTranspose(32, 2, 2, name='up_conv6')(concat6)
    up_conv6 = PReLU(name='up_conv6_prelu')(up_conv6)

    conv7 = Concatenate(name='conv7')([up_conv6, concat1])
    conv7_1 = Conv3D(32, 5, padding='same', name='conv7_1')(conv7)

    concat7 = Concatenate(name='concat7')([conv7_1, up_conv6])
    # sigmoid used for consistency with the other models (softmax alternative)
    conv8 = Conv3D(1, 1, 1, name='out', activation='sigmoid')(concat7)

    model = Model(inputs=[inputs], outputs=[conv8], name='vnet')
    model.summary()

    return model


def vnet(pretrained_weights=None, input_size=(None, None, None, 1)):
    '''
    V-Net-style 3D segmentation network with ReLU activations, 3x3x3
    kernels, strided-convolution downsampling and transposed-convolution
    upsampling.

    Fix: ``pretrained_weights`` was previously accepted but silently
    ignored; it is now loaded into the model when provided.

    :param pretrained_weights: optional path to a Keras weights file.
    :param input_size: channels-last input shape; None dims allow
        variable sizes (must be divisible by 8 for the 3 downsamplings).
    :return: uncompiled Keras Model with a 1-channel sigmoid output.
    '''
    inputs = Input(input_size)
    conv1 = Conv3D(16, (3, 3, 3), padding='same', name='conv1', activation='relu')(inputs)  # convolution

    concat1 = Concatenate(name='concat1')([inputs, conv1])  # residual-style concat
    down_conv1 = Conv3D(32, (2, 2, 2), 2, name='down_conv1', activation='relu')(concat1)  # strided conv downsampling

    conv2_1 = Conv3D(32, (3, 3, 3), padding='same', name='conv2_1', activation='relu')(down_conv1)  # two conv stages
    conv2_2 = Conv3D(32, (3, 3, 3), padding='same', name='conv2_2', activation='relu')(conv2_1)

    concat2 = Concatenate(name='concat2')([down_conv1, conv2_2])
    down_conv2 = Conv3D(64, (2, 2, 2), 2, name='down_conv2', activation='relu')(concat2)

    conv3_1 = Conv3D(64, (3, 3, 3), padding='same', name='conv3_1', activation='relu')(down_conv2)
    conv3_2 = Conv3D(64, (3, 3, 3), padding='same', name='conv3_2', activation='relu')(conv3_1)
    conv3_3 = Conv3D(64, (3, 3, 3), padding='same', name='conv3_3', activation='relu')(conv3_2)

    concat3 = Concatenate(name='concat3')([conv3_3, down_conv2])
    down_conv3 = Conv3D(128, (2, 2, 2), 2, name='down_conv3', activation='relu')(concat3)

    conv4_1 = Conv3D(128, (3, 3, 3), padding='same', name='conv4_1', activation='relu')(down_conv3)
    conv4_2 = Conv3D(128, (3, 3, 3), padding='same', name='conv4_2', activation='relu')(conv4_1)
    conv4_3 = Conv3D(128, (3, 3, 3), padding='same', name='conv4_3', activation='relu')(conv4_2)

    concat4 = Concatenate(name='concat4')([down_conv3, conv4_3])
    up_conv4 = Conv3DTranspose(128, (2, 2, 2), 2, name='up_conv4', activation='relu')(concat4)  # upsampling

    conv5 = Concatenate(name='conv5')([up_conv4, concat3])
    conv5_1 = Conv3D(128, (3, 3, 3), padding='same', name='conv5_1', activation='relu')(conv5)
    conv5_2 = Conv3D(128, (3, 3, 3), padding='same', name='conv5_2', activation='relu')(conv5_1)
    conv5_3 = Conv3D(128, (3, 3, 3), padding='same', name='conv5_3', activation='relu')(conv5_2)

    concat5 = Concatenate(name='concat5')([up_conv4, conv5_3])
    up_conv5 = Conv3DTranspose(64, (2, 2, 2), 2, name='up_conv5', activation='relu')(concat5)

    conv6 = Concatenate(name='conv6')([up_conv5, concat2])
    # two conv stages
    for i in range(1, 3):
        conv6 = Conv3D(64, (3, 3, 3), padding='same', name=f'conv6_{i}', activation='relu')(conv6)

    concat6 = Concatenate(name='concat6')([conv6, up_conv5])
    up_conv6 = Conv3DTranspose(32, (2, 2, 2), 2, name='up_conv6', activation='relu')(concat6)

    conv7 = Concatenate(name='conv7')([up_conv6, concat1])
    conv7_1 = Conv3D(32, (3, 3, 3), padding='same', name='conv7_1', activation='relu')(conv7)

    concat7 = Concatenate(name='concat7')([conv7_1, up_conv6])
    # sigmoid used for consistency with the other models (softmax alternative)
    conv8 = Conv3D(1, (1, 1, 1), name='out', activation='sigmoid')(concat7)

    model = Model(inputs=[inputs], outputs=[conv8], name='vnet')
    model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model


def cross_entropy_balanced(y_true, y_pred):
    """Class-balanced sigmoid cross-entropy for sparse fault labels.

    Weights the (rare) positive class by the negative/positive count
    ratio of the batch, so all-background-dominated volumes still
    produce a useful gradient.

    Fixes: ``tf.log`` was removed in TF 2.x (``tf.math.log`` works in
    TF >= 1.13 and 2.x), and the ``targets=`` keyword of
    ``weighted_cross_entropy_with_logits`` was renamed ``labels=``.

    :param y_true: ground-truth binary labels.
    :param y_pred: predicted probabilities (sigmoid output).
    :return: scalar loss tensor; 0 when the batch has no positives.
    """
    # tf.nn.*_cross_entropy_with_logits expects logits while Keras outputs
    # probabilities, so clip and map y_pred back to logit space.
    _epsilon = _to_tensor(K.epsilon(), y_pred.dtype.base_dtype)
    y_pred = tf.clip_by_value(y_pred, _epsilon, 1 - _epsilon)
    y_pred = tf.math.log(y_pred / (1 - y_pred))

    y_true = tf.cast(y_true, tf.float32)

    count_neg = tf.reduce_sum(1. - y_true)
    count_pos = tf.reduce_sum(y_true)

    # beta = fraction of negative voxels; pos_weight up-weights positives.
    beta = count_neg / (count_neg + count_pos)
    pos_weight = beta / (1 - beta)

    cost = tf.nn.weighted_cross_entropy_with_logits(
        logits=y_pred, labels=y_true, pos_weight=pos_weight)
    cost = tf.reduce_mean(cost * (1 - beta))

    # All-negative batches give beta == 1 and an inf pos_weight; guard by
    # returning zero loss in that case.
    return tf.where(tf.equal(count_pos, 0.0), 0.0, cost)


def _to_tensor(x, dtype):
    """Convert ``x`` to a tensor of type ``dtype``.

    # Arguments
    x: An object to be converted (numpy array, list, tensors).
    dtype: The destination type.
    # Returns
    A tensor.
    """
    tensor = tf.convert_to_tensor(x)
    return tensor if tensor.dtype == dtype else tf.cast(tensor, dtype)
