import os, glob
# Suppress TensorFlow C++ info/warning logs ('2' = errors only).
# Fixed: the variable was misspelled TF_CPP_MIN_LEVEL, which TensorFlow
# ignores; the name it actually reads is TF_CPP_MIN_LOG_LEVEL.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import tensorflow as tf
from tensorflow import keras
import copy
from PIL import Image
from tensorflow.keras import losses
import random
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from skimage.transform import resize
from keras.utils import to_categorical
from keras.models import Model, load_model
from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout
from keras.layers.core import Lambda, RepeatVector, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D
from keras.layers.merge import concatenate, add
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import cv2
import tensorflow.keras.backend as K

# Report the TensorFlow version in use at startup (useful when debugging
# environment/compatibility issues with the old-style keras.* imports above).
print(tf.__version__)
# print(tf.config.list_physical_devices('GPU'))



def relu6(x):
    """ReLU activation capped at 6, i.e. min(max(x, 0), 6)."""
    capped = K.relu(x, max_value=6)
    return capped

def conv2d_block(input_tensor, num_filters, kernel_size=3, strides=(1, 1)):
    """One encoder conv unit: Conv2D -> ReLU6 -> BatchNormalization.

    The convolution uses he_normal initialization and "same" padding;
    a stride of (2, 2) makes the block double as a downsampling step.

    NOTE(review): the activation is applied *before* batch norm here,
    which is the reverse of the common conv -> BN -> act ordering —
    preserved as-is to keep behavior identical; confirm it is intended.
    """
    conv = Conv2D(
        filters=num_filters,
        kernel_size=(kernel_size, kernel_size),
        strides=strides,
        kernel_initializer="he_normal",
        padding="same",
    )(input_tensor)
    activated = Activation(relu6)(conv)
    normalized = BatchNormalization()(activated)
    return normalized


def conv2d_block_up(input_tensor, num_filters, kernel_size=3, strides=(1, 1)):
    """Decoder conv unit: Conv2D -> BatchNormalization, no activation.

    Same convolution settings as conv2d_block (he_normal, "same"
    padding) but deliberately skips the activation; in this file it is
    used right after a skip-connection concat on the decoder path.
    """
    conv = Conv2D(
        filters=num_filters,
        kernel_size=(kernel_size, kernel_size),
        strides=strides,
        kernel_initializer="he_normal",
        padding="same",
    )(input_tensor)
    normalized = BatchNormalization()(conv)
    return normalized


# In[97]:


def get_unet(input_img, num_filters=16, dropout=0.2):
    """Build and compile a U-Net-style binary-segmentation model.

    Encoder: three resolution levels; each downsampling step is a
    stride-2 convolution instead of max-pooling. Decoder: Conv2DTranspose
    upsampling with skip-connection concatenation at each level. Output
    is a single-channel sigmoid map; the model is compiled with
    Adam(1e-3) and binary cross-entropy before being returned.

    NOTE(review): `dropout` is accepted but never used (all Dropout
    layers were disabled) — confirm whether it should be applied.
    """
    f = num_filters

    # ---- encoder (left half of the U) ----
    enc1 = conv2d_block(input_img, num_filters=f * 1, kernel_size=3, strides=(1, 1))
    enc1 = conv2d_block(enc1, num_filters=f * 1, kernel_size=3, strides=(1, 1))
    enc1 = conv2d_block(enc1, num_filters=f * 1, kernel_size=3, strides=(1, 1))
    # stride-2 conv replaces MaxPooling2D for downsampling
    down1 = conv2d_block(enc1, num_filters=f * 2, kernel_size=3, strides=(2, 2))

    enc2 = conv2d_block(down1, num_filters=f * 2, kernel_size=3, strides=(1, 1))
    enc2 = conv2d_block(enc2, num_filters=f * 2, kernel_size=3, strides=(1, 1))
    down2 = conv2d_block(enc2, num_filters=f * 4, kernel_size=3, strides=(2, 2))

    enc3 = conv2d_block(down2, num_filters=f * 4, kernel_size=3, strides=(1, 1))
    enc3 = conv2d_block(enc3, num_filters=f * 4, kernel_size=3, strides=(1, 1))
    down3 = conv2d_block(enc3, num_filters=f * 8, kernel_size=3, strides=(2, 2))

    # ---- bottleneck ----
    mid = conv2d_block(down3, num_filters=f * 8, kernel_size=3, strides=(1, 1))
    mid = conv2d_block(mid, num_filters=f * 8, kernel_size=3, strides=(1, 1))
    mid = conv2d_block(mid, num_filters=f * 8, kernel_size=3, strides=(1, 1))

    # ---- decoder (right half of the U) ----
    up3 = Conv2DTranspose(f * 4, (3, 3), strides=(2, 2), padding='same')(mid)
    up3 = concatenate([up3, enc3])  # skip connection
    dec3 = conv2d_block_up(up3, num_filters=f * 4, kernel_size=3)
    dec3 = conv2d_block(dec3, num_filters=f * 4, kernel_size=3)

    up2 = Conv2DTranspose(f * 2, (3, 3), strides=(2, 2), padding='same')(dec3)
    up2 = concatenate([up2, enc2])  # skip connection
    dec2 = conv2d_block(up2, num_filters=f * 2, kernel_size=3)
    dec2 = conv2d_block(dec2, num_filters=f * 2, kernel_size=3)

    up1 = Conv2DTranspose(f * 1, (3, 3), strides=(2, 2), padding='same')(dec2)
    up1 = concatenate([up1, enc1])  # skip connection
    dec1 = conv2d_block(up1, num_filters=f * 1, kernel_size=3)
    dec1 = conv2d_block(dec1, num_filters=f * 1, kernel_size=3)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(dec1)
    model = Model(inputs=[input_img], outputs=[outputs])
    model.compile(optimizer=Adam(1e-3), loss="binary_crossentropy", metrics=["accuracy"])
    model.summary()
    return model

# Build the model at import time for 128x128 single-channel (grayscale)
# inputs. `inputs` and `model` stay at module level so other code can
# import them directly.
inputs = Input((128, 128, 1))

model = get_unet(inputs)
