import warnings

import keras.activations

warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from tensorflow.keras.layers import (Input,Conv2D,UpSampling2D,
                                     Concatenate,MaxPooling2D,
                                     )
from tensorflow.keras.models import Model
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras.layers import Dense, Conv2D, Conv2DTranspose, Flatten
from tensorflow.keras.layers import MaxPool2D, Dropout,UpSampling2D,Concatenate
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import Adam, RMSprop
from keras.models import Model
import numpy as np

def unet(shape=(256,256 ,1)):
    """Build a U-Net-style encoder/decoder segmentation model.

    Encoder: four downsampling stages of paired unpadded 3x3 relu
    convolutions (64 -> 512 filters) plus a 1024-filter bottleneck.
    Decoder: four 2x upsampling stages; because the convolutions use
    'valid' padding, each encoder skip tensor no longer matches the
    decoder's spatial size, so it is nearest-neighbour resized to fit
    before concatenation.

    Args:
        shape: input tensor shape as (height, width, channels).

    Returns:
        An uncompiled tf.keras ``Model`` named ``mytf2_unet`` whose output
        is a 2-channel sigmoid map (spatially smaller than the input due
        to the unpadded convolutions).
    """

    def _double_conv(x, filters, stage):
        # Two unpadded 3x3 relu convolutions, named conv{stage}_1/_2.
        x = Conv2D(filters, 3, activation='relu', name=f'conv{stage}_1')(x)
        return Conv2D(filters, 3, activation='relu', name=f'conv{stage}_2')(x)

    def _up_block(x, skip, filters, up_stage, skip_stage, cat_stage, conv_stage):
        # Upsample 2x, halve channels with a 2x2 conv, then merge the
        # (resized) encoder skip tensor and refine with a double conv.
        x = UpSampling2D(name=f'up{up_stage}')(x)
        x = Conv2D(filters, 2, padding='same', name=f'up{up_stage}_conv')(x)
        # NOTE(review): calling a raw tf op on a symbolic Keras tensor
        # relies on tf.keras auto-wrapping; confirm this still works on
        # the project's TF/Keras version (it does not under Keras 3).
        skip = tf.image.resize(skip, (x.shape[1], x.shape[2]),
                               tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                               name=f'conv{skip_stage}_feature')
        merged = Concatenate(name=f'concat{cat_stage}')([x, skip])
        return _double_conv(merged, filters, conv_stage)

    inputs = Input(shape=shape, name='input')

    # Contracting path.
    enc1 = _double_conv(inputs, 64, 1)
    enc2 = _double_conv(MaxPooling2D(name='maxpool1')(enc1), 128, 2)
    enc3 = _double_conv(MaxPooling2D(name='maxpool2')(enc2), 256, 3)
    enc4 = _double_conv(MaxPooling2D(name='maxpool3')(enc3), 512, 4)

    # Bottleneck.
    bottom = _double_conv(MaxPooling2D(name='maxpool4')(enc4), 1024, 5)

    # Expanding path with resized skip connections.
    dec = _up_block(bottom, enc4, 512, 5, 4, 1, 6)
    dec = _up_block(dec, enc3, 256, 6, 3, 2, 7)
    dec = _up_block(dec, enc2, 128, 7, 2, 3, 8)
    dec = _up_block(dec, enc1, 64, 8, 1, 4, 9)

    # 1x1 projection to a 2-channel sigmoid output map.
    out = Conv2D(2, 1, activation='sigmoid', name='out')(dec)

    return Model(inputs=inputs, outputs=out, name='mytf2_unet')




def automap(img_h=256, img_w=256 , epochs =200):
    """Build and compile a small encoder/decoder reconstruction network.

    Two 2x-downsampling stages, a 256-filter middle, then two bilinear
    2x-upsampling stages with skip concatenations and batch norm after
    each merge. All convolutions are 'same'-padded, so the single-channel
    output has the same spatial size as the input.

    Args:
        img_h: input height in pixels.
        img_w: input width in pixels.
        epochs: currently unused; kept for interface compatibility
            (presumably intended for a learning-rate schedule).

    Returns:
        A compiled tf.keras ``Model`` (RMSprop optimizer, mse loss).
    """
    def _relu_conv(tensor, filters):
        # 'same'-padded 3x3 relu convolution (keeps spatial size).
        return Conv2D(filters, (3, 3), padding='same', activation='relu')(tensor)

    def _halve(tensor):
        # 2x2, stride-2 max pooling: halves height and width.
        return MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(tensor)

    def _double(tensor):
        # Bilinear 2x upsampling.
        return UpSampling2D(size=(2, 2), interpolation="bilinear")(tensor)

    x_input = Input(shape=(img_h, img_w, 1))

    # Encoder; keep the two pre-pool tensors as skip connections.
    skip_full = _relu_conv(_relu_conv(x_input, 64), 64)
    x = _relu_conv(_halve(skip_full), 64)
    skip_half = _relu_conv(_relu_conv(x, 128), 128)
    x = _relu_conv(_halve(skip_half), 128)
    x = _relu_conv(_relu_conv(x, 256), 128)

    # Decoder: upsample, concatenate skip, normalize, refine.
    x = Concatenate(axis=-1)([_double(x), skip_half])
    x = _relu_conv(_relu_conv(BatchNormalization()(x), 128), 64)
    x = Concatenate(axis=-1)([_double(x), skip_full])
    x = _relu_conv(_relu_conv(BatchNormalization()(x), 64), 64)
    output = Conv2D(1, (3, 3), padding='same', activation='relu')(x)

    model = Model(inputs=x_input, outputs=output)
    # NOTE(review): 'accuracy' is not meaningful for an mse regression
    # objective; kept to preserve the compiled model's metric set.
    model.compile(optimizer=RMSprop(learning_rate=0.00002),
                  loss='mse',
                  metrics=['accuracy'])
    return model

def automap_complex(img_h=256, img_w=256 , epochs =200):
    """Build and compile the automap network over a complex-typed input.

    Same topology as ``automap`` (two pooled encoder stages, 256-filter
    middle, two bilinear upsampling stages with skip concatenations and
    batch norm), but the input tensor is declared with a complex dtype.

    Bug fix: the input dtype was the string 'complex32', which is not a
    TensorFlow dtype (only complex64 and complex128 exist), so ``Input``
    raised immediately and this function could never build a model. It is
    now 'complex64'.

    NOTE(review): standard ``Conv2D`` layers carry real (float) kernels
    and may still reject a complex input at build time — splitting the
    input into real/imaginary channels is the usual workaround; confirm
    against the caller's data pipeline.

    Args:
        img_h: input height in pixels.
        img_w: input width in pixels.
        epochs: currently unused; kept for interface compatibility.

    Returns:
        A compiled tf.keras ``Model`` (RMSprop optimizer, mse loss).
    """
    input_shape = (img_h, img_w, 1)
    # 'complex64' is TensorFlow's smallest valid complex dtype.
    x_input = Input(shape=input_shape, dtype='complex64')

    # Encoder.
    conv2d = Conv2D(64, (3, 3), padding='same', activation='relu')(x_input)
    conv2d_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv2d)
    max_pooling2d = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv2d_1)
    conv2d_2 = Conv2D(64, (3, 3), padding='same', activation='relu')(max_pooling2d)
    conv2d_3 = Conv2D(128, (3, 3), padding='same', activation='relu')(conv2d_2)
    conv2d_4 = Conv2D(128, (3, 3), padding='same', activation='relu')(conv2d_3)
    max_pooling2d_1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(conv2d_4)

    # Middle.
    conv2d_5 = Conv2D(128, (3, 3), padding='same', activation='relu')(max_pooling2d_1)
    conv2d_6 = Conv2D(256, (3, 3), padding='same', activation='relu')(conv2d_5)
    conv2d_7 = Conv2D(128, (3, 3), padding='same', activation='relu')(conv2d_6)

    # Decoder with skip concatenations and batch norm after each merge.
    up_sampling2d = UpSampling2D(size=(2, 2), interpolation="bilinear")(conv2d_7)
    concatenate = Concatenate(axis=-1)([up_sampling2d, conv2d_4])
    batch_normalization = BatchNormalization()(concatenate)
    conv2d_8 = Conv2D(128, (3, 3), padding='same', activation='relu')(batch_normalization)
    conv2d_9 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv2d_8)
    up_sampling2d_1 = UpSampling2D(size=(2, 2), interpolation="bilinear")(conv2d_9)
    concatenate_1 = Concatenate(axis=-1)([up_sampling2d_1, conv2d_1])
    batch_normalization_1 = BatchNormalization()(concatenate_1)
    conv2d_10 = Conv2D(64, (3, 3), padding='same', activation='relu')(batch_normalization_1)
    conv2d_11 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv2d_10)
    conv2d_12 = Conv2D(1, (3, 3), padding='same', activation='relu')(conv2d_11)

    model = Model(inputs=x_input, outputs=conv2d_12)
    # NOTE(review): 'accuracy' is not meaningful for an mse regression
    # objective; kept to preserve the compiled model's metric set.
    optimizer = RMSprop(learning_rate=0.00002)
    model.compile(optimizer=optimizer,
                  loss='mse',
                  metrics=['accuracy'])
    return model

if __name__ == '__main__':
    # Smoke test: build the U-Net and show its architecture.
    model = unet()
    # summary() prints the table itself and returns None, so wrapping it
    # in print() appended a spurious "None" line — call it directly.
    model.summary()
