import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import model_code.attention_module as am

def fcn(input_shape, depth=3, out_dimension=128, drop=0, kernel_shape_list=None, act_func="relu", use_am=False):
    """Build a fully-convolutional feature-extractor branch.

    Args:
        input_shape: shape of one input sample (excluding the batch dim);
            ``input_shape[1]`` is consumed whole by the first conv kernel.
        depth: number of Conv2D blocks to stack.
        out_dimension, drop, kernel_shape_list: unused here; kept so the
            signature matches the other branch builders in this file.
        act_func: activation name applied after each batch normalization.
        use_am: when truthy, insert a CBAM attention module after each conv.

    Returns:
        Tuple ``(input_tensor, output_tensor)`` for wiring into keras.Model.
    """
    x = layers.Input(shape=input_shape)  # input layer: declares the expected sample shape
    y = layers.BatchNormalization()(x)   # normalize raw inputs
    for i in range(depth):
        if i == 0:
            # First conv spans the entire second (feature) dimension.
            y = layers.Conv2D(128, kernel_size=[2, input_shape[1]])(y)
        else:
            y = layers.Conv2D(64, kernel_size=[2, 1])(y)
        if use_am:
            y = am.cbam(y)
        y = layers.BatchNormalization()(y)
        y = layers.Activation(act_func)(y)
    # Collapse remaining spatial dims into one feature vector per sample.
    y = layers.GlobalAveragePooling2D()(y)
    return x, y

def resnet(input_shape, depth=3, out_dimension=128, drop=0, kernel_shape_list=None, act_func="relu", use_am=False):
    """Build a residual-convolution feature-extractor branch.

    Each of the ``depth`` residual blocks is a three-conv stack
    (4*dim -> 2*dim -> dim channels) whose output is added to a
    (possibly 1x1-projected) shortcut from the block's input.

    Args:
        input_shape: shape of one input sample (excluding the batch dim).
        depth: number of residual blocks.
        out_dimension: channel width of every block after the first
            (the first block is fixed at 64).
        drop: unused; kept for signature compatibility with the other builders.
        kernel_shape_list: at least three kernel shapes for the three convs;
            a default of ``[(8, 1), (5, 1), (3, 1)]`` is used when it is
            None or shorter than three entries.
        act_func: activation name used throughout.
        use_am: when truthy, apply a CBAM attention module before the add.

    Returns:
        Tuple ``(input_tensor, output_tensor)`` for wiring into keras.Model.
    """
    x = layers.Input(shape=input_shape)
    y = layers.BatchNormalization()(x)
    x1 = y  # shortcut source for the first residual block
    # Fall back to the default kernels when none (or too few) were supplied.
    if kernel_shape_list is None or len(kernel_shape_list) < 3:
        kernel_list = [(8, 1), (5, 1), (3, 1)]
    else:
        kernel_list = kernel_shape_list
    for i in range(depth):
        dimension = 64 if i == 0 else out_dimension
        # first conv (widest)
        y = layers.Conv2D(dimension * 4, kernel_list[0], padding="same")(y)
        y = layers.BatchNormalization()(y)
        y = layers.Activation(act_func)(y)
        # second conv
        y = layers.Conv2D(dimension * 2, kernel_list[1], padding="same")(y)
        y = layers.BatchNormalization()(y)
        y = layers.Activation(act_func)(y)
        # third conv (output width); activation deferred until after the add
        y = layers.Conv2D(dimension, kernel_list[2], padding="same")(y)
        y = layers.BatchNormalization()(y)
        # Shortcut: 1x1-project when channel counts differ, else just normalize.
        if x1.shape[-1] != dimension:
            shortcut_y = layers.Conv2D(dimension, (1, 1), padding="same")(x1)
            shortcut_y = layers.BatchNormalization()(shortcut_y)
        else:
            shortcut_y = layers.BatchNormalization()(x1)
        if use_am:
            y = am.cbam(y)
        y = layers.Add()([shortcut_y, y])
        y = layers.BatchNormalization()(y)
        y = layers.Activation(act_func)(y)
        x1 = y  # next block's shortcut source
    y = layers.GlobalAveragePooling2D()(y)
    return x, y

def gru(input_shape, depth=1, length=15, use_am=False):
    """Build a stacked-GRU feature-extractor branch.

    The stack narrows as it deepens: intermediate layers use 256, 128, then
    64 units, and the final layer is always a 64-unit GRU that returns only
    its last output (so the branch emits one vector per sample).

    Args:
        input_shape: shape of one input sample (excluding the batch dim).
        depth: total number of GRU layers; ``depth - 1`` intermediate
            sequence-returning layers precede the final 64-unit layer.
        length: sequence length forwarded to the attention block.
        use_am: when truthy, apply an attention block to the raw input and
            after each intermediate GRU layer.

    Returns:
        Tuple ``(input_tensor, output_tensor)`` for wiring into keras.Model.
    """
    x = layers.Input(shape=input_shape)
    y = layers.BatchNormalization()(x)
    if use_am:
        y = am.gru_attention_block(y, length)
    # Intermediate layers: 256 units, then 128, then 64 for any deeper layers.
    for i in range(depth - 1):
        if i == 0:
            dim = 256
        elif i == 1:
            dim = 128
        else:
            dim = 64
        y = layers.GRU(dim, return_sequences=True)(y)
        if use_am:
            y = am.gru_attention_block(y, length)
    # Final layer collapses the sequence into a single 64-dim vector.
    y = layers.GRU(64, return_sequences=False)(y)
    return x, y

def d_resnet(input_shape, depth=1, drop=0, act_func="relu", use_am=False):
    """Build a residual-convolution branch with fixed 2-D kernel shapes.

    Like :func:`resnet` but with hard-coded channel widths (256 -> 128 -> 128)
    and rectangular kernels, intended for dynamic (2-D) inputs.

    Args:
        input_shape: shape of one input sample (excluding the batch dim).
        depth: number of residual blocks.
        drop: unused; kept for signature compatibility with the other builders.
        act_func: activation name used throughout.
        use_am: when truthy, apply a CBAM attention module before the add.

    Returns:
        Tuple ``(input_tensor, output_tensor)`` for wiring into keras.Model.
    """
    x = layers.Input(shape=input_shape)
    y = layers.BatchNormalization()(x)
    x1 = y  # shortcut source for the first residual block
    for i in range(depth):
        # first conv (widest)
        y = layers.Conv2D(256, kernel_size=[2, 3], padding="same")(y)
        y = layers.BatchNormalization()(y)
        y = layers.Activation(act_func)(y)
        # second conv
        y = layers.Conv2D(128, kernel_size=[2, 2], padding="same")(y)
        y = layers.BatchNormalization()(y)
        y = layers.Activation(act_func)(y)
        # third conv (output width); activation deferred until after the add
        y = layers.Conv2D(128, kernel_size=[1, 2], padding="same")(y)
        y = layers.BatchNormalization()(y)
        # Shortcut: 1x1-project when channel counts differ, else just normalize.
        if x1.shape[-1] != y.shape[-1]:
            shortcut_y = layers.Conv2D(y.shape[-1], (1, 1), padding="same")(x1)
            shortcut_y = layers.BatchNormalization()(shortcut_y)
        else:
            shortcut_y = layers.BatchNormalization()(x1)
        if use_am:
            y = am.cbam(y)
        y = layers.Add()([shortcut_y, y])
        y = layers.BatchNormalization()(y)
        y = layers.Activation(act_func)(y)
        x1 = y  # next block's shortcut source
    y = layers.GlobalAveragePooling2D()(y)
    return x, y

def create_model_1(static, dynamic, ob_win):
    """Assemble a three-branch (ResNet + FCN + GRU) binary classifier.

    The static input feeds a ResNet branch; the dynamic input feeds both a
    GRU branch and an FCN branch (with a trailing channel axis added). The
    three feature vectors are concatenated into a single sigmoid unit.
    """
    res_in, res_out = resnet(input_shape=static, depth=3, out_dimension=64, drop=0, kernel_shape_list=None, act_func="relu", use_am=True)
    gru_in, gru_out = gru(dynamic, 3, ob_win, True)
    fcn_in, fcn_out = fcn(input_shape=(dynamic[0], dynamic[1], 1), use_am=True)
    merged = layers.Concatenate()([res_out, fcn_out, gru_out])
    prob = layers.Dense(1, activation="sigmoid")(merged)
    return keras.Model(inputs=[res_in, fcn_in, gru_in], outputs=prob)

def create_model_2(dynamic, ob_win):
    """Assemble a two-branch (FCN + GRU) binary classifier.

    Both branches consume the dynamic input (the FCN branch with a trailing
    channel axis added); their feature vectors are concatenated into a
    single sigmoid unit.
    """
    gru_in, gru_out = gru(dynamic, 3, ob_win, True)
    fcn_in, fcn_out = fcn(input_shape=(dynamic[0], dynamic[1], 1), use_am=True)
    merged = layers.Concatenate()([fcn_out, gru_out])
    prob = layers.Dense(1, activation="sigmoid")(merged)
    return keras.Model(inputs=[fcn_in, gru_in], outputs=prob)
    
