import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Reshape
from keras.layers import merge,Concatenate,RepeatVector
# from keras.utils.visualize_util import plot
from keras.layers import Input, Lambda,Conv1D,Flatten
from keras.models import Model
import keras.backend as K


def slice(x, i):
    """Return column `i` of a rank>=2 tensor (the sliced axis is dropped).

    NOTE: intentionally shadows the builtin ``slice`` — the Lambda layers
    below reference this function by name, so it keeps its name.
    """
    column = x[:, i]
    return column


def encode(x):
    """Collapse the time axis of `x` by averaging over axis 1 (no keepdims)."""
    return K.mean(x, axis=1, keepdims=False)


def softmax(x, axis=1):
    """Softmax activation function.

    # Arguments
        x : Tensor of rank >= 2.
        axis: Integer, axis along which the softmax normalization is applied.

    # Returns
        Tensor, output of softmax transformation (same shape as `x`).

    # Raises
        ValueError: In case `dim(x) == 1` (or rank cannot be determined).
    """
    ndim = K.ndim(x)
    if ndim == 2 and axis in (1, -1):
        # Backend fast path: K.softmax normalizes the last axis of a 2D tensor.
        return K.softmax(x)
    elif ndim is not None and ndim >= 2:
        # Numerically stable softmax (max-shifted) along an arbitrary axis.
        # BUGFIX: this branch now also handles 2D tensors with axis=0; the
        # previous version ignored `axis` for 2D input and always normalized
        # the last axis via K.softmax.
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')



# Module-level constants (scratch experiments with Lambda/Conv1D/merge that
# previously lived here have been removed).
M = 5  # number of task slots; NOTE(review): shadowed by the local M inside attention_on_task
# All-zero slot encoding, M slots x 4 features; appears unused in this file — TODO confirm
Zero_encode = K.zeros((M,4),dtype=np.float32)
# Scratch input tensor from earlier experiments; not part of the final model below
a = Input(shape=(12, 16))

# (rows, cols) of the two observation inputs: cols are 4*GROUP for 'group'
# and 4*M for 'task' (see the // 4 divisions in attention_on_task).
observe_shape = [(12, 12), (12, 20)]


def attention_on_task(group_input: Input, task_input: Input):
    '''Build the attention head from task-slot feature vectors to the action space.

    Layout (derived from observe_shape):
        slot[i].shape = [12, 4];  slot.shape = [12, 4*M]   (M = 5)
        action_space  = M * GROUP                          (GROUP = 3)
        slot[i] is encoded as the column-wise mean over the 12 rows:
            slot[i].encoder = ( sum(x1), sum(x2), sum(x3), sum(x4) ) / 12
        slot[i].encoder.shape = [1, 4];  slot_encode.shape = [1, 4*M]
        group[j].shape = [12, 4]

    For each group j this defines one attention over the M task slots
    plus a scalar "move" signal; the function returns the concatenation
    of all attention weights (M*GROUP) and move actions (GROUP).

    NOTE(review): the `: Input` annotations are Keras tensors, not a real
    type — kept as in the original signature.
    '''

    M = observe_shape[1][1] // 4      # number of task slots (20 // 4 = 5)
    GROUP = observe_shape[0][1] // 4  # number of groups (12 // 4 = 3)
    # define components — these layers are shared across all GROUP iterations
    repeator = RepeatVector(n=M)
    concatenator = Concatenate(axis=2)
    densor1 = Dense(6, activation='tanh')
    densor2 = Dense(1, activation='relu')
    activitor = Activation(activation=softmax, name='attention_weights')

    # Mean-encode each input over its time axis, then reshape to (slots, 4).
    group_encode = Lambda(encode)(group_input)
    group_encode = Reshape((GROUP,4)) (group_encode)

    task_encode = Lambda(encode)(task_input)
    task_encode = Reshape((M, 4),name='task_encoder')(task_encode)

    # Shared layers for the per-group "move" action head.
    move_densor1 = Dense(1,activation='tanh')
    move_densor2 = Dense(1,activation='sigmoid',name='move_action')

    moves = []    # per-group move scores, concatenated at the end
    outputs = []  # per-group attention weights over the M task slots

    for j in range(GROUP):
        # one step attention: score group j against every task slot
        group_j = Lambda(slice, output_shape=(1, 4), arguments={'i': j})(group_encode)

        group_j_r = repeator(group_j)  # shape to (M,4)
        # task_encode has shape (M, 4); task_input has shape (12, 4*M).
        # task_encode_1 = mean(task_input) would be (1, 4*M).

        # Pair group j with each task slot: (M, 4) ++ (M, 4) -> (M, 8).
        concat = concatenator([group_j_r, task_encode])

        # Small MLP producing one unnormalized energy per task slot.
        e = densor1(concat)
        energies = densor2(e)
        n1 = M
        group_move =Reshape(target_shape=(n1,)) (energies)
        group_move1 = move_densor1(group_move)
        moves.append(group_move1)
        # alphas: the attention weights between the current task and workers

        alphas = activitor(energies)

        outputs.append(alphas)

    # Flatten all attention weights into a single (M*GROUP,) action vector.
    out = Concatenate(axis=1)(outputs)
    n1 = M*GROUP
    out1 = Reshape(target_shape=(n1,))(out)
    print(out1.shape)
    # Squash the GROUP move scores through a shared sigmoid head.
    move_on = Concatenate(axis=1)(moves)
    move_on = move_densor2(move_on)

    out_all = Concatenate(axis=1,name='all_actions')([out1,move_on])
    return out_all


# Wire the two observation inputs through the attention head into a Model.
group_input = Input(shape=observe_shape[0], name='group')
task_input = Input(shape=observe_shape[1], name='task')

outputs = attention_on_task(group_input, task_input)


# Model maps (group, task) observations -> concatenated action tensor.
model = Model([group_input, task_input], outputs)

model.summary()
print(outputs)
