import numpy as np
import tensorflow as tf

# (rows, cols) for the three observation streams: workers, tasks, groups.
observe_shape = [(12, 12), (12, 20), (2, 16)]

# One placeholder per observation stream; the leading None is the batch dim.
W_input = tf.placeholder(dtype=tf.float32, shape=[None, *observe_shape[0]], name='W_input')
T_input = tf.placeholder(dtype=tf.float32, shape=[None, *observe_shape[1]], name='T_input')
G_input = tf.placeholder(dtype=tf.float32, shape=[None, *observe_shape[2]], name='G_input')

M = observe_shape[1][1] // 4      # task slots after grouping columns 4-wide
GROUP = observe_shape[0][1] // 4  # worker groups after grouping columns 4-wide


# task_encode1 = task_encode

def repeat(x, times=None):
    """Tile ``x`` along axis 1.

    Args:
        x: rank-3 tensor, expected shape (batch, 1, features) so that tiling
           produces one copy per repetition along axis 1.
        times: number of repeats along axis 1. Defaults to the module-level
           task count ``M``, preserving the original hard-coded behavior.

    Returns:
        Tensor of shape (batch, times, features).
    """
    if times is None:
        times = M  # previously baked in; now overridable per call
    return tf.tile(x, tf.stack([1, times, 1]))


# def flatten(x):
#
#     return tf.reshape(x, tf.stack([-1, tf.reduce_prod(tf.shape(x)[1:])]))

feature_size = 8      # concat width fed to the scorer (4 task + 4 group features)
hidden_size = 6       # width of the scorer's hidden layer
attention_size = 1
v_hidden_size = 10    # width of the value head's hidden layer

initializer = tf.random_normal_initializer(stddev=0.1)

# Trainable attention-scorer parameters, shared by every attention head.
w_omega1 = tf.get_variable("w_omega1", [feature_size, hidden_size], initializer=initializer)
b_omega1 = tf.get_variable("b_omega1", [hidden_size], initializer=initializer)
w_omega2 = tf.get_variable("w_omega2", [hidden_size], initializer=initializer)


# u_omega = tf.get_variable(name="u_omega", shape=[attention_size], initializer=initializer)
def one_attention(g_encode: tf.Tensor, t_encode: tf.Tensor, sub_name: str, i: int) -> tf.Tensor:
    """Score every task slot against group row ``i`` of the group encoding.

    Args:
        g_encode: group encoding — assumed shape (B, GROUP, 4); TODO confirm.
        t_encode: task encoding — assumed shape (B, M, 4); TODO confirm.
        sub_name: name scope for this attention head.
        i: index of the group row to attend with.

    Returns:
        Unnormalized scores of shape (B, M).
    """
    with tf.name_scope(sub_name):
        # Take the i-th group row, keeping rank 3: (B, 1, 4), then tile to (B, M, 4).
        group_row = tf.slice(g_encode, begin=[0, i, 0], size=[-1, 1, -1])
        tiled_row = repeat(group_row)
        # Pair each task with the same group row: (B, M, 8).
        paired = tf.concat([t_encode, tiled_row], axis=-1)
        # Two-layer scorer applied per slot:
        # (B, M, 8) x (8, 6) -> tanh -> (B, M, 6) x (6,) -> (B, M).
        hidden = tf.nn.tanh(tf.tensordot(paired, w_omega1, axes=1) + b_omega1,
                            name='hidden_layer1' + str(i))
        scores = tf.tensordot(hidden, w_omega2, axes=1, name='hidden_layer2' + str(i))
    return scores

# group == 3
with tf.name_scope('action_attention'):
    # Advantage function defined via attention over (task slot, worker group) pairs.
    group_encode = tf.reshape(tf.reduce_mean(W_input, axis=1), shape=(-1, GROUP, 4))
    task_encode = tf.reshape(tf.reduce_mean(T_input, axis=1), shape=(-1, M, 4))
    # One attention head per worker group. Previously the three heads were
    # written out by hand, silently duplicating the GROUP constant; the loop
    # keeps the scope names 'w1_attention1'..'w1_attention{GROUP}' identical.
    pre_attentions = [
        one_attention(group_encode, task_encode, 'w1_attention' + str(g + 1), g)
        for g in range(GROUP)
    ]

    move_features = tf.concat(pre_attentions, axis=1)  # shape = (B, M*GROUP)

    # Small MLP producing one extra "move" logit appended to the per-pair logits.
    # (Both layers now use tf.layers.dense — the old code mixed tf.layers and
    # tf.compat.v1.layers, which are the same op in TF1.)
    move_hidden = tf.layers.dense(move_features, units=3, activation='tanh')
    move_attention = tf.layers.dense(move_hidden, units=1, name='move_attention')

    pre_attention = tf.concat([move_features, move_attention], axis=1, name='all_attention')

    # Normalize the (B, M*GROUP + 1) logits into attention weights.
    alphas = tf.nn.softmax(pre_attention, axis=1, name='attention_weights')



with tf.name_scope('Value_function'):
    # Value-function head: 1-D conv features over all three observation streams.
    # Transpose to channels-last over the original rows: (B, cols, rows).
    W_t = tf.transpose(W_input, perm=[0, 2, 1])
    T_t = tf.transpose(T_input, perm=[0, 2, 1])
    G_t = tf.transpose(G_input, perm=[0, 2, 1])
    conv1_w = tf.layers.conv1d(W_t, filters=2, kernel_size=4, strides=4, padding='valid')
    conv1_t = tf.layers.conv1d(T_t, filters=2, kernel_size=4, strides=4, padding='valid')
    # BUG FIX: this conv previously re-used T_t, so G_input was fed (see the
    # session's feed_dict) but never consumed by the graph. It now convolves
    # the G stream as intended; concat on axis 1 still shape-checks.
    conv1_g = tf.layers.conv1d(G_t, filters=2, kernel_size=1, strides=1, padding='valid')

    conv1 = tf.concat([conv1_w, conv1_t, conv1_g], axis=1)

    X = tf.nn.relu(conv1)
    X_flatten = tf.compat.v1.layers.flatten(X)

    X_hidden = tf.layers.dense(X_flatten, units=v_hidden_size, activation='tanh',
                               name='pre_value_function')
    Value = tf.layers.dense(X_hidden, units=1, name='Value_estimator')

# Q(s, a) = V(s) + A(s, a): the scalar value broadcasts across the attention logits.
Q_value = tf.add(Value, alphas, name='Q_value')

with tf.Session() as sess:
    # Build a two-sample batch per placeholder, matching each (rows, cols) shape.
    w1 = [np.zeros(observe_shape[0], dtype=np.float32),
          np.ones(observe_shape[0], dtype=np.float32)]
    t1 = [np.zeros(observe_shape[1], dtype=np.float32),
          np.ones(observe_shape[1], dtype=np.float32)]
    g1 = [0.2 * np.ones(observe_shape[2], dtype=np.float32),
          np.ones(observe_shape[2], dtype=np.float32) * -0.3]

    sess.run(tf.compat.v1.global_variables_initializer())

    # Smoke-test the graph: fetch attention weights, value, and Q estimates.
    feeds = {W_input: w1, T_input: t1, G_input: g1}
    alphas1, V_, Q_ = sess.run([alphas, Value, Q_value], feed_dict=feeds)

    print(alphas1, V_, Q_, 'a=', sep='\n ========\n')