import tensorflow as tf


def repeat(x, n):
    """Tile *x* ``n`` times along its second axis.

    For the (B, 1, D) slices used in this file the result is (B, n, D);
    batch and feature axes are left untouched (multiplier 1).
    """
    return tf.tile(x, tf.stack([1, n, 1]))

class RL_NetWork(object):
    """Dueling-style Q-network over a three-part observation (W/T/G).

    An attention head scores candidate actions from pooled worker-group and
    task encodings; a small conv + dense head estimates the state value.
    The two are added (``Q_value = Value + alphas``) and trained against
    ``target_vector`` with a mean-squared-error loss via Adam.
    """

    def __init__(self, name, observe_shape, action_size, trainable=False):
        """Build placeholders, attention variables and the full graph.

        Args:
            name: prefix used for the loss/optimizer op names.
            observe_shape: three ``(rows, cols)`` pairs describing the
                W/T/G input tensors, in that order.
            action_size: width of the Q-value / TD-target vector.
            trainable: kept for interface compatibility; currently unused.
        """
        self.observe_shape = observe_shape
        self.action_size = action_size
        self.trainable = trainable
        self.name = name

        # Each entity is encoded by 4 features, so entity counts are the
        # feature width divided by 4.
        self.M = observe_shape[1][1] // 4       # number of tasks
        self.GROUP = observe_shape[0][1] // 4   # number of worker groups

        self.learning_rate = 0.05

        feature_size = 8   # concat of one 4-feature task + one 4-feature group
        hidden_size = 6
        self.v_hidden_size = 10

        initializer = tf.random_normal_initializer(stddev=0.1)

        with tf.variable_scope('attention_vars'):
            # Attention parameters shared by every attention head.
            self.w_omega1 = tf.get_variable(name="w_omega1", shape=[feature_size, hidden_size], initializer=initializer)
            self.b_omega1 = tf.get_variable(name="b_omega1", shape=[hidden_size], initializer=initializer)

            self.w_omega2 = tf.get_variable(name="w_omega2", shape=[hidden_size], initializer=initializer)

        with tf.name_scope('inputs'):
            self.W_input = tf.placeholder(shape=[None, observe_shape[0][0], observe_shape[0][1]], dtype=tf.float32, name='W_input')
            self.T_input = tf.placeholder(shape=[None, observe_shape[1][0], observe_shape[1][1]], dtype=tf.float32, name='T_input')
            self.G_input = tf.placeholder(shape=[None, observe_shape[2][0], observe_shape[2][1]], dtype=tf.float32, name='G_input')
            self.target_vector = tf.placeholder(shape=[None, action_size], dtype=tf.float32, name='target_value')
        self._build_model()

    def one_attention(self, g_encode: tf.Tensor, t_encode: tf.Tensor, sub_name: str, i: int) -> tf.Tensor:
        """Score every task encoding against group ``i``.

        The i-th group encoding is broadcast over the M task encodings,
        concatenated feature-wise and pushed through a shared two-layer
        scorer (tanh hidden layer, then a linear projection to a scalar).

        Returns:
            A (B, M) tensor of raw, pre-softmax scores.
        """
        with tf.name_scope(sub_name):
            # (B, 1, 4) slice of group i, tiled to (B, M, 4) so it pairs
            # with every task encoding.
            group_repeat1 = repeat(tf.slice(g_encode, begin=[0, i, 0], size=[-1, 1, -1]), self.M)

            concat = tf.concat([t_encode, group_repeat1], axis=-1)
            # Fully connected layer applied per task:
            # (B, M, 8) x (8, 6) -> (B, M, 6).
            t_a1 = tf.nn.tanh(tf.tensordot(concat, self.w_omega1, axes=1) + self.b_omega1, name='hidden_layer1' + str(i))
            # (B, M, 6) x (6,) -> (B, M): one scalar score per task.
            t_a2 = tf.tensordot(t_a1, self.w_omega2, axes=1, name='hidden_layer2' + str(i))

        return t_a2

    def _build_model(self):
        """Assemble the attention head, value head, Q-value and train ops."""

        with tf.name_scope('action_attention'):
            # Advantage-like term defined via attention.
            # Mean-pool over rows, then regroup into per-entity 4-vectors.
            group_encode = tf.reshape(tf.reduce_mean(self.W_input, axis=1), shape=(-1, self.GROUP, 4))
            task_encode = tf.reshape(tf.reduce_mean(self.T_input, axis=1), shape=(-1, self.M, 4))
            # Score tasks against the first three worker groups.
            # NOTE(review): assumes GROUP >= 3 — confirm with callers.
            pre_attention1 = self.one_attention(group_encode, task_encode, 'w1_attention1', 0)
            pre_attention2 = self.one_attention(group_encode, task_encode, 'w1_attention2', 1)
            pre_attention3 = self.one_attention(group_encode, task_encode, 'w1_attention3', 2)

            move_features = tf.concat([pre_attention1, pre_attention2, pre_attention3], axis=1)  # shape = (B, 3*M)

            # One extra learned score appended for the "move" action.
            move_hidden = tf.layers.dense(move_features, units=3, activation=tf.nn.tanh)
            move_attention = tf.layers.dense(move_hidden, units=1, name='move_attention')

            pre_attention = tf.concat([move_features, move_attention], axis=1, name='all_attention')  # (B, 3*M + 1)

            self.alphas = tf.nn.softmax(pre_attention, axis=1, name='attention_weights')

        with tf.name_scope('Value_function'):
            # Value-function estimation head.
            W_t = tf.transpose(self.W_input, perm=[0, 2, 1])
            T_t = tf.transpose(self.T_input, perm=[0, 2, 1])
            # BUG FIX: the group branch previously convolved T_t a second
            # time and the G_input placeholder was never consumed; feed the
            # transposed G_input here instead.
            G_t = tf.transpose(self.G_input, perm=[0, 2, 1])
            conv1_w = tf.layers.conv1d(W_t, filters=2, kernel_size=4, strides=4, padding='valid')
            conv1_t = tf.layers.conv1d(T_t, filters=2, kernel_size=4, strides=4, padding='valid')
            conv1_g = tf.layers.conv1d(G_t, filters=2, kernel_size=1, strides=1, padding='valid')

            # Branch outputs share the filter axis (2), so concat along axis 1.
            conv1 = tf.concat([conv1_w, conv1_t, conv1_g], axis=1)

            X = tf.nn.relu(conv1)
            X_flatten = tf.layers.flatten(X)

            X_hidden = tf.layers.dense(X_flatten, units=self.v_hidden_size, activation=tf.nn.tanh, name='pre_value_function')
            self.Value = tf.layers.dense(X_hidden, units=1, name='Value_estimator')  # (B, 1)

        # Dueling-style combination: V broadcasts over the attention vector.
        self.Q_value = tf.add(self.Value, self.alphas, name='Q_value')

        self.vars = {v.name: v for v in
                tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES)}
        # Back-propagation: MSE between TD target and predicted Q.
        self.loss = tf.reduce_mean(tf.square(self.target_vector - self.Q_value), name=self.name + ".loss")
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate, name=self.name + ".AdamOptimizer")
        self.gradients = self.optimizer.compute_gradients(self.loss)
        self.update = self.optimizer.apply_gradients(self.gradients)