import tensorflow as tf
import numpy as np
from collections import deque
import random


def repeat(x, n):
    """Tile a rank-3 tensor ``n`` times along axis 1.

    Input is expected to be shaped (batch, 1, features); the result is
    (batch, n, features) with the middle slice repeated.
    """
    return tf.tile(x, tf.stack([1, n, 1]))

class RL_NetWork(object):
    """Attention-based DQN agent built on TensorFlow 1.x graph mode.

    Two structurally identical sub-graphs are created:

    * an "eval" (online) network trained on every ``learn()`` call, and
    * a "target" network whose weights are hard-copied from the eval
      network every ``replace_target_iter`` learning steps.

    Each network decomposes the Q-value into an attention-derived
    advantage term over actions plus a convolutional state-value estimate
    (dueling-style).  When ``double_q`` is True the TD target uses Double
    DQN: the eval net selects the next action, the target net scores it.
    """

    def __init__(self, name, observe_shape, action_size,
                 learning_rate=0.05,
                 reward_decay=0.97,
                 e_greedy=0.9995,
                 replace_target_iter=200,
                 memory_size=3000,
                 batch_size=32,
                 epsilon_decay=0.998,
                 output_graph=False,
                 double_q=True,
                 sess=None, save_model_path='save/dqn'):
        # observe_shape: sequence of three (rows, cols) pairs describing the
        # W / T / G observation tensors -- TODO confirm with the caller.
        self.observe_shape = observe_shape
        self.action_size = action_size
        self.name = name

        # Number of task slots and worker groups; each entity is encoded
        # with 4 features along the second axis (hence the // 4).
        self.M = observe_shape[1][1] // 4
        self.GROUP = observe_shape[0][1] // 4

        self.v_hidden_size = 10  # hidden width of the value head

        self.lr = learning_rate
        self.learn_rate_decay = 0.9995
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_decay = epsilon_decay
        # epsilon is the probability of taking a RANDOM action; it starts
        # high and decays towards epsilon_min as learning progresses.
        self.epsilon = self.epsilon_max
        self.epsilon_min = 0.01

        self.double_q = double_q  # use Double DQN targets when True

        self.learn_step_counter = 0
        self.memory = ReplayBuffer(observe_shape, memory_size)

        self._build_model()

        # Op that hard-copies every eval-net weight into the target net.
        self.replace_target_op = [tf.assign(t, e)
                                  for t, e in zip(self.target_net_params,
                                                  self.q_net_params)]

        if sess is None:
            self.sess = tf.Session()
            self.sess.run(tf.global_variables_initializer())
        else:
            self.sess = sess
        if output_graph:
            tf.summary.FileWriter("logs/", self.sess.graph)
        self.cost_his = []

        self.saver = tf.train.Saver()
        self.save_model_path = save_model_path

    def _one_attention(self, w1, b1, w2, g_encode: tf.Tensor, t_encode: tf.Tensor, sub_name: str, i: int) -> tf.Tensor:
        """Score every task slot against group ``i``.

        Args:
            w1, b1, w2: shared attention parameters.
            g_encode: (B, GROUP, 4) group encodings.
            t_encode: (B, M, 4) task encodings.
            sub_name: variable-scope name for this head.
            i: index of the group to attend from.

        Returns:
            (B, M) tensor of unnormalised attention scores.
        """
        with tf.variable_scope(sub_name):
            # Slice out group i as (B, 1, 4) and tile to (B, M, 4) so it can
            # be concatenated with every task encoding.
            group_repeat1 = repeat(tf.slice(g_encode, begin=[0, i, 0], size=[-1, 1, -1]), self.M)

            concat = tf.concat([t_encode, group_repeat1], axis=-1)
            # Fully connected layer applied to each of the M slots:
            # (B, M, D) x (D, H) -> (B, M, H), where H = hidden_size.
            t_a1 = tf.nn.tanh(tf.tensordot(concat, w1, axes=1) + b1, name='hidden_layer1' + str(i))
            # (B, M, H) x (H,) -> (B, M)
            t_a2 = tf.tensordot(t_a1, w2, axes=1, name='hidden_layer2' + str(i))

        return t_a2

    def _build_model(self):
        """Construct the eval and target graphs plus loss/training ops."""

        def build_layers(W_input, T_input, G_input, initializer):
            """Shared builder; returns (Q_value, attention weights)."""
            feature_size = 8  # 4 task features + 4 group features after concat
            hidden_size = 6

            with tf.variable_scope('attention_vars'):
                # Trainable attention parameters, shared by all three heads.
                w_omega1 = tf.get_variable(name="w_omega1", shape=[feature_size, hidden_size],
                                           initializer=initializer)
                b_omega1 = tf.get_variable(name="b_omega1", shape=[hidden_size], initializer=initializer)
                w_omega2 = tf.get_variable(name="w_omega2", shape=[hidden_size], initializer=initializer)

            with tf.variable_scope('action_attention'):
                # Advantage head: attention between worker groups and tasks.
                group_encode = tf.reshape(tf.reduce_mean(W_input, axis=1), shape=(-1, self.GROUP, 4))
                task_encode = tf.reshape(tf.reduce_mean(T_input, axis=1), shape=(-1, self.M, 4))
                # One score map per group index (assumes GROUP >= 3 -- TODO
                # confirm against the environment's observation layout).
                pre_attention1 = self._one_attention(w_omega1, b_omega1, w_omega2, group_encode, task_encode, 'w1_attention1', 0)
                pre_attention2 = self._one_attention(w_omega1, b_omega1, w_omega2, group_encode, task_encode, 'w1_attention2', 1)
                pre_attention3 = self._one_attention(w_omega1, b_omega1, w_omega2, group_encode, task_encode, 'w1_attention3', 2)

                move_features = tf.concat([pre_attention1, pre_attention2, pre_attention3], axis=1)  # (B, 3*M)

                # Extra learned score appended as one additional action.
                move_hidden = tf.layers.dense(move_features, units=3, activation='tanh')
                move_attention = tf.layers.dense(move_hidden, units=1, name='move_attention')

                pre_attention = tf.concat([move_features, move_attention], axis=1, name='all_attention')

                alphas = tf.nn.softmax(pre_attention, axis=1, name='attention_weights')

            with tf.variable_scope('Value_function'):
                # State-value head: conv features over the W/T/G observations.
                W_t = tf.transpose(W_input, perm=[0, 2, 1])
                T_t = tf.transpose(T_input, perm=[0, 2, 1])
                conv1_w = tf.layers.conv1d(W_t, filters=2, kernel_size=4, strides=4, padding='valid')
                conv1_t = tf.layers.conv1d(T_t, filters=2, kernel_size=4, strides=4, padding='valid')
                conv1_g = tf.layers.conv1d(G_input, filters=2, kernel_size=1, strides=1, padding='valid')

                conv1 = tf.concat([conv1_w, conv1_t, conv1_g], axis=1)

                X = tf.nn.relu(conv1)
                X_flatten = tf.layers.flatten(X)

                X_hidden = tf.layers.dense(X_flatten, units=self.v_hidden_size, activation='tanh', name='pre_value_function')
                # relu output negated, so Value <= 0 by construction.
                Value = - tf.layers.dense(X_hidden, units=1, activation='relu', name='Value_estimator')

                # Dueling-style combination: Q = V + attention advantage.
                Q_value = tf.add(Value, alphas, name='Q_value')

            return Q_value, alphas

        # ---------------- eval-net inputs ----------------
        self.W_input = tf.placeholder(shape=[None, self.observe_shape[0][0], self.observe_shape[0][1]],
                                      dtype=tf.float32, name='W_input')
        self.T_input = tf.placeholder(shape=[None, self.observe_shape[1][0], self.observe_shape[1][1]],
                                      dtype=tf.float32, name='T_input')
        self.G_input = tf.placeholder(shape=[None, self.observe_shape[2][0], self.observe_shape[2][1]],
                                      dtype=tf.float32, name='G_input')
        self.q_target = tf.placeholder(shape=[None, self.action_size], dtype=tf.float32, name='target_value')

        # ---------------- build eval net ----------------
        with tf.variable_scope('eval_net'):
            initializer = tf.random_normal_initializer(stddev=0.1)

            self.q_eval, self.action_attention = build_layers(self.W_input, self.T_input, self.G_input,
                                                              initializer=initializer)

            self.q_net_params = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope='eval_net')

        with tf.variable_scope('loss'):
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))

        with tf.variable_scope('train'):
            # The learning rate is fed per step so the Python-side decay of
            # self.lr in learn() actually reaches the optimizer; a plain
            # float would be frozen into the graph at build time.
            self.lr_ph = tf.placeholder(dtype=tf.float32, shape=[], name='learning_rate')
            self._train_op = tf.train.AdamOptimizer(self.lr_ph).minimize(self.loss)

        # ---------------- build target net ----------------
        self.W_input_tar = tf.placeholder(shape=[None, self.observe_shape[0][0], self.observe_shape[0][1]],
                                          dtype=tf.float32, name='W_input_tar')
        self.T_input_tar = tf.placeholder(shape=[None, self.observe_shape[1][0], self.observe_shape[1][1]],
                                          dtype=tf.float32, name='T_input_tar')
        self.G_input_tar = tf.placeholder(shape=[None, self.observe_shape[2][0], self.observe_shape[2][1]],
                                          dtype=tf.float32, name='G_input_tar')

        with tf.variable_scope('target_net'):
            initializer = tf.random_normal_initializer(stddev=0.2)

            self.q_next, _ = build_layers(self.W_input_tar, self.T_input_tar, self.G_input_tar,
                                          initializer=initializer)

            self.target_net_params = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope='target_net')

    def choose_action(self, W, S, G, action_mask):
        """Epsilon-greedy action selection.

        Args:
            W, S, G: one observation (no batch axis; a batch axis is added).
            action_mask: iterable of INVALID action indices.

        Returns:
            Chosen action index as an int.
        """
        if np.random.uniform() > self.epsilon:  # exploit
            actions_value = self.sess.run(
                self.action_attention,
                feed_dict={self.W_input: [W], self.T_input: [S], self.G_input: [G]})
            # Zero out invalid actions; the softmax scores are positive, so a
            # zeroed entry can never win the argmax while valid actions remain.
            actions_value[0, action_mask] = 0
            action = np.argmax(actions_value)
        else:  # explore uniformly over the valid actions
            action_list = set(range(self.action_size)) - set(action_mask)
            action = np.random.choice(list(action_list))
        return action

    def store_transition(self, W, S, G, action, reward, W_, S_, G_, done):
        """Append one transition to the replay buffer."""
        self.memory.store(W, S, G, action, reward, W_, S_, G_, done)

    def learn(self):
        """Sample a batch, build TD targets and run one training step."""
        # Periodically sync the target network with the eval network.
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.sess.run(self.replace_target_op)
            print('\ntarget_params_replaced\n')

        W_batch, S_batch, G_batch, act_batch, reward_batch, \
        W_next_batch, S_next_batch, G_next_batch, done_batch = self.memory.sample(self.batch_size)

        # Next-state Q-values from both nets; the eval-net values are only
        # needed for Double DQN action selection.
        q_next, q_eval4next = self.sess.run(
            [self.q_next, self.q_eval],
            feed_dict={self.W_input_tar: W_next_batch, self.T_input_tar: S_next_batch,
                       self.G_input_tar: G_next_batch,
                       self.W_input: W_next_batch, self.T_input: S_next_batch,
                       self.G_input: G_next_batch})

        q_eval = self.sess.run(self.q_eval, {self.W_input: W_batch, self.T_input: S_batch,
                                             self.G_input: G_batch})

        q_target = q_eval.copy()

        batch_index = np.arange(self.batch_size, dtype=np.int32)
        # ReplayBuffer returns column vectors (B, 1); flatten them so the
        # fancy indexing below updates one (row, action) entry per sample.
        # With the (B, 1) shape the index pair broadcasts to a (B, B) block
        # and cross-contaminates the targets of unrelated samples.
        actions = act_batch.flatten()
        rewards = reward_batch.flatten()
        # Terminal transitions must not bootstrap any future value.
        not_done = 1.0 - done_batch.flatten().astype(np.float32)

        if self.double_q:
            # Double DQN: eval net picks the argmax action, target net scores it.
            best_next = np.argmax(q_eval4next, axis=1)
            selected_q_next = q_next[batch_index, best_next]
        else:
            selected_q_next = np.max(q_next, axis=1)

        q_target[batch_index, actions] = rewards + self.gamma * selected_q_next * not_done

        _, self.cost = self.sess.run(
            [self._train_op, self.loss],
            feed_dict={self.W_input: W_batch, self.T_input: S_batch,
                       self.G_input: G_batch, self.q_target: q_target,
                       self.lr_ph: self.lr})

        self.cost_his.append(self.cost)
        # Anneal the learning rate and the exploration probability.
        self.lr = self.lr * self.learn_rate_decay if self.lr > 0.001 else 0.001
        self.epsilon = self.epsilon * self.epsilon_decay \
            if self.epsilon > self.epsilon_min else self.epsilon_min

        self.learn_step_counter += 1

    def save_model(self):
        """Checkpoint the session; the learn-step count tags the file name."""
        self.saver.save(self.sess, self.save_model_path + '.ckpt',
                        global_step=self.learn_step_counter)

    def load_model(self):
        """Restore the most recent checkpoint from the save directory."""
        path = self.save_model_path.split('/')[0]
        ckpt = tf.train.get_checkpoint_state(path)
        # model_checkpoint_path always refers to the latest checkpoint.
        # (all_model_checkpoint_paths is ordered oldest -> newest, so the
        # previous index-0 lookup restored the OLDEST retained checkpoint.)
        self.saver.restore(self.sess, ckpt.model_checkpoint_path)


class ReplayBuffer:
    '''Fixed-capacity circular replay buffer for the DQN agent.

    A transition is (W, S, G, action, reward, W_, S_, G_, done), where the
    three observation components have per-sample shapes given by
    ``observe_shape`` (a sequence of three (rows, cols) pairs).  Once the
    buffer has wrapped around, new transitions overwrite the oldest slots.
    '''

    def __init__(self, observe_shape, memory_size):
        self.observe_shape = observe_shape

        # Pre-allocated storage for the current observations.
        self.W_array = np.zeros(shape=(memory_size, self.observe_shape[0][0], self.observe_shape[0][1]),
                                dtype=np.float32)
        self.S_array = np.zeros(shape=(memory_size, self.observe_shape[1][0], self.observe_shape[1][1]),
                                dtype=np.float32)
        self.G_array = np.zeros(shape=(memory_size, self.observe_shape[2][0], self.observe_shape[2][1]),
                                dtype=np.float32)

        self.rewards = np.zeros(shape=(memory_size, 1), dtype=np.float32)
        # np.int / np.bool were deprecated aliases removed in NumPy 1.24;
        # use the concrete dtypes so construction works on modern NumPy.
        self.actions = np.zeros(shape=(memory_size, 1), dtype=np.int64)

        self.dones = np.zeros(shape=(memory_size, 1), dtype=np.bool_)

        # Pre-allocated storage for the next observations.
        self.W_next_array = np.zeros(shape=(memory_size, self.observe_shape[0][0], self.observe_shape[0][1]),
                                     dtype=np.float32)
        self.S_next_array = np.zeros(shape=(memory_size, self.observe_shape[1][0], self.observe_shape[1][1]),
                                     dtype=np.float32)
        self.G_next_array = np.zeros(shape=(memory_size, self.observe_shape[2][0], self.observe_shape[2][1]),
                                     dtype=np.float32)

        self.memory_size = memory_size
        self.memory_counter = 0   # next write slot; wraps at capacity
        self.full_size = False    # True once the buffer has wrapped around

    def __len__(self):
        """Number of valid transitions currently stored."""
        return self.memory_counter if not self.full_size else self.memory_size

    def store(self, W, S, G, action, reward, W_, S_, G_, done):
        """Write one transition, overwriting the oldest slot when full."""
        index = self.memory_counter % self.memory_size

        self.W_array[index] = W
        self.S_array[index] = S
        self.G_array[index] = G
        self.actions[index] = action
        self.rewards[index] = reward

        self.dones[index] = done

        self.W_next_array[index] = W_
        self.S_next_array[index] = S_
        self.G_next_array[index] = G_

        self.memory_counter += 1
        if self.memory_counter == self.memory_size:
            self.full_size = True
            self.memory_counter = 0

    def sample(self, batch_size):
        """Draw ``batch_size`` transitions uniformly, with replacement.

        Raises ValueError (from np.random.choice) if the buffer is empty.
        """
        if self.full_size:
            sample_index = np.random.choice(self.memory_size, size=batch_size)
        else:
            sample_index = np.random.choice(self.memory_counter, size=batch_size)

        W_batch = self.W_array[sample_index]
        S_batch = self.S_array[sample_index]
        G_batch = self.G_array[sample_index]

        act_batch = self.actions[sample_index]
        reward_batch = self.rewards[sample_index]
        W_next_batch = self.W_next_array[sample_index]
        S_next_batch = self.S_next_array[sample_index]
        G_next_batch = self.G_next_array[sample_index]

        done_batch = self.dones[sample_index]

        return W_batch, S_batch, G_batch, act_batch, reward_batch, \
               W_next_batch, S_next_batch, G_next_batch, \
               done_batch







