import tensorflow as tf
import math
import random
import numpy as np
from replay_buffer import ReplayMemoryFast
# Layer 1: 32 8x8 filters with stride 4 + RELU
# Layer 2: 64 4x4 filters with stride 2 + RELU
# Layer 3: 64 3x3 filters with stride 1 + RELU
#
# Layer 4a: 512 unit Fully-Connected layer + RELU
# Layer 4b: 512 unit Fully-Connected layer + RELU
#
# Layer 5a: 1 unit FC + RELU (State Value)
# Layer 5b: actions FC + RELU (Advantage Value)
#
# Layer 6: Aggregate Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a))
class QNetwork(object):
    """
    Base class for Q-networks.

    Provides weight/bias variable initializers, access to the trainable
    variables created under this network's variable scope, eager weight
    copying to another network, and a parameter-count report.
    """

    def __init__(self, input_size, output_size, name):
        # Stored for subclasses / introspection (the base class itself only
        # needs `name` to locate its variable scope).
        self.input_size = input_size
        self.output_size = output_size
        self.name = name

    def weight_variable(self, shape, fanin=0):
        """Create a weight tf.Variable of `shape`.

        fanin == 0: truncated normal, stddev 0.01.
        fanin  > 0: uniform in [-1/sqrt(fanin), 1/sqrt(fanin)].
        """
        if fanin == 0:
            initial = tf.truncated_normal(shape, stddev=0.01)
        else:
            bound = 1.0 / math.sqrt(fanin)
            initial = tf.random_uniform(shape, minval=-bound, maxval=bound)

        return tf.Variable(initial)

    def bias_variable(self, shape, fanin=0):
        """Create a bias tf.Variable (constant 0.01, or fan-in-scaled uniform)."""
        if fanin == 0:
            initial = tf.constant(0.01, shape=shape)
        else:
            bound = 1.0 / math.sqrt(fanin)
            initial = tf.random_uniform(shape, minval=-bound, maxval=bound)

        return tf.Variable(initial)

    def variables(self):
        """Return the trainable variables created under this network's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)

    def copy_to(self, dst_net):
        """
        Eagerly copy this network's weights into `dst_net`.

        Runs the assigns via `.eval()`, so a default session with initialized
        variables is required, e.g.:

            mn = ModelNetwork(2, 3, 0, "actor")
            mn_target = ModelNetwork(2, 3, 0, "target_actor")
            s = tf.InteractiveSession()
            s.run(tf.initialize_all_variables())
            mn.copy_to(mn_target)
        """
        # Variables are paired positionally; both scopes must declare them
        # in the same order (true when both nets share a constructor).
        for src_var, dst_var in zip(self.variables(), dst_net.variables()):
            dst_var.assign(src_var).eval()

    def print_num_of_parameters(self):
        """Print the total number of trainable parameters in this network."""
        total_parameters = 0
        for variable in self.variables():
            # get_shape() yields tf.Dimension entries; multiply them out.
            variable_parameters = 1
            for dim in variable.get_shape():
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        print('# of parameters in network ', self.name, ': ', total_parameters, '  ->  ',
              np.round(float(total_parameters) / 1000000.0, 2), 'M')


class QNetworkDueling(QNetwork):
    """
    Dueling Q-network (conv feature extractor + value/advantage streams).

    Three convolutional layers feed two fully-connected streams: a scalar
    state-value stream V(s) and a per-action advantage stream A(s, a),
    aggregated as Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).

    NOTE(review): the flattened conv size 7*7*64 assumes 84x84x4 inputs
    (standard Atari preprocessing) — confirm against the caller's state_size.
    """

    # Flattened size of the final conv feature map (hoisted so the four
    # places that need it cannot drift apart).
    CONV_OUT_SIZE = 7 * 7 * 64

    def __init__(self, input_size, output_size, name):
        super(QNetworkDueling, self).__init__(input_size, output_size, name)
        self.name = name
        self.input_size = input_size
        self.output_size = output_size

        with tf.variable_scope(name):
            # Three convolutional layers (filter sizes/strides as in the
            # header comment: 8x8/4, 4x4/2, 3x3/1).
            self.W_conv1 = self.weight_variable([8, 8, 4, 32])
            self.B_conv1 = self.bias_variable([32])
            self.stride1 = 4
            self.W_conv2 = self.weight_variable([4, 4, 32, 64])
            self.B_conv2 = self.bias_variable([64])
            self.stride2 = 2
            self.W_conv3 = self.weight_variable([3, 3, 64, 64])
            self.B_conv3 = self.bias_variable([64])
            self.stride3 = 1

            # One 512-unit fully connected layer per stream.
            self.W_fc4a = self.weight_variable([self.CONV_OUT_SIZE, 512])
            self.B_fc4a = self.bias_variable([512])
            self.W_fc4b = self.weight_variable([self.CONV_OUT_SIZE, 512])
            self.B_fc4b = self.bias_variable([512])
            # Value stream head: a single scalar V(s).
            self.W_fc5a = self.weight_variable([512, 1])
            self.B_fc5a = self.bias_variable([1])
            # Advantage stream head: one output per action.
            self.W_fc5b = self.weight_variable([512, self.output_size])
            self.B_fc5b = self.bias_variable([self.output_size])

        # Print number of parameters in the network
        self.print_num_of_parameters()

    def __call__(self, input_tensor):
        """Build the forward pass for `input_tensor` and return Q-values."""
        if isinstance(input_tensor, list):
            # NOTE(review): tf.concat(concat_dim, values) is the pre-1.0
            # argument order; TF >= 1.0 expects tf.concat(values, axis).
            input_tensor = tf.concat(1, input_tensor)

        with tf.variable_scope(self.name):
            # Convolutional feature extractor, ReLU after each layer.
            self.h_conv1 = tf.nn.relu(
                tf.nn.conv2d(input_tensor, filter=self.W_conv1,
                             strides=[1, self.stride1, self.stride1, 1],
                             padding='VALID') + self.B_conv1)
            self.h_conv2 = tf.nn.relu(
                tf.nn.conv2d(self.h_conv1, filter=self.W_conv2,
                             strides=[1, self.stride2, self.stride2, 1],
                             padding='VALID') + self.B_conv2)
            self.h_conv3 = tf.nn.relu(
                tf.nn.conv2d(self.h_conv2, filter=self.W_conv3,
                             strides=[1, self.stride3, self.stride3, 1],
                             padding='VALID') + self.B_conv3)
            self.h_fully = tf.reshape(self.h_conv3, shape=[-1, self.CONV_OUT_SIZE])

            # Value stream: V(s).
            self.fully_con1_V = tf.nn.relu(tf.matmul(self.h_fully, self.W_fc4a) + self.B_fc4a)
            self.V = tf.nn.relu(tf.matmul(self.fully_con1_V, self.W_fc5a) + self.B_fc5a)

            # Advantage stream: A(s, a).
            self.fully_con1_A = tf.nn.relu(tf.matmul(self.h_fully, self.W_fc4b) + self.B_fc4b)
            self.A = tf.nn.relu(tf.matmul(self.fully_con1_A, self.W_fc5b) + self.B_fc5b)

            # Dueling aggregation: Q = V + (A - mean_a A).
            self.h_fc6 = self.V + (self.A - tf.reduce_mean(
                self.A, reduction_indices=[1, ], keep_dims=True))

        return self.h_fc6


class DQN(object):
    """
    (Double) DQN agent.

    Owns an online dueling Q-network, a target Q-network, an experience
    replay buffer, and the TF1 training graph: epsilon-greedy action
    selection, (Double) DQN target computation, Nature-paper error
    clipping, RMSProp with gradient-norm clipping, and periodic hard
    target-network updates.
    """

    def __init__(self, state_size,
                 action_size,
                 session,
                 summary_writer=None,
                 exploration_period=1000,
                 minibatch_size=32,
                 discount_factor=0.99,
                 experience_replay_buffer=10000,
                 target_qnet_update_frequency=10000,
                 initial_exploration_epsilon=1.0,
                 final_exploration_epsilon=0.05,
                 reward_clipping=-1,
                 ):
        """
        Args:
            state_size: tuple — shape of a single state observation.
            action_size: int — number of discrete actions.
            session: tf.Session used for all graph execution.
            summary_writer: optional summary FileWriter; summary writing is
                skipped when None.
            exploration_period: steps over which epsilon is annealed.
            minibatch_size: replay sample size per training step.
            discount_factor: gamma for discounting future rewards.
            experience_replay_buffer: replay memory capacity.
            target_qnet_update_frequency: hard target-copy interval (steps).
            initial_exploration_epsilon, final_exploration_epsilon:
                endpoints of the linear epsilon annealing schedule.
            reward_clipping: clip rewards to [-r, r] when > 0; disabled
                otherwise (default -1).
        """
        # Hyper-parameters and bookkeeping.
        self.state_size = state_size
        self.action_size = action_size
        self.session = session
        self.exploration_period = float(exploration_period)
        self.minibatch_size = minibatch_size
        self.discount_factor = tf.constant(discount_factor)
        self.experience_replay_buffer = experience_replay_buffer
        self.summary_writer = summary_writer
        self.reward_clipping = reward_clipping
        self.target_qnet_update_frequency = target_qnet_update_frequency
        self.initial_exploration_epsilon = initial_exploration_epsilon
        self.final_exploration_epsilon = final_exploration_epsilon
        self.num_training_steps = 0
        self.DoubleDQN = True

        # Online and target networks share the dueling architecture.
        self.qnet = QNetworkDueling(self.state_size, self.action_size, "qnet")
        self.target_qnet = QNetworkDueling(self.state_size, self.action_size, "target_qnet")

        # Optimizer hyper-parameters follow the Nature DQN paper.
        self.qnet_optimizer = tf.train.RMSPropOptimizer(learning_rate=0.00025, decay=0.99,
                                                        epsilon=0.01)

        self.experience_replay = ReplayMemoryFast(self.experience_replay_buffer,
                                                  self.minibatch_size)
        # Setup the computational graph
        self.create_graph()

    @staticmethod
    def copy_to_target_network(source_network, target_network):
        """Eagerly copy weights (uses .eval(); needs a default session
        with initialized variables). Kept for backward compatibility;
        graph-based updates use `self.hard_copy_to_target` instead."""
        source_network.copy_to(target_network)

    def action(self, state, training=False):
        """
        Select an action for `state`.

        If `training`, epsilon is linearly annealed from
        initial_exploration_epsilon to final_exploration_epsilon over
        exploration_period steps. If not `training`, a fixed testing
        epsilon = 0.05 is used.
        """
        if self.num_training_steps > self.exploration_period:
            epsilon = self.final_exploration_epsilon
        else:
            # Linear annealing between the initial and final epsilon.
            epsilon = self.initial_exploration_epsilon - float(self.num_training_steps) * (
                    self.initial_exploration_epsilon - self.final_exploration_epsilon) / self.exploration_period

        if not training:
            epsilon = 0.05

        # Random action with probability epsilon, greedy QNet policy with
        # probability 1 - epsilon.
        if random.random() <= epsilon:
            action = random.randint(0, self.action_size - 1)
        else:
            action = self.session.run(self.predicted_actions, {self.state: [state]})[0]

        return action

    def create_graph(self):
        """Build the action-selection, target-estimation and optimization
        subgraphs, plus the hard target-update ops and merged summaries."""
        # Pick action given state ->   action = argmax( qnet(state) )
        with tf.name_scope("pick_action"):
            self.state = tf.placeholder(tf.float32, (None,) + self.state_size, name="state")

            self.q_values = tf.identity(self.qnet(self.state), name="q_values")
            self.predicted_actions = tf.argmax(self.q_values, dimension=1, name="predicted_actions")

            # Track the batch-mean of max Q-values to monitor learning.
            tf.summary.histogram("Q values",
                                 tf.reduce_mean(tf.reduce_max(self.q_values, 1)))

        # Predict target future reward: r  +  gamma * max_a'[ Q'(s') ]
        with tf.name_scope("estimating_future_rewards"):
            # DQN:       y_i = r + gamma * max_a' Q_target(s', a')
            # DoubleDQN: y_i = r + gamma * Q_target(s', argmax_a' Q(s', a'))
            # i.e. the online network selects the next action, the target
            # network evaluates it.
            self.next_state = tf.placeholder(tf.float32, (None,) + self.state_size, name="next_state")
            self.next_state_mask = tf.placeholder(tf.float32, (None,), name="next_state_mask")  # 0 for terminal states
            self.rewards = tf.placeholder(tf.float32, (None,), name="rewards")

            self.next_q_values_targetqnet = tf.stop_gradient(self.target_qnet(self.next_state),
                                                             name="next_q_values_targetqnet")

            if self.DoubleDQN:
                print("Double DQN")
                # Action selection via the online network...
                self.next_q_values_qnet = tf.stop_gradient(self.qnet(self.next_state), name="next_q_values_qnet")
                self.next_selected_actions = tf.argmax(self.next_q_values_qnet, dimension=1)
                self.next_selected_actions_onehot = tf.one_hot(indices=self.next_selected_actions,
                                                               depth=self.action_size)
                # ...value estimation via the target network; terminal
                # states are zeroed out by the mask.
                self.next_max_q_values = tf.stop_gradient(
                    tf.reduce_sum(tf.multiply(self.next_q_values_targetqnet, self.next_selected_actions_onehot),
                                  reduction_indices=[1, ]) * self.next_state_mask)
            else:
                print("Regular DQN")
                self.next_max_q_values = tf.reduce_max(self.next_q_values_targetqnet,
                                                       reduction_indices=[1, ]) * self.next_state_mask

            self.target_q_values = self.rewards + self.discount_factor * self.next_max_q_values

        # Gradient descent
        with tf.name_scope("optimization_step"):
            self.action_mask = tf.placeholder(tf.float32, (None, self.action_size),
                                              name="action_mask")  # one-hot of the action that was selected
            self.y = tf.reduce_sum(self.q_values * self.action_mask, reduction_indices=[1, ])

            # Error clipping as in the Nature paper (Huber-style loss):
            # quadratic for |error| <= 1, linear beyond.
            self.error = tf.abs(self.y - self.target_q_values)
            quadratic_part = tf.clip_by_value(self.error, 0.0, 1.0)
            linear_part = self.error - quadratic_part
            self.loss = tf.reduce_mean(0.5 * tf.square(quadratic_part) + linear_part)

            # Clip each gradient by norm before applying.
            qnet_gradients = self.qnet_optimizer.compute_gradients(self.loss, self.qnet.variables())
            for i, (grad, var) in enumerate(qnet_gradients):
                if grad is not None:
                    qnet_gradients[i] = (tf.clip_by_norm(grad, 10), var)
            self.qnet_optimize = self.qnet_optimizer.apply_gradients(qnet_gradients)

        with tf.name_scope("target_network_update"):
            # BUGFIX: the original stored the None return value of
            # copy_to_target_network() here (which also copied eagerly via
            # .eval() at graph-build time, before variable initialization),
            # so session.run(self.hard_copy_to_target) in train() could not
            # work. Build explicit assign ops instead; running them copies
            # the online weights into the target network.
            self.hard_copy_to_target = [
                target_var.assign(source_var)
                for source_var, target_var in zip(self.qnet.variables(),
                                                  self.target_qnet.variables())
            ]

        # BUGFIX: tf.merge_all_summaries() is the pre-1.0 API; use the
        # tf.summary namespace consistently with tf.summary.histogram above.
        self.summarize = tf.summary.merge_all()

    def train(self):
        """Run one training step from a replay minibatch (no-op while the
        replay memory cannot yet provide a full sample)."""
        # Copy the QNetwork weights to the Target QNetwork on the first step.
        if self.num_training_steps == 0:
            print("Training starts...")
            # BUGFIX: was qnet.copy_to(target_qnet), which needs a *default*
            # session; run the graph ops through the owned session instead.
            self.session.run(self.hard_copy_to_target)

        # Sample experience from replay memory.
        minibatch = self.experience_replay.sample()
        if len(minibatch) == 0:
            return

        # Build the batch arrays; each entry is
        # (state, action, reward, next_state, is_terminal).
        batch_states = np.asarray([d[0] for d in minibatch])

        actions = [d[1] for d in minibatch]
        batch_actions = np.zeros((self.minibatch_size, self.action_size))
        for i in range(self.minibatch_size):
            batch_actions[i, actions[i]] = 1

        batch_rewards = np.asarray([d[2] for d in minibatch])
        batch_newstates = np.asarray([d[3] for d in minibatch])
        # Mask is 0 for terminal transitions so their future value is dropped.
        batch_newstates_mask = np.asarray([not d[4] for d in minibatch])

        # Perform training
        scores, _ = self.session.run([self.q_values, self.qnet_optimize],
                                     {self.state: batch_states,
                                      self.next_state: batch_newstates,
                                      self.next_state_mask: batch_newstates_mask,
                                      self.rewards: batch_rewards,
                                      self.action_mask: batch_actions})

        if self.num_training_steps % self.target_qnet_update_frequency == 0:
            # Hard update (copy) of the weights every # iterations
            self.session.run(self.hard_copy_to_target)

            # Write logs
            print('mean maxQ in minibatch: ', np.mean(np.max(scores, 1)))

            # BUGFIX: summary_writer defaults to None; guard before writing.
            if self.summary_writer is not None:
                str_ = self.session.run(self.summarize, {self.state: batch_states,
                                                         self.next_state: batch_newstates,
                                                         self.next_state_mask: batch_newstates_mask,
                                                         self.rewards: batch_rewards,
                                                         self.action_mask: batch_actions})
                self.summary_writer.add_summary(str_, self.num_training_steps)

        self.num_training_steps += 1

    def store(self, state, action, reward, next_state, is_terminal):
        """Store one transition, clipping the reward to
        [-reward_clipping, reward_clipping] when clipping is enabled."""
        if self.reward_clipping > 0.0:
            reward = np.clip(reward, -self.reward_clipping, self.reward_clipping)

        self.experience_replay.store(state, action, reward, next_state, is_terminal)

