import numpy as np
import tensorflow as tf

class PolicyGradient(tf.Module):
    """REINFORCE-style policy-gradient agent (TensorFlow 2, eager mode).

    A two-layer MLP maps observations to a softmax distribution over
    discrete actions. `learn` performs one gradient step on the
    vanilla policy-gradient loss  -mean(log pi(a|s) * advantage).
    """

    def __init__(self, n_actions, n_features, learning_rate=0.01):
        """Build the network and optimizer.

        Args:
            n_actions: number of discrete actions (softmax output width).
            n_features: dimensionality of a single observation vector.
            learning_rate: Adam step size.
        """
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        # NOTE(review): gamma is stored but never read in this file; the
        # discounted returns appear to be computed by the caller and passed
        # to learn() as `all_adv` — confirm before removing.
        self.gamma = 0.95

        # Per-episode buffers: observations, actions, rewards/advantages.
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []

        self.build_net()

        self.optimizer = tf.optimizers.Adam(learning_rate=self.lr)

    def build_net(self):
        """Create the policy network layers.

        Also creates three vestigial TF1 placeholder-style variables that
        nothing in this class reads; they are kept (non-trainable) only so
        existing external code referencing these attributes keeps working.
        """
        self.tf_obs = tf.Variable(initial_value=tf.zeros([1, self.n_features]), dtype=tf.float32, trainable=False)
        self.tf_vt = tf.Variable(initial_value=tf.zeros([1]), dtype=tf.float32, trainable=False)
        self.tf_acts = tf.Variable(initial_value=tf.zeros([1]), dtype=tf.float32, trainable=False)

        # Two-layer MLP: tanh hidden layer -> linear logits over actions.
        self.dense1 = tf.keras.layers.Dense(units=10, activation=tf.nn.tanh)
        self.dense2 = tf.keras.layers.Dense(units=self.n_actions, activation=None)

    def call(self, inputs):
        """Forward pass: map a batch of observations to action probabilities.

        Args:
            inputs: float32 tensor/array of shape (batch, n_features).

        Returns:
            Tensor of shape (batch, n_actions); each row is a softmax
            distribution over actions. Also cached on `self.all_act_prob`.
        """
        hidden = self.dense1(inputs)
        logits = self.dense2(hidden)
        self.all_act_prob = tf.nn.softmax(logits, name='act_prob')
        return self.all_act_prob

    def choose_action(self, observation):
        """Sample one action from the current policy for `observation`.

        Accepts either a flat (n_features,) observation or an
        already-batched (1, n_features) one; the network needs a 2-D batch.
        """
        obs = np.asarray(observation, dtype=np.float32).reshape(1, -1)
        prob_weights = self.call(obs)
        probs = prob_weights.numpy().ravel()
        # Re-normalize to guard against float32 round-off in the softmax
        # output, which can make np.random.choice reject `p` as not
        # summing to exactly 1.
        probs = probs / probs.sum()
        action = np.random.choice(self.n_actions, p=probs)
        return action

    def store_ob(self, s):
        """Append one observation to the episode buffer."""
        self.ep_obs.append(s)

    def store_action(self, a):
        """Append one action to the episode buffer."""
        self.ep_as.append(a)

    def store_adv(self, r):
        """Append one reward/advantage value to the episode buffer."""
        self.ep_rs.append(r)

    def learn(self, all_ob, all_action, all_adv):
        """Run one policy-gradient update and clear the episode buffers.

        Args:
            all_ob: (batch, n_features) observations.
            all_action: (batch,) integer action indices taken.
            all_adv: (batch,) advantages/returns weighting each log-prob.

        Returns:
            The scalar loss tensor for this step.
        """
        with tf.GradientTape() as tape:
            all_act_prob = self.call(all_ob)
            # Pick each row's taken-action probability via flat indexing
            # (row * width + action). tf.shape yields int32, so cast the
            # actions explicitly — int64/float action arrays otherwise
            # raise a dtype mismatch in the index arithmetic.
            actions = tf.cast(all_action, tf.int32)
            n_rows = tf.shape(all_act_prob)[0]
            width = tf.shape(all_act_prob)[1]
            flat_idx = tf.range(0, n_rows) * width + actions
            chosen_prob = tf.gather(tf.reshape(all_act_prob, [-1]), flat_idx)
            # Clip before the log so a probability that underflows to 0
            # cannot produce -inf loss / NaN gradients.
            log_prob = tf.math.log(tf.clip_by_value(chosen_prob, 1e-10, 1.0))
            loss = -tf.reduce_mean(log_prob * all_adv)

        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))

        # Start the next episode with empty buffers.
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []
        return loss

    def save_data(self, pg_resume):
        """Save model weights to a checkpoint with prefix `pg_resume`."""
        checkpoint = tf.train.Checkpoint(model=self)
        checkpoint.save(pg_resume)

    def load_data(self, pg_resume):
        """Restore model weights from checkpoint path `pg_resume`.

        NOTE(review): expects a full checkpoint path (e.g. from
        tf.train.latest_checkpoint), not just the save prefix — verify
        against the caller.
        """
        checkpoint = tf.train.Checkpoint(model=self)
        checkpoint.restore(pg_resume)
