#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 18-11-16
import numpy    as np
import tensorflow as tf

tf.enable_eager_execution()


class Actor(tf.keras.Model):
    """Policy network for a policy-gradient actor.

    Maps observations to a softmax distribution over a discrete action
    space and samples actions from it. Also buffers per-episode
    (state, action, reward) transitions; the policy update itself is not
    defined in this class.
    """

    def __init__(self, num_actions, num_features, reward_decay=0.95, **kwargs):
        """
        Args:
            num_actions: size of the discrete action space (output units).
            num_features: dimensionality of the observation vector.
                NOTE(review): stored but never read by the layers below,
                which infer their input size on first call — confirm intended.
            reward_decay: discount factor gamma for the reward buffer.
            **kwargs: forwarded to tf.keras.Model.
        """
        super(Actor, self).__init__(**kwargs)
        self.num_actions = num_actions
        self.num_features = num_features
        self.gamma = reward_decay
        # Episode buffers. store_transition fills states/actions only;
        # rewards is presumably appended by the caller — verify usage.
        self.states, self.actions, self.rewards = [], [], []
        self.dense1 = tf.keras.layers.Dense(
            128, activation=tf.nn.tanh,
            kernel_initializer=tf.initializers.random_normal(0.0, 0.3))
        self.dense2 = tf.keras.layers.Dense(
            self.num_actions,
            kernel_initializer=tf.initializers.random_normal(0.0, 0.3))

    def call(self, inputs, training=None):
        """Return per-action probabilities, shape (batch, num_actions)."""
        hidden = self.dense1(inputs)
        logits = self.dense2(hidden)
        # Softmax over the action axis (axis=1).
        return tf.nn.softmax(logits, 1, 'action_prob')

    def reset(self):
        """Clear all episode transition buffers."""
        self.states.clear()
        self.actions.clear()
        self.rewards.clear()

    def store_transition(self, s, a):
        """Record one (state, action) pair in the episode buffers."""
        self.states.append(s)
        self.actions.append(a)

    def _choose_action(self, x):
        """Sample one action index from a probability row `x` (eager tensor).

        Fixes vs. the original:
        - float32 softmax rows rarely sum to exactly 1.0, which makes
          np.random.choice raise ValueError; renormalize in float64 first.
        - np.random.choice returns a platform int (int64 on most systems),
          which clashes with the tf.int32 dtype declared in choose_action's
          map_fn; cast the sample explicitly.
        """
        p = np.asarray(x.numpy(), dtype=np.float64)
        p /= p.sum()
        return np.int32(np.random.choice(int(x.shape[0]), p=p))

    def choose_action(self, obs):
        """Sample one action per observation in the batch `obs`.

        Returns an int32 tensor of shape (batch,).
        """
        prob_weights = self(obs)
        # dtype must be declared because the mapped output dtype (int32)
        # differs from the input dtype (float).
        return tf.map_fn(self._choose_action, prob_weights, tf.int32)


if __name__ == '__main__':
    # Library-only module: no standalone entry point is defined yet.
    pass
