import matplotlib.pyplot as plt
import matplotlib

matplotlib.use('Agg')

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import keras
else:
    from tensorflow import keras
    from keras import layers

import tensorflow_probability as tfp
import seaborn as sns
import tensorflow as tf
import numpy as np

from Env import Env


def evaluate(agent, env: Env, render=False):

    episode_reward = 0
    done = False
    step_num = 1
    obs = env.get_state()
    env.reset()

    while not done:
        action = agent.get_action(obs)[0]
        obs, reward, done, info = env.step(action)
        episode_reward += reward
        if render:
            env.render()

        print(obs, episode_reward)

        step_num += 1

    return step_num, episode_reward, done, info


class Brain(keras.Model):
    """Two-layer MLP mapping an observation vector to action logits."""

    def __init__(self, action_dim=12, input_shape=(1, 24)):
        """Build the network.

        Args:
            action_dim: number of discrete actions (width of the logits layer).
            input_shape: expected observation shape, forwarded to the first
                Dense layer.
        """
        super(Brain, self).__init__()
        self.dense1 = layers.Dense(32, input_shape=input_shape, activation="relu")
        self.logits = layers.Dense(action_dim)

    # keras.Model interface implementation.
    # NOTE: the original annotated the return as `layers.Dense`, but this
    # returns the output tensor of the logits layer, not a layer object.
    def call(self, inputs) -> tf.Tensor:
        """Forward pass: return unnormalized action logits for `inputs`."""
        x = tf.convert_to_tensor(inputs)

        # A single 1-D observation is promoted to a batch of one so the
        # Dense layers always see a 2-D (batch, features) tensor.
        if len(x.shape) == 1:
            x = tf.expand_dims(x, axis=0)

        return self.logits(self.dense1(x))

    def process(self, obs):
        """Return raw action logits for a batch of observations (NumPy)."""
        action_logits = self.predict_on_batch(obs)
        return action_logits


class DiscretePolicy:
    """Categorical policy over a fixed number of discrete actions."""

    def __init__(self, num_actions):
        # Number of actions; the distribution itself is built per-call in
        # `sample` from the logits supplied by the network.
        self.action_dim = num_actions

    def sample(self, action_logits):
        """Draw one one-hot sample from a categorical given `action_logits`."""
        self.distribution = tfp.distributions.Multinomial(
            logits=action_logits, total_count=1
        )

        return self.distribution.sample(1)

    def get_action(self, action_logits):
        """Sample an action and return its integer index.

        The sample is one-hot, so `np.where(...)[-1]` extracts the index of
        the hot entry along the last axis.
        """
        action = self.sample(action_logits)
        return np.where(action)[-1]

    # BUG FIX: the original defined `entropy(action_probs)` without `self`,
    # so `policy.entropy(probs)` raised TypeError. @staticmethod makes the
    # instance call work while keeping `DiscretePolicy.entropy(probs)` valid.
    @staticmethod
    def entropy(action_probs):
        """Shannon entropy (in nats) of a probability vector."""
        return -tf.reduce_sum(action_probs * tf.math.log(action_probs), axis=-1)


class Agent:
    """Pairs a Brain network with a DiscretePolicy to pick actions."""

    def __init__(self, action_dim=12, input_dim=(1, 24)):
        self.brain = Brain(action_dim, input_dim)
        self.policy = DiscretePolicy(action_dim)

    def get_action(self, obs):
        """Run the network on `obs` and sample an action index from its logits."""
        logits = self.brain.process(obs)
        batchless_logits = np.squeeze(logits, 0)
        return self.policy.get_action(batchless_logits)

    def learn(self):
        """Training hook — not implemented for this evaluation-only agent."""
        raise NotImplementedError


def test_process():
    """Demo: draw 50 one-hot samples from a uniform 12-way categorical and plot them."""
    action_dim = 12
    uniform_probs = [1 / action_dim] * action_dim

    policy_dist = tfp.distributions.Multinomial(
        probs=uniform_probs, total_count=1
    )

    draws = policy_dist.sample(50)
    print(draws)

    sns.histplot(draws)
    plt.savefig('output.png')


if __name__ == '__main__':
    # Build the environment, size the agent from its spaces, run one episode.
    environment = Env()
    eval_agent = Agent(
        environment.action_space.n, environment.observation_space.shape
    )

    steps, reward, done, info = evaluate(eval_agent, environment)
    print(f"steps: {steps} reward:{reward} done:{done} info:{info}")

