from abc import ABC

import tensorflow as tf
import tensorflow_probability as tfp
import gym
import numpy as np
import os

import time

# The guarded import below is equivalent (at runtime) to:
#   from tensorflow import keras
#   from keras import layers
# but lets type checkers resolve `keras` directly.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    import keras
else:
    from tensorflow import keras
    from keras import layers

from tqdm import tqdm


class PolicyNet(keras.Model, ABC):
    """Small MLP policy: two ReLU hidden layers and a softmax output head."""

    def __init__(self, action_dim=1, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Attribute names fc1/fc2/fc3 are deliberately kept: tf.train.Checkpoint
        # tracks variables through these attributes, so renaming them would
        # break checkpoint restore.
        self.fc1 = layers.Dense(24, activation="relu")
        self.fc2 = layers.Dense(36, activation="relu")
        self.fc3 = layers.Dense(action_dim, activation="softmax")

    def call(self, x):
        """Forward pass: map an observation batch to action probabilities."""
        out = x
        for dense in (self.fc1, self.fc2, self.fc3):
            out = dense(out)
        return out

    def process(self, obs):
        """Batched inference helper (predict_on_batch: numpy in, numpy out)."""
        return self.predict_on_batch(obs)


class Agent:
    """REINFORCE (vanilla policy-gradient) agent.

    Holds a softmax policy network and applies the Monte-Carlo policy
    gradient update: loss = -log pi(a|s) * G_t.
    """

    def __init__(self, action_dim=1):
        # Policy network producing a probability distribution over
        # `action_dim` discrete actions.
        self.policy_net = PolicyNet(action_dim=action_dim)
        self.optimizer = keras.optimizers.Adam(learning_rate=1e-3)
        self.gamma = 0.99  # discount factor for return computation

    def policy(self, obs):
        """Sample one action index from the current policy.

        `obs` is a single observation (numpy array); it is flattened to a
        batch of one before the forward pass.
        """
        obs = obs.reshape(1, -1)
        obs = tf.convert_to_tensor(obs, dtype=tf.float32)
        action_logits = self.policy_net(obs)
        # tf.random.categorical expects (unnormalized) log-probabilities,
        # so take the log of the softmax output.
        action = tf.random.categorical(
            tf.math.log(action_logits), num_samples=1
        )
        return action

    def get_action(self, obs):
        """Return the sampled action as a plain scalar."""
        action = self.policy(obs).numpy()
        return action.squeeze()

    @staticmethod
    def _discounted_returns(rewards, gamma):
        """Return [G_0, ..., G_{T-1}] with G_t = sum_k gamma^k * r_{t+k}.

        Pure helper: does NOT mutate `rewards`. The original code both
        reversed the caller's reward list in place and called
        `discounted_rewards.reverse()` INSIDE the accumulation loop,
        which scrambled the returns; both defects are fixed here.
        """
        returns = []
        g = 0.0
        for r in reversed(rewards):
            g = r + gamma * g
            returns.append(g)
        returns.reverse()  # back to chronological order, exactly once
        return returns

    def learn(self, states, rewards, actions):
        """One REINFORCE gradient step per visited (state, G_t, action) triple.

        `states`, `rewards`, `actions` are the parallel per-step lists of
        one episode, in chronological order.
        """
        discounted_rewards = self._discounted_returns(rewards, self.gamma)

        for state, reward, action in zip(states, discounted_rewards, actions):
            with tf.GradientTape() as tape:
                action_probabilities = self.policy_net(np.array([state]), training=True)
                loss = self.loss(action_probabilities, action, reward)
            grads = tape.gradient(loss, self.policy_net.trainable_variables)
            self.optimizer.apply_gradients(
                zip(grads, self.policy_net.trainable_variables)
            )

    def loss(self, action_probabilities, action, reward):
        """Policy-gradient loss: -log pi(action | state) * discounted return."""
        dist = tfp.distributions.Categorical(
            probs=action_probabilities, dtype=tf.float32
        )
        log_prob = dist.log_prob(action)
        return -log_prob * reward


def train(agent: Agent, env: gym.Env, episodes: int, render=False):
    """Run `episodes` episodes of REINFORCE training against `env`.

    Collects the per-step (state, reward, action) lists of each episode
    and hands them to `agent.learn` when the episode ends.
    """
    max_step = 1000  # safety cap so a non-terminating episode still ends

    for episode in tqdm(range(episodes)):
        done = False
        state = env.reset()
        total_reward = 0
        rewards = []
        states = []
        actions = []

        step = 0
        while not done:
            action = agent.get_action(state)
            # NOTE(review): this unpacking assumes Env.step returns a
            # 3-tuple (no `info` dict) — confirm against Env's API.
            next_state, reward, done = env.step(action)
            rewards.append(reward)
            # Fixed: the original appended `action` into `states` and never
            # filled `actions`, so agent.learn() zipped against an empty
            # action list and performed no updates at all.
            states.append(state)
            actions.append(action)
            state = next_state
            total_reward += reward

            step += 1

            if step == max_step:
                done = True

            if render:
                env.render()
            if done:
                agent.learn(states, rewards, actions)
                print(f"step:{step}\treward:{total_reward}")
                env.render()



if __name__ == '__main__':
    print(tf.__version__)
    print("Built with CUDA:", tf.test.is_built_with_cuda())
    print("CUDA Devices:", tf.config.list_physical_devices('GPU'))

    from Env import Env

    env = Env()
    agent = Agent()

    # Checkpoint save path. Keep the absolute path free of non-ASCII
    # characters (per the original author's note).
    work_dir = os.getcwd()
    # Fixed: `work_dir` was computed but never used; anchor the checkpoint
    # directory to it explicitly (same effective location as the old
    # cwd-relative path).
    checkpoint_dir = os.path.join(work_dir, 'checkpoints')
    checkpoint_prefix = os.path.join(checkpoint_dir, "policy_gradient")
    checkpoint = tf.train.Checkpoint(optimizer=agent.optimizer, model=agent.policy_net)

    # Restore the latest checkpoint if one exists (restore() of None is a no-op).
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))

    episodes = 500
    train(agent, env, episodes)

    # Save a checkpoint after training.
    checkpoint.save(file_prefix=checkpoint_prefix)
    env.close()  # probably redundant for this Env, but harmless
