import matplotlib.pyplot as plt
import numpy as np
import torch
from tensorboardX import SummaryWriter
from tqdm import tqdm


def compute_advantage(gamma, lmbda, td_delta):
    """Compute Generalized Advantage Estimation (GAE) from TD errors.

    Args:
        gamma: discount factor.
        lmbda: GAE smoothing coefficient (lambda).
        td_delta: 1-D tensor of per-step TD errors for one trajectory,
            ordered from first to last step.

    Returns:
        Float tensor of the same length with the GAE advantage per step.
    """
    td_delta = td_delta.detach().numpy()
    advantage_list = []
    advantage = 0.0
    # Accumulate backwards: A_t = delta_t + gamma * lambda * A_{t+1}.
    for delta in td_delta[::-1]:
        advantage = gamma * lmbda * advantage + delta
        advantage_list.append(advantage)
    advantage_list.reverse()
    # Convert through np.array first: torch.tensor on a Python list of
    # numpy scalars is very slow and emits a UserWarning on recent torch.
    return torch.tensor(np.array(advantage_list), dtype=torch.float)


def moving_average(a, window_size):
    """Smooth sequence *a* with a centered moving average.

    The output has the same length as the input: the bulk is averaged over
    a full window, while the first and last few points use progressively
    smaller (odd-sized) centered windows.

    NOTE(review): the index arithmetic assumes ``window_size`` is odd —
    confirm against callers.
    """
    padded_cumsum = np.cumsum(np.insert(a, 0, 0))
    window_sums = padded_cumsum[window_size:] - padded_cumsum[:-window_size]
    core = window_sums / window_size
    # Shrinking odd window lengths (1, 3, 5, ...) used at both edges.
    divisors = np.arange(1, window_size - 1, 2)
    head = np.cumsum(a[: window_size - 1])[::2] / divisors
    tail = (np.cumsum(a[:-window_size:-1])[::2] / divisors)[::-1]
    return np.concatenate((head, core, tail))


def is_save_iteration(iteration, save_interval, num_episodes):
    """Return True on every ``save_interval``-th iteration and on the last one."""
    if iteration == num_episodes - 1:
        return True
    return iteration % save_interval == 0


def train_on_policy_agent(env, agent, num_episodes, save_interval=500):
    """Train an on-policy agent, logging metrics and figures to TensorBoard.

    Runs ``num_episodes`` episodes of at most 1000 steps each. After every
    episode the agent is updated on the full trajectory; the (smoothed)
    return and both losses are written to a ``SummaryWriter`` in the current
    directory. On every ``save_interval``-th episode (and the final one) a
    trajectory trace is plotted and the model checkpoint is saved.

    Args:
        env: gym-style environment (``reset()`` -> state,
            ``step(a)`` -> (next_state, reward, done, info)).
        agent: must provide ``take_action``, ``update`` (returning
            actor/critic losses) and ``save_pth``.
        num_episodes: number of training episodes.
        save_interval: episode interval for plotting/checkpointing
            (default 500, matching the previous hard-coded value).

    Returns:
        List of per-episode returns.

    NOTE(review): the plotting code indexes state[1]/state[3]/state[0] as
    y/target_y/vy — this assumes a specific observation layout; confirm
    against the environment. ``state`` must be a numpy array (``.copy()``
    is called on it).
    """
    writer = SummaryWriter(".", flush_secs=1)
    return_list = []
    pbar = tqdm(range(num_episodes), ncols=80)
    for i_episode in pbar:
        episode_return = 0
        transition_dict = {
            "states": [],
            "actions": [],
            "next_states": [],
            "rewards": [],
            "dones": [],
        }
        state = env.reset()
        done = False
        step = 0
        y_list = []
        target_y_list = []
        vy_list = []
        # Loop-invariant: decide once per episode whether to record traces.
        save_episode = is_save_iteration(i_episode, save_interval, num_episodes)
        while step < 1000 and not done:
            action = agent.take_action(state)
            next_state, reward, done, _ = env.step(action)
            # Copy states so later in-place env mutation can't corrupt the buffer.
            transition_dict["states"].append(state.copy())
            transition_dict["actions"].append(action)
            transition_dict["next_states"].append(next_state.copy())
            transition_dict["rewards"].append(reward)
            transition_dict["dones"].append(done)
            state = next_state.copy()
            episode_return += reward
            if save_episode:
                y_list.append(state[1])
                target_y_list.append(state[3])
                vy_list.append(state[0])
            step += 1

        return_list.append(episode_return)
        # Smooth the progress-bar reward over the last 10 episodes.
        reward_display = np.mean(return_list[-10:]) if len(return_list) > 10 else np.mean(return_list)
        actor_loss, critic_loss = agent.update(transition_dict)
        pbar.set_description(
            "episode: %d, reward: %.3f" % (i_episode, reward_display)
        )
        writer.add_scalar("reward", reward_display, i_episode)
        writer.add_scalar("actor_loss", actor_loss, i_episode)
        writer.add_scalar("critic_loss", critic_loss, i_episode)

        if save_episode:
            y_fig, y_ax = plt.subplots()
            y_ax.plot(y_list)
            y_ax.plot(target_y_list)
            y_ax.legend(["y", "target_y"])
            writer.add_figure("y", y_fig, i_episode)
            vy_fig, vy_ax = plt.subplots()
            vy_ax.plot(vy_list)
            vy_ax.legend(["vy"])
            writer.add_figure("vy", vy_fig, i_episode)
            # Close figures explicitly: matplotlib keeps every figure alive
            # otherwise, leaking memory over a long training run.
            plt.close(y_fig)
            plt.close(vy_fig)
            pth_path = f"model_{i_episode}.pth"
            agent.save_pth(pth_path)
    return return_list
