import os
from typing import List, Tuple

import matplotlib.pyplot as plt
import torch
from torch.distributions import Categorical
from constants import TASK_BATCH

from environment.env import Environment
from model.model import SchedulePolicyNet


class Agent:
    """REINFORCE (vanilla policy-gradient) agent for task scheduling.

    Samples actions from ``SchedulePolicyNet``, rolls out full episodes in
    the environment, and updates the policy with discounted, standardized
    returns. The best-scoring weights seen so far are checkpointed to
    ``model_path``.
    """

    def __init__(self, env: "Environment", gamma=0.9, lr=0.001, model_path='weight/model.pth'):
        """
        Args:
            env: environment exposing ``reset()``, ``step(task_idx)`` and a
                ``processors`` sequence (its length sizes the policy net).
            gamma: discount factor for future rewards.
            lr: Adam learning rate.
            model_path: checkpoint file; loaded on start-up if it exists.
        """
        self.env = env
        self.policy_net = SchedulePolicyNet(n_processors=len(env.processors))
        self.opt = torch.optim.Adam(self.policy_net.parameters(), lr=lr)
        # self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.opt, T_max=500)
        self.gamma = gamma
        self.tot_r_record = []  # per-episode total reward, filled by play_many_episodes
        self.model_path = model_path

        if os.path.exists(model_path):
            print('detected pre-trained model, loading...')
            # map_location lets a checkpoint saved on GPU load on a CPU-only host
            self.policy_net.load_state_dict(
                torch.load(model_path, map_location='cpu'))
            print('loading finish')

    def reset(self):
        """Clear the recorded per-episode totals (e.g. between training runs)."""
        self.tot_r_record = []

    def calc_returns(self, rewards: List[float]) -> torch.Tensor:
        """Return the discounted returns G_t, standardized to zero mean / unit std.

        Fix: ``Tensor.std()`` of a single element is NaN (unbiased, n-1
        denominator), which previously turned a one-step episode's loss into
        NaN; such episodes are now only mean-centered.
        """
        returns: List[float] = []
        R = 0.0
        for r in reversed(rewards):
            R = r + self.gamma * R
            returns.append(R)
        # restore chronological order: O(n) vs the O(n^2) repeated insert(0, ...)
        returns.reverse()

        returns_t = torch.tensor(returns)
        if returns_t.numel() > 1:
            # eps keeps the division finite when every return is identical
            returns_t = (returns_t - returns_t.mean()) / (returns_t.std() + 1e-20)
        else:
            returns_t = returns_t - returns_t.mean()
        return returns_t

    def plot_total_reward(self, num_episodes: int, idx: int):
        """Save the learning curve (total reward per episode) to a PNG."""
        plt.clf()
        # use the record's own length so leftover entries from a previous run
        # cannot cause an x/y length mismatch in plt.plot
        x = range(len(self.tot_r_record))
        y = self.tot_r_record
        plt.plot(x, y, color='red')
        plt.xlabel('episode')
        plt.ylabel('total rewards')
        plt.title(f'Learning curve(epoch={num_episodes})')
        plt.savefig(f'learning curve_{idx}.png')

    def finish_episode(self, log_props: List[torch.Tensor], rewards: List[float]):
        """Run one REINFORCE update from a finished episode.

        Loss = sum_t( -log pi(a_t | s_t) * G_t ).

        Args:
            log_props: per-step action log-probabilities (grad-carrying tensors).
            rewards: per-step scalar rewards, same length as ``log_props``.
        """
        if not log_props:
            return  # empty episode: torch.stack([]) would raise
        returns = self.calc_returns(rewards)
        self.opt.zero_grad()
        policy_loss = torch.stack(
            [-log_prob * G for G, log_prob in zip(returns, log_props)]).sum()
        policy_loss.backward()
        self.opt.step()
        # self.scheduler.step()

    def play_one_episode(self, verbose=False) -> Tuple[List[torch.Tensor], List[float]]:
        """Roll out one full episode, sampling actions from the current policy.

        Returns:
            (log_probs, rewards): per-step action log-probabilities (with
            grad attached, for the policy update) and per-step rewards.
        """
        s = self.env.reset()
        log_probs = []
        rewards = []
        done = False
        while not done:
            probs = self.policy_net(s)
            m = Categorical(probs)
            action = m.sample()
            log_probs.append(m.log_prob(action))
            s, r, done = self.env.step(action.item())
            rewards.append(r)
        return log_probs, rewards

    def play_many_episodes(self, num_episodes: int, idx: int):
        """Train for ``num_episodes`` episodes, checkpointing the best episode.

        Fix: the best-so-far tracker starts at -inf (was 0), so the model is
        still checkpointed when every episode's total reward is negative.
        """
        max_reward = float('-inf')
        avg_rewards = 0
        for episode in range(num_episodes):
            log_probs, rewards = self.play_one_episode()
            total_r = sum(rewards)
            avg_rewards += total_r
            if total_r > max_reward:
                torch.save(self.policy_net.state_dict(), self.model_path)
                max_reward = total_r
            self.tot_r_record.append(total_r)
            self.finish_episode(log_probs, rewards)

        avg_rewards /= num_episodes

        print(f"Episode max rewards = {max_reward}")
        print(f"Episode avg rewards = {avg_rewards}")
        self.plot_total_reward(num_episodes, idx)
