from agent.dqn_agent import DQNAgent
from common.arguments import get_common_args
from common.agent import Agent
import torch
from torch.utils.tensorboard import SummaryWriter
import time
import numpy as np

class Runner():
    """Drives the training loop for an off-policy agent.

    Interacts with a gym-style environment, stores transitions in the
    agent's replay buffer, triggers learning after a warm-up phase, logs
    loss/reward to TensorBoard, and stops early (saving the agent) once
    the average reward over the last ``eva_period`` episodes exceeds an
    environment-specific threshold.
    """

    def __init__(self, env, agent, args, env_name):
        """
        Args:
            env: gym-style environment exposing ``reset()`` and ``step()``.
            agent: wrapper exposing ``choose_action``, ``remember``,
                ``train``, ``save`` and an ``algorithm`` attribute (used
                for the TensorBoard log directory name).
            args: namespace with ``episodes``, ``max_steps``,
                ``learn_steps``, ``replay_buffer_size`` and ``eva_period``.
            env_name: environment id; selects the solve threshold.
        """
        self.env = env
        self.agent = agent
        self.args = args
        self.loss_list = []  # every training loss, in chronological order
        self.env_name = env_name

    def run(self, ):
        """Run the full training loop.

        Learning starts once ``total_step`` reaches one fifth of the
        replay-buffer capacity, then happens every ``learn_steps`` env
        steps with ``learn_steps`` gradient updates per trigger. The loop
        stops early and saves the agent when the mean reward of the last
        ``eva_period`` completed episodes exceeds the threshold
        (490 for CartPole-v1, -50 otherwise).
        """
        total_step = 0
        loss_idx = 0

        period = self.args.eva_period
        # Ring buffer holding the rewards of the most recent `period` episodes.
        period_reward = [0 for _ in range(period)]

        # Env-specific "solved" threshold; loop-invariant, so hoisted out.
        if self.env_name == "CartPole-v1":
            threshold = 490
        else:
            threshold = -50

        time_now = time.strftime('%y%m_%d%H%M')
        writer = SummaryWriter("log/{}/{}".format(self.agent.algorithm, time_now))
        try:
            for episodes in range(self.args.episodes):
                state = self.env.reset()
                total_reward = 0
                for step in range(self.args.max_steps):
                    total_step += 1
                    action = self.agent.choose_action(state)
                    next_state, reward, done, _ = self.env.step(action)
                    total_reward += reward
                    self.agent.remember(state, action, reward, next_state, done)
                    state = next_state

                    # Warm-up: only learn after the buffer has collected at
                    # least 1/5 of its capacity; then train every
                    # `learn_steps` env steps, `learn_steps` updates each.
                    if (total_step >= self.args.replay_buffer_size // 5
                            and total_step % self.args.learn_steps == 0):
                        for _ in range(self.args.learn_steps):
                            loss = self.agent.train()
                            self.loss_list.append(loss)
                            writer.add_scalar('loss', loss, loss_idx)
                            loss_idx += 1

                    if done or step == self.args.max_steps - 1:
                        writer.add_scalar('total_reward', total_reward, episodes)
                        print("episode: {}/{}, step: {}, score: {}"
                            .format(episodes, self.args.episodes, total_step, total_reward))
                        break

                period_reward[episodes % period] = total_reward

                # BUGFIX: the buffer is zero-initialised, so evaluating the
                # average before `period` real episodes have completed made
                # a negative threshold (-50) trivially satisfied at episode
                # 0, saving and stopping immediately. Only evaluate once the
                # ring buffer holds `period` genuine episode rewards.
                if episodes + 1 >= period and np.average(period_reward) > threshold:
                    self.agent.save(episodes)
                    break
        finally:
            # Flush and release the TensorBoard writer even if training
            # raises — the original leaked the open writer.
            writer.close()



    