import numpy as np
from tqdm import tqdm
from environment.electric_scheduling import PowerDayAheadSchedule
import utils.rl_utils as rl
from model.DDPG.v1.ddpg_learning import DDPG


def train_off_policy_agent(env: PowerDayAheadSchedule,
                           agent: DDPG, num_episodes,
                           replay_buffer: rl.ReplayBuffer, minimal_size, batch_size):
    """Train an off-policy (DDPG) agent on the day-ahead power-scheduling env.

    The episodes are split across 10 tqdm progress bars of
    ``int(num_episodes / 10)`` episodes each (any remainder is dropped).
    Every environment transition is pushed into ``replay_buffer``; once the
    buffer holds more than ``minimal_size`` transitions, the agent is updated
    with a ``batch_size`` sample after every single step.

    :param env: environment exposing ``reset()`` and ``step(action_set)``
        with a gym-style ``(next_state, reward, done, info)`` return
    :param agent: DDPG agent exposing ``take_action(state)`` and
        ``update(transition_dict)``
    :param num_episodes: total number of training episodes (ideally a
        multiple of 10)
    :param replay_buffer: experience buffer with ``add``/``sample``/``size``
    :param minimal_size: number of stored transitions required before
        agent updates begin
    :param batch_size: number of transitions sampled per update
    :return: list of per-episode returns (sum of rewards), in episode order
    """
    return_list = []
    # Hoisted so the bar length and the displayed episode counter always
    # agree (the old code used the un-truncated float `num_episodes / 10`
    # in the counter, which drifted when num_episodes % 10 != 0).
    episodes_per_bar = int(num_episodes / 10)
    for i in range(10):
        with tqdm(total=episodes_per_bar, desc='Iteration %d' % i) as pbar:
            for i_episode in range(episodes_per_bar):
                episode_return = 0
                state = env.reset()
                done = False
                while not done:
                    # Only the thermal ("fire") units are controlled; the
                    # hydro ("water") action channel is currently disabled.
                    fire_action = agent.take_action(state)
                    action_set = {
                        "fire": fire_action,
                    }

                    next_state, reward, done, _ = env.step(action_set)
                    replay_buffer.add(state, fire_action, reward, next_state, done)
                    state = next_state
                    episode_return += reward
                    # Start learning only once the buffer is large enough to
                    # draw reasonably decorrelated batches.
                    if replay_buffer.size() > minimal_size:
                        b_s, b_f_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                        transition_dict = {'states': b_s, 'fire-actions': b_f_a,
                                           'next_states': b_ns, 'rewards': b_r,
                                           'dones': b_d}
                        agent.update(transition_dict)
                return_list.append(episode_return)
                # Refresh the postfix stats every 10 episodes with the mean
                # return over the most recent 10 episodes.
                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix({'episode': '%d' % (episodes_per_bar * i + i_episode + 1),
                                      'return': '%.3f' % np.mean(return_list[-10:])})
                pbar.update(1)
    return return_list
