import numpy as np
from tqdm import tqdm
from environment.electric_scheduling import PowerDayAheadSchedule
import utils.rl_utils as rl
from model.DDPG.v0.ddpg_learning import DDPG


def train_off_policy_agent(env: PowerDayAheadSchedule,
                           agent: DDPG, num_episodes,
                           replay_buffer: rl.ReplayBuffer, minimal_size, batch_size):
    """Train an off-policy DDPG agent on the day-ahead power scheduling env.

    Runs ``num_episodes`` episodes split into 10 progress-bar iterations.
    Each step, the flat action vector from the agent is decoded into the
    named action dict the environment expects, the transition is stored in
    the replay buffer, and the agent is updated once the buffer holds more
    than ``minimal_size`` transitions.

    Args:
        env: Day-ahead power scheduling environment (gym-style 5-tuple step).
        agent: DDPG agent exposing ``take_action``, ``update`` and ``action_dim``.
        num_episodes: Total number of training episodes (should be a multiple of 10).
        replay_buffer: Experience replay buffer with ``add``/``sample``/``size``.
        minimal_size: Minimum buffer size before learning updates begin.
        batch_size: Number of transitions sampled per update.

    Returns:
        List of per-episode returns, in the order the episodes were run.
    """
    return_list = []
    num_iterations = 10
    episodes_per_iter = int(num_episodes / num_iterations)
    for i in range(num_iterations):
        with tqdm(total=episodes_per_iter, desc='Iteration %d' % i) as pbar:
            for i_episode in range(episodes_per_iter):
                episode_return = 0
                # env.reset() returns (observation, info) — keep the observation.
                state = env.reset()[0]
                done = False
                while not done:
                    action = agent.take_action(state)
                    # Action vector layout:
                    #   [0 : part1)          -> fire stations
                    #   [part1 : part2)      -> water stations
                    #   part2, part2 + 1     -> wind, solar
                    #   part2 + 2, part2 + 3 -> battery, saving (last two indices)
                    part1 = env.fire_station_num
                    part2 = env.fire_station_num + env.water_station_num

                    # Total generation: raw sum of all station/wind/solar
                    # components, plus battery and saving weighted by their
                    # electric conversion rates.
                    total = sum(action[:agent.action_dim - 2])
                    total += action[agent.action_dim - 2] * env.battery.eletric_rate
                    # BUG FIX: previously the battery component (action_dim - 2)
                    # was also multiplied by the saving rate, and the saving
                    # component (action_dim - 1 == part2 + 3, sent below as
                    # "cost-saving") never contributed to the total.
                    total += action[agent.action_dim - 1] * env.saving.eletric_rate

                    actionSet = {
                        "fire": action[0:part1],
                        "water": action[part1:part2],
                        "wind": action[part2],
                        "solar": action[part2 + 1],
                        "cost-battery": action[part2 + 2],
                        "cost-saving": action[part2 + 3],
                        "total": total
                    }

                    next_state, reward, done, _, _ = env.step(actionSet)
                    replay_buffer.add(state, action, reward, next_state, done)
                    state = next_state
                    episode_return += reward
                    # Learn only once the buffer has enough transitions.
                    if replay_buffer.size() > minimal_size:
                        b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                        transition_dict = {'states': b_s, 'actions': b_a, 'next_states': b_ns, 'rewards': b_r,
                                           'dones': b_d}
                        agent.update(transition_dict)
                return_list.append(episode_return)
                # Refresh the postfix with a 10-episode moving-average return.
                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix({'episode': '%d' % (num_episodes / 10 * i + i_episode + 1),
                                      'return': '%.3f' % np.mean(return_list[-10:])})
                pbar.update(1)
    return return_list
