#!/usr/bin/env python
# -*- coding:utf-8 -*-

# file: sac_train_off_policy.py
# author: 刘浩宇
# datetime: 2023/11/30 14:53
# software: PyCharm

"""
This is function description
"""
import numpy as np
from tqdm import tqdm

from model.SAC.v1.sac_learning import SACContinuous
import utils.rl_utils as rl


def train_off_policy_agent(env,
                           agent: "SACContinuous", num_episodes,
                           replay_buffer: "rl.ReplayBuffer", minimal_size, batch_size,
                           num_iterations=10):
    """Train a SAC agent off-policy using a replay buffer.

    The run is split into ``num_iterations`` progress-bar chunks of
    ``num_episodes // num_iterations`` episodes each. Every environment
    step is stored in ``replay_buffer``; once the buffer holds more than
    ``minimal_size`` transitions, a batch of ``batch_size`` transitions
    is sampled and used to update the agent.

    Args:
        env: environment exposing ``reset() -> state`` and
            ``step(action_dict) -> (next_state, reward, done, info)``.
        agent: SAC agent exposing ``take_action(state)`` and
            ``update(transition_dict)``.
        num_episodes: total number of episodes to run.
        replay_buffer: buffer exposing ``add``, ``size`` and ``sample``.
        minimal_size: buffer fill level required before updates begin.
        batch_size: number of transitions sampled per agent update.
        num_iterations: number of progress-bar chunks the run is split
            into (default 10, matching the previous hard-coded value).

    Returns:
        List of per-episode cumulative rewards.
    """
    return_list = []
    episodes_per_iter = num_episodes // num_iterations
    for i in range(num_iterations):
        with tqdm(total=episodes_per_iter, desc='Iteration %d' % i) as pbar:
            for i_episode in range(episodes_per_iter):
                episode_return = 0
                state = env.reset()
                done = False
                while not done:
                    fire_action = agent.take_action(state)
                    # Only the "fire" channel is controlled by the agent;
                    # the "water" channel is held fixed at [0].
                    action_set = {
                        "fire": fire_action,
                        "water": [0]
                    }
                    next_state, reward, done, _ = env.step(action_set)
                    replay_buffer.add(state, fire_action, reward, next_state, done)
                    state = next_state
                    episode_return += reward
                    # Start learning only after the buffer has enough samples.
                    if replay_buffer.size() > minimal_size:
                        b_s, b_f_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                        transition_dict = {'states': b_s, 'fire-actions': b_f_a,
                                           'next_states': b_ns, 'rewards': b_r,
                                           'dones': b_d}
                        agent.update(transition_dict)
                return_list.append(episode_return)
                # Refresh the running-average readout every 10 episodes.
                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix({'episode': '%d' % (episodes_per_iter * i + i_episode + 1),
                                      'return': '%.3f' % np.mean(return_list[-10:])})
                pbar.update(1)
    return return_list