#!/usr/bin/env python
# -*- coding:utf-8 -*-

# file: sac_trainer.py
# author: 刘浩宇
# datetime: 2023/11/30 14:45
# software: PyCharm

"""
This is function description
"""
import math
import random
from pathlib import Path

import numpy as np
import torch
from matplotlib import pyplot as plt

import environment.electric_scheduling
import model.SAC.v2.sac_train_off_policy as train
import utils.rl_utils as rl
from model.SAC.v2.sac_learning import SACContinuous

# --- SAC training hyperparameters ---
actor_lr = 1e-5  # learning rate for the policy (actor) network
critic_lr = 1e-4  # learning rate for the Q (critic) networks
alpha_lr = 1e-5  # learning rate for the entropy temperature alpha
num_episodes = 10000  # total number of training episodes
hidden_dim = 256  # hidden-layer width of the actor/critic networks
gamma = 0.9  # discount factor
tau = 0.005  # soft-update coefficient for the target networks
buffer_size = 100000  # replay buffer capacity
minimal_size = 1000  # minimum buffer fill before gradient updates start
batch_size = 64  # transitions sampled per gradient update
# Prefer GPU when available, otherwise fall back to CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
    "cpu")


def sac_trainer(env: environment.electric_scheduling.PowerDayAheadSchedule) -> (SACContinuous, str):
    """Train a SAC agent on the day-ahead power scheduling environment.

    Trains for ``num_episodes`` episodes with the module-level
    hyperparameters, plots the moving-average return curve, and saves
    the actor network's weights to disk.

    Args:
        env: day-ahead scheduling environment; must provide ``reset()``
            plus ``water_station_num`` / ``fire_station_num`` attributes.

    Returns:
        Tuple of (trained agent, path the actor weights were saved to).
    """
    # Seed every RNG *before* touching the environment so the initial
    # state produced by reset() is reproducible as well.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)

    state = env.reset()
    state_dim = len(state)
    # Action vector: hydro output per station, thermal output per
    # station, plus two renewable outputs (solar, wind).
    action_dim = env.water_station_num + env.fire_station_num + 2
    # NOTE(review): standard SAC uses target_entropy = -action_dim; a
    # positive log2(action_dim) is unusual — confirm this is intended.
    target_entropy = math.log2(action_dim)
    replay_buffer = rl.ReplayBuffer(buffer_size)
    agent = SACContinuous(env, state_dim, hidden_dim, action_dim,
                          actor_lr, critic_lr, alpha_lr, target_entropy, tau,
                          gamma, device)

    return_list = train.train_off_policy_agent(env, agent, num_episodes,
                                               replay_buffer, minimal_size,
                                               batch_size)
    episodes_list = list(range(len(return_list)))

    # Plot the moving-average (window 9) return curve.
    mv_return = rl.moving_average(return_list, 9)
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters
    plt.rcParams['axes.unicode_minus'] = False  # minus sign with CJK fonts
    plt.plot(episodes_list, mv_return)
    plt.xlabel('Episodes')
    plt.ylabel('Returns')
    # NOTE(review): "Pendulum" looks like copy-paste residue from a demo;
    # consider labelling with the actual environment name.
    plt.title('SAC on {}'.format("Pendulum"))
    plt.show()

    # Persist only the actor network. Create the output directory first
    # so torch.save does not fail on a fresh checkout.
    model_path = "./result/sac_model.pth"
    Path(model_path).parent.mkdir(parents=True, exist_ok=True)
    torch.save({
        "ActorNet": agent.actor.state_dict()
    }, model_path)
    return agent, model_path
