import random
import argparse
import gym
import numpy as np
import torch

import utils
from ReplayBuffer import ReplayBuffer
from sac import SAC


def configs(args=None):
    """Parse the base training hyper-parameters (environment config excluded).

    :param args: optional list of argument strings to parse; defaults to
                 ``None``, which makes argparse read ``sys.argv`` as before.
                 Passing an explicit list keeps the function testable.
    :return: ``argparse.Namespace`` holding the parsed hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--actor_lr', type=float, default=3e-4, help="策略网络学习率")
    parser.add_argument('--critic_lr', type=float, default=3e-3, help="价值网络学习率")
    parser.add_argument('--alpha_lr', type=float, default=3e-4, help="SAC中熵正则项")
    parser.add_argument('--num_episodes', type=int, default=100, help="训练多少个episode")
    parser.add_argument('--gamma', type=float, default=0.98, help="折扣因子")
    parser.add_argument('--hidden_dim', type=int, default=128, help="网络中的隐藏层神经元个数")
    parser.add_argument('--tau', type=float, default=0.005, help="目标网络软更新的参数如PPO，DDPG，SAC")
    parser.add_argument('--buffer_size', type=int, default=100000, help="经验回访池大小")
    parser.add_argument('--minial_size', type=int, default=1000, help="off-policy中开始更新对应的buffer中的最少数量")
    parser.add_argument('--batch_size', type=int, default=64, help="批更新中一批样本个数")
    parser.add_argument('--seed', type=int, default=0, help="随机种子大小")
    # Bug fix: sigma is a continuous noise scale, so it must be parsed as a
    # float — with the original type=int, a CLI value such as "0.01" would be
    # rejected by argparse, and "1" would be a useless integer noise level.
    parser.add_argument('--sigma', type=float, default=0.01, help="高斯噪声标准差")
    return parser.parse_args(args)


#
#
# actor_lr = 3e-4
# critic_lr = 3e-3
# alpha_lr = 3e-4
# num_episodes = 100
# hidden_dim = 128
# gamma = 0.98
# # 软更新参数
# tau = 0.005
# buffer_size = 100000
# minial_size = 1000
# batch_size = 64
# sigma = 0.01  # 高斯噪声标准差
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
#     "cpu")
#
# env_name = 'Pendulum-v1'
# env = gym.make(env_name)
# random.seed(0)
# np.random.seed(0)
# env.seed(0)
# torch.manual_seed(0)
# replay_buffer = ReplayBuffer(buffer_size)
# state_dim = env.observation_space.shape[0]
# action_dim = env.action_space.shape[0]
# action_bound = env.action_space.high[0]
# target_entropy = -env.action_space.shape[0]
#
# agent = SAC(state_dim, hidden_dim, action_dim, action_bound, actor_lr, critic_lr, alpha_lr, target_entropy, tau, gamma,
#             device)
# return_list = utils.train_off_policy_agent(env, agent, num_episodes, replay_buffer, minial_size, batch_size)
# utils.plots(return_list, env_name, "SAC")

def exponential_decay(t, init, m, finish=0.0000001):
    """Exponential decay schedule, e.g. for an epsilon-greedy exploration rate.

    The curve starts at ``init`` when ``t == 0`` and reaches ``finish`` when
    ``t == m``; algebraically it equals ``init * (finish / init) ** (t / m)``.
    Both ``init`` and ``finish`` must be positive (log is taken of both).

    :param t: current time step.
    :param init: initial value (> 0).
    :param m: number of steps over which to decay from ``init`` to ``finish``.
    :param finish: final value reached after ``m`` steps (> 0).
    :return: the decayed value at step ``t``.
    """
    # Decay rate chosen so that exp(-alpha * m) == finish / init.
    alpha = np.log(init / finish) / m
    # Horizontal shift so the curve passes through `init` at t == 0.
    shift = -np.log(init) / alpha
    return np.exp(-alpha * (t + shift))


if __name__ == '__main__':
    # Quick sanity check: decay from 1.0 down to 0.1 over 10 steps.
    # The function now lives at module level (instead of inside this guard)
    # so it can be imported and unit-tested; script output is unchanged.
    for t in range(10):
        print(exponential_decay(t, 1, 10, finish=0.1))