import abc

import numpy as np
from torch.utils.tensorboard import SummaryWriter
import torch


class AgentBase(abc.ABC):
    """Common base class for value-based RL agents.

    Holds shared configuration (device, soft-update rate, Huber loss
    criterion), dispatches action selection to a concrete exploration
    policy, and provides parameter-decay schedules. Concrete agents must
    implement the abstract policy / persistence / update methods.

    Note: the original class used ``@abc.abstractmethod`` without
    inheriting ``abc.ABC``, so the abstract checks were never enforced;
    inheriting ``abc.ABC`` restores the intended contract.
    """

    def __init__(self, args):
        """
        :param args: namespace-like config. Optional attributes read here:
                     ``devices``/``device`` (torch device; defaults to CUDA
                     when available, else CPU) and ``tau`` (soft-update rate,
                     default 0.005). ``args.sampling_strategy`` is read later
                     by :meth:`take_action`.
        """
        self.args = args
        # NOTE(review): the original code read only 'devices' (likely a typo
        # for 'device'). Both spellings are accepted here; 'devices' takes
        # precedence to preserve the original behavior exactly.
        default_device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        self.device = getattr(args, 'devices', getattr(args, 'device', default_device))
        # Interpolation rate for soft target-network updates.
        self.tau = getattr(args, 'tau', 0.005)
        # Huber loss — less sensitive to outlier TD errors than MSE.
        self.criterion = torch.nn.SmoothL1Loss(reduction="mean")

    def set_writer(self, writer):
        # TensorBoard SummaryWriter used by subclasses to log training metrics.
        self.writer = writer

    def take_action(self, state, prediction=False):
        """Select an action for the given state.

        Intended for DQN-style agents whose policy derives from Q-values;
        actor-based algorithms override action selection differently.

        :param state: environment observation (array-like); batched to
                      shape (1, ...) before being moved to ``self.device``.
        :param prediction: True for evaluation mode, False for training
                           (only forwarded to the epsilon-greedy policy).
        :return: the action chosen by the concrete policy.
        """
        state = torch.tensor(np.array([state]), dtype=torch.float).to(self.device)
        if self.args.sampling_strategy == "epsilon":
            return self.epsilon_policy(state, prediction)
        else:  # Boltzmann (softmax) exploration
            return self.boltzmann(state)

    @abc.abstractmethod
    def epsilon_policy(self, state, prediction):
        """Epsilon-greedy action selection; must be provided by subclasses."""
        raise NotImplementedError("没有实现epsilon-greedy策略")

    @abc.abstractmethod
    def boltzmann(self, state):
        """Boltzmann (softmax) action selection; must be provided by subclasses."""
        raise NotImplementedError("没有实现boltzmann策略")

    def soft_update(self, net: torch.nn.Module, target_net: torch.nn.Module):
        r"""Soft update: :math:`w' \leftarrow \tau w + (1-\tau) w'`,
        where :math:`w'` are the target-network parameters and :math:`w`
        the online-network parameters. Updates ``target_net`` in place.
        """
        for param_target, param in zip(target_net.parameters(), net.parameters()):
            param_target.data.copy_(param_target.data * (1.0 - self.tau) + param.data * self.tau)

    @abc.abstractmethod
    def save(self):
        """Persist the model; must be provided by subclasses."""
        raise NotImplementedError("没有实现保存模型功能")

    @abc.abstractmethod
    def load(self, args):
        """Load a previously saved model; must be provided by subclasses."""
        raise NotImplementedError("没有实现读取已保存模型功能")

    @abc.abstractmethod
    def update(self, transition_dict):
        """One learning step from a batch of transitions; subclass-specific."""
        raise NotImplementedError("没有实现更新功能")

    def exponential_decay(self, t, init, m, finish=0.0000001):
        """Exponential decay schedule for an exploration parameter.

        Calibrated so the value equals ``init`` at ``t = 0`` and ``finish``
        at ``t = m``. NOTE: for ``t > m`` the value keeps decaying below
        ``finish`` — callers wanting a floor must clamp themselves.

        :param t: current time step.
        :param init: initial value (at t = 0).
        :param m: number of steps to reach ``finish``.
        :param finish: target value at t = m (nudged to 1e-15 if zero,
                       to avoid division by zero in the log).
        :return: the decayed value at step ``t``.
        """
        if finish == 0:
            finish += 1e-15
        alpha = np.log(init / finish) / m
        l = - np.log(init) / alpha
        decay = np.exp(-alpha * (t + l))
        return decay

    def linear_decay(self, init, finish, steps=100):
        """One step of decay toward ``finish``: subtract ``1/steps`` of the
        current gap, clamping at ``finish``.

        NOTE: because callers feed the returned value back in as ``init``,
        the step is recomputed from the shrinking gap each call, so the
        trajectory is geometric toward ``finish`` rather than truly linear.

        :param init: current value.
        :param finish: floor value.
        :param steps: gap divisor per call (default 100, matching the
                      original hard-coded constant).
        :return: the decayed value, never below ``finish``.
        """
        dec = (init - finish) / steps
        init = init - dec if init > finish else finish
        return init
