import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F


class DDPG(nn.Module):
    """Deep Deterministic Policy Gradient (Lillicrap et al., 2016)."""

    def __init__(self, model, gamma, tau, actor_lr, critic_lr, device):
        """
        DDPG algorithm.

        :param model: (nn.Module) forward network holding both actor and
            critic; must expose ``actor_model`` / ``critic_model`` sub-modules,
            ``policy(obs)`` and ``value(obs, action)`` methods, and implement
            ``get_actor_params()``.
        :param gamma: (float) reward discount factor.
        :param tau: (float) soft-update coefficient used when syncing
            ``self.target_model`` towards ``self.model`` (see ``sync_target``).
        :param actor_lr: (float) actor learning rate.
        :param critic_lr: (float) critic learning rate.
        :param device: device both networks are moved to.
        """
        super(DDPG, self).__init__()
        assert isinstance(gamma, float)
        assert isinstance(tau, float)
        assert isinstance(actor_lr, float)
        assert isinstance(critic_lr, float)
        self.gamma = gamma
        self.tau = tau
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.device = device

        self.model = model
        # Frozen copy of the online network, used to compute stable TD targets.
        self.target_model = copy.deepcopy(model)

        self.model.to(device)
        self.target_model.to(device)

        # Separate Adam optimizers so actor and critic can use different LRs.
        self.optimizer_actor = optim.Adam(self.model.actor_model.parameters(), lr=self.actor_lr)
        self.optimizer_critic = optim.Adam(self.model.critic_model.parameters(), lr=self.critic_lr)

    def predict(self, obs):
        """Predict a deterministic action for ``obs`` with the online actor."""
        return self.model.policy(obs)

    def learn(self, obs, action, reward, next_obs, terminal):
        """
        Run one DDPG update on a batch of transitions.

        :return: ``(actor_loss, critic_loss)`` — the scalar loss tensors of the
            two sub-updates.
        """
        actor_loss = self._actor_learn(obs)
        critic_loss = self._critic_learn(obs, action, reward, next_obs, terminal)
        return actor_loss, critic_loss

    def _actor_learn(self, obs):
        """
        Optimize the policy network (parameters θ).

        Loss = -Q_w(s, a) with a = μ_θ(s): gradient ascent on the critic's
        value estimate, optimizing only the actor's parameters.
        """
        # Temporarily freeze the critic so the actor's backward pass does not
        # waste work accumulating gradients into critic parameters. This does
        # not change results: those gradients were discarded by the critic's
        # zero_grad() before its own step. Original flags are restored so any
        # deliberately-frozen critic parameter stays frozen.
        critic_params = list(self.model.critic_model.parameters())
        prior_flags = [p.requires_grad for p in critic_params]
        for p in critic_params:
            p.requires_grad_(False)
        try:
            action = self.model.policy(obs)
            Q = self.model.value(obs, action)
            loss = torch.mean(-1.0 * Q)
            self.optimizer_actor.zero_grad()
            loss.backward()
            self.optimizer_actor.step()
        finally:
            for p, flag in zip(critic_params, prior_flags):
                p.requires_grad_(flag)
        return loss

    def _critic_learn(self, obs, action, reward, next_obs, terminal):
        """
        Optimize the Q network (parameters w).

        Loss = MSE[ Q_w(s, a), r + γ * target_Q_w(s', a') ] with
        a' = target_μ_θ(s'). Only the critic's parameters are optimized.
        """
        # Build the regression target with the frozen target networks under
        # no_grad(): same values as forward + detach(), without recording a
        # graph that would immediately be thrown away.
        with torch.no_grad():
            next_action = self.target_model.policy(next_obs)
            next_Q = self.target_model.value(next_obs, next_action)
            # terminal == True marks the episode's last transition, where the
            # target reduces to the immediate reward.
            # NOTE(review): reward/terminal are assumed to broadcast cleanly
            # against next_Q (e.g. all shaped (batch, 1)); a (batch,) vs
            # (batch, 1) mismatch would silently broadcast to (batch, batch)
            # — confirm against the replay-buffer shapes.
            target_Q = reward + (1.0 - terminal.float()) * self.gamma * next_Q
        # Q value of the actually-taken actions under the online critic.
        Q = self.model.value(obs, action)
        loss = F.mse_loss(Q, target_Q)
        self.optimizer_critic.zero_grad()
        loss.backward()
        self.optimizer_critic.step()
        return loss

    def sync_target(self, soft_sync=True):
        """
        Sync ``self.model``'s parameters into ``self.target_model``.

        :param soft_sync: (bool) if True, Polyak-average each entry:
            target ← τ * online + (1 - τ) * target (a small step per call);
            otherwise hard-copy the full state.
        """
        if soft_sync:
            model_state = self.model.state_dict()
            target_state = self.target_model.state_dict()
            blended = {}
            for key, online in model_state.items():
                if torch.is_floating_point(online):
                    blended[key] = self.tau * online + (1.0 - self.tau) * target_state[key]
                else:
                    # Integer buffers (e.g. BatchNorm's num_batches_tracked)
                    # cannot be meaningfully interpolated — hard-copy them.
                    blended[key] = online.clone()
            self.target_model.load_state_dict(blended)
        else:
            # Full (hard) update.
            self.target_model.load_state_dict(self.model.state_dict())
