import numpy as np
import torch
import torch.nn.functional as F

from Net import PolicyNetDeterministic, QValueNet


class DDPG:
    """
    Deep Deterministic Policy Gradient (DDPG) — an off-policy actor-critic
    algorithm: the behavior policy that collects transitions differs from the
    policy being optimized, and training samples come from a replay buffer.

    Architecture (four networks):
      * Actor (current):  updates the policy parameters theta; picks the action
        a for the current state s when interacting with the environment.
      * Actor (target):   picks the action a' for the sampled next state s';
        its parameters theta' track theta via soft (Polyak) updates.
      * Critic (current): updates the value parameters w; computes Q(s, a; w).
      * Critic (target):  computes the Q'(s', a'; w') term of the TD target
        y = r + gamma * Q'(s', a'; w'); w' tracks w via soft updates.

    Update rules:
        Critic: minimize [r + gamma * Q'(s', pi_target(s'); w') - Q(s, a; w)]^2
        Actor : grad_theta J = grad_theta pi_theta(s) * grad_a Q(s, a) | a = pi_theta(s)
    """

    def __init__(self, state_dim, hidden_dim, action_dim, action_bound, actor_lr, critic_lr, sigma, tau, gamma, device):
        """
        Args:
            state_dim: dimensionality of the state vector.
            hidden_dim: hidden-layer width for all four networks.
            action_dim: dimensionality of the action vector.
            action_bound: scale applied to the actor's output (environment-specific).
            actor_lr: learning rate for the policy network.
            critic_lr: learning rate for the value network.
            sigma: std of the zero-mean Gaussian exploration noise.
            tau: soft-update coefficient for the target networks.
            gamma: discount factor.
            device: torch device the networks and tensors live on.
        """
        self.action_dim = action_dim
        self.actor = PolicyNetDeterministic(state_dim, hidden_dim, action_dim,
                                            action_bound).to(device)
        self.critic = QValueNet(state_dim, hidden_dim, action_dim).to(device)
        self.target_actor = PolicyNetDeterministic(state_dim, hidden_dim, action_dim,
                                                   action_bound).to(device)
        self.target_critic = QValueNet(state_dim, hidden_dim,
                                       action_dim).to(device)
        # Initialize each target network with the same weights as its online twin.
        self.target_critic.load_state_dict(self.critic.state_dict())
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
                                                lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
                                                 lr=critic_lr)
        self.gamma = gamma
        self.sigma = sigma  # std of the Gaussian exploration noise (mean fixed at 0)
        self.tau = tau  # soft-update coefficient for the target networks
        self.action_bound = action_bound
        self.device = device

    def take_action(self, state):
        """Return the deterministic policy's action for `state` plus Gaussian exploration noise."""
        state = torch.tensor(np.array([state]), dtype=torch.float).to(self.device)
        # Acting needs no gradients; no_grad avoids building an autograd graph.
        with torch.no_grad():
            action = self.actor(state).cpu().numpy()[0]
        # Add exploration noise (DDPG's policy is deterministic otherwise).
        return action + self.sigma * np.random.randn(self.action_dim)

    def soft_update(self, net: torch.nn.Module, target_net: torch.nn.Module):
        """Polyak averaging: w' = tau * w + (1 - tau) * w', where w' are target
        parameters and w are the online (trained) parameters."""
        for param_target, param in zip(target_net.parameters(), net.parameters()):
            param_target.data.copy_(param_target.data * (1.0 - self.tau) + param.data * self.tau)

    def update(self, transition_dict):
        """One gradient step on critic and actor from a sampled batch, then soft-update targets.

        `transition_dict` holds parallel arrays under the keys
        'states', 'actions', 'rewards', 'next_states', 'dones'.
        """
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        if self.action_dim == 1:
            # Scalar actions are stored flat; reshape to (batch, 1) for the critic.
            actions = torch.tensor(transition_dict['actions'], dtype=torch.float).view(-1, 1).to(self.device)
        else:
            actions = torch.tensor(transition_dict['actions'], dtype=torch.float).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'],
                               dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)

        # TD target y = r + gamma * Q'(s', pi'(s')) using both target networks.
        # no_grad: the target is a constant w.r.t. the critic loss; without it,
        # backward() would also accumulate gradients on the target networks'
        # parameters, which are never zeroed (no optimizer steps them).
        with torch.no_grad():
            next_q_values = self.target_critic(next_states, self.target_actor(next_states))
            q_targets = rewards + self.gamma * next_q_values * (1 - dones)
        # F.mse_loss already reduces to the mean over the batch.
        critic_loss = F.mse_loss(self.critic(states, actions), q_targets)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Deterministic policy gradient: maximize Q(s, pi(s)) => minimize its negative.
        actor_loss = -torch.mean(self.critic(states, self.actor(states)))
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Soft-update both target networks toward their online twins.
        self.soft_update(self.actor, self.target_actor)
        self.soft_update(self.critic, self.target_critic)

# if __name__ == '__main__':
# actor_lr = 5e-4
# critic_lr = 5e-3
# num_episodes = 200
# hidden_dim = 64
# gamma = 0.98
# # soft-update coefficient
# tau = 0.005
# buffer_size = 10000
# minimal_size = 1000
# batch_size = 64
# sigma = 0.01  # std of the Gaussian exploration noise
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
#     "cpu")
#
# env_name = 'Pendulum-v1'
# env = gym.make(env_name)
# random.seed(0)
# np.random.seed(0)
# env.seed(0)
# torch.manual_seed(0)
# replay_buffer = ReplayBuffer(buffer_size)
# state_dim = env.observation_space.shape[0]
# action_dim = env.action_space.shape[0]
# action_bound = env.action_space.high[0]
#
# agent = DDPG(state_dim, hidden_dim, action_dim, action_bound, actor_lr, critic_lr, sigma, tau, gamma, device)
# return_list = utils.train_off_policy_agent(env, agent, num_episodes, replay_buffer, minimal_size, batch_size)
# utils.plots(return_list, env_name, "DDPG")
