import numpy as np
import torch
import torch.nn.functional as F


class PolicyNet(torch.nn.Module):
    """Deterministic policy network.

    The output layer uses tanh as the activation, so actions land in
    [-1, 1]; callers can then rescale them linearly into whatever range
    the environment accepts.
    """

    def __init__(self, state_dim, hidden_dim, action_dim):
        super().__init__()
        self.linear1 = torch.nn.Linear(state_dim, hidden_dim)
        self.linear_out = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        hidden = F.relu(self.linear1(x))
        return torch.tanh(self.linear_out(hidden))


class QValueNet(torch.nn.Module):
    """State-action value network for continuous-action agents (DDPG/TD3).

    The input is the concatenation of a state batch and an action batch;
    the output is one scalar per row: the estimated value Q(s, a).
    """

    def __init__(self, state_dim, hidden_dim, action_dim):
        super().__init__()
        self.linear1 = torch.nn.Linear(state_dim + action_dim, hidden_dim)
        self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
        self.linear_out = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x, a):
        # Join state and action along the feature dimension.
        joined = torch.cat([x, a], dim=1)
        hidden = F.relu(self.linear1(joined))
        hidden = F.relu(self.linear2(hidden))
        return self.linear_out(hidden)



'''
References:
https://github.com/sfujim/TD3
https://github.com/boyu-ai/Hands-on-RL/blob/main/%E7%AC%AC13%E7%AB%A0-DDPG%E7%AE%97%E6%B3%95.ipynb
T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.
'''
class DDPG_TD3:
    """DDPG / TD3 agent for environments with continuous actions.

    One class implements both algorithms, selected by ``name_algorithm``
    ('DDPG' or 'TD3').  TD3 adds three tricks on top of DDPG: clipped
    double-Q learning, target policy smoothing, and delayed policy updates.

    The actor outputs values in [-1, 1] (tanh); they are mapped into the
    environment's range via ``action_scale`` / ``action_bias``.  That same
    mapping is applied everywhere an actor output reaches a critic, so the
    critics always see actions on the same scale as the replay buffer.
    NOTE(review): ``action_low`` / ``action_high`` are assumed to be scalars
    (or broadcastable against the action vector) — confirm against callers.
    """

    def __init__(self, name_algorithm, state_dim, hidden_dim, action_dim, action_low, action_high, expl_noise_sigma, actor_lr, critic_lr, para_soft_update, discount_factor, device):
        self.name_algorithm = name_algorithm

        # Actor and its target copy start from identical weights.
        self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.actor_target = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)

        if name_algorithm == 'DDPG':
            # Single critic plus a target critic initialised to the same weights.
            self.critic = QValueNet(state_dim, hidden_dim, action_dim).to(device)
            self.critic_target = QValueNet(state_dim, hidden_dim, action_dim).to(device)
            self.critic_target.load_state_dict(self.critic.state_dict())
            self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        elif name_algorithm == 'TD3':
            # Clipped double Q-learning: two independent critics, each with its own target.
            self.critic_1 = QValueNet(state_dim, hidden_dim, action_dim).to(device)
            self.critic_1_target = QValueNet(state_dim, hidden_dim, action_dim).to(device)
            self.critic_1_target.load_state_dict(self.critic_1.state_dict())
            self.critic_1_optimizer = torch.optim.Adam(self.critic_1.parameters(), lr=critic_lr)

            self.critic_2 = QValueNet(state_dim, hidden_dim, action_dim).to(device)
            self.critic_2_target = QValueNet(state_dim, hidden_dim, action_dim).to(device)
            self.critic_2_target.load_state_dict(self.critic_2.state_dict())
            self.critic_2_optimizer = torch.optim.Adam(self.critic_2.parameters(), lr=critic_lr)

        # Linear map from the actor's tanh output in [-1, 1] to [action_low, action_high].
        self.action_low = action_low
        self.action_high = action_high
        self.action_scale = torch.tensor((action_high - action_low) / 2, device=device, dtype=torch.float32).unsqueeze(dim=0)
        self.action_bias = torch.tensor((action_high + action_low) / 2, device=device, dtype=torch.float32).unsqueeze(dim=0)

        if name_algorithm == 'TD3':
            # Target policy smoothing: Gaussian noise added to the target action
            # (the TD3 paper's defaults are 0.2 / 0.5 times the max action).
            self.policy_noise = 0.1 * action_high   # std of the smoothing noise
            self.noise_clip = 0.25 * action_high    # clip range for that noise

            # Delayed policy updates: the actor and all target networks update
            # once every `policy_freq` critic updates.
            self.sample_count = 0
            self.policy_freq = 2

        self.discount_factor = discount_factor    # reward discount (gamma)
        self.expl_noise_sigma = expl_noise_sigma  # std of zero-mean Gaussian exploration noise, relative to action_scale
        self.para_soft_update = para_soft_update  # tau for Polyak averaging of the target networks
        self.action_dim = action_dim
        self.device = device

    def _to_env_action(self, raw_action):
        # Map the actor's tanh output in [-1, 1] into the environment's action range.
        return self.action_scale * raw_action + self.action_bias

    def take_action_noise(self, state):
        """Training-time action: deterministic policy plus Gaussian exploration noise.

        Returns a numpy array clipped to [action_low, action_high].
        """
        state = torch.as_tensor(state, dtype=torch.float, device=self.device)
        with torch.no_grad():
            action = self._to_env_action(self.actor(state))
            # Draw the noise on the same device as the policy output; the
            # original numpy noise would raise for CUDA tensors.
            noise = torch.randn_like(action) * self.expl_noise_sigma * self.action_scale
            action = (action + noise).clamp(self.action_low, self.action_high)
        return action.cpu().numpy()

    def take_action(self, state):
        """Greedy (noise-free) action, e.g. for evaluation."""
        state = torch.as_tensor(state, dtype=torch.float, device=self.device)
        with torch.no_grad():
            action = self._to_env_action(self.actor(state))
            action = action.clamp(self.action_low, self.action_high)
        return action.cpu().numpy()

    def soft_update(self, net, target_net):
        """Polyak-average `net` into `target_net`: theta' <- (1-tau)*theta' + tau*theta."""
        for param_target, param in zip(target_net.parameters(), net.parameters()):
            param_target.data.copy_(param_target.data * (1.0 - self.para_soft_update) + param.data * self.para_soft_update)

    def update(self, transition_dict):
        """One gradient step from a sampled batch.

        `transition_dict` must hold equal-length sequences under the keys
        'states', 'actions', 'rewards', 'next_states', 'dones'.
        """
        # np.asarray first: torch.tensor on a list of arrays is very slow.
        states = torch.as_tensor(np.asarray(transition_dict['states']), dtype=torch.float, device=self.device)
        actions = torch.as_tensor(np.asarray(transition_dict['actions']), dtype=torch.float, device=self.device)
        rewards = torch.as_tensor(np.asarray(transition_dict['rewards']), dtype=torch.float, device=self.device).view(-1, 1)
        next_states = torch.as_tensor(np.asarray(transition_dict['next_states']), dtype=torch.float, device=self.device)
        dones = torch.as_tensor(np.asarray(transition_dict['dones']), dtype=torch.float, device=self.device).view(-1, 1)

        if self.name_algorithm == 'DDPG':
            # --- Critic update ---
            # Target actions are scaled to the env range so the critic sees the
            # same action scale as the (env-scaled) replay-buffer actions.
            # no_grad: targets are constants; do not backprop into target nets.
            with torch.no_grad():
                next_actions = self._to_env_action(self.actor_target(next_states))
                next_q_values = self.critic_target(next_states, next_actions)
                q_targets = rewards + self.discount_factor * next_q_values * (1 - dones)
            critic_loss = F.mse_loss(self.critic(states, actions), q_targets)  # mse_loss already mean-reduces
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()

            # --- Actor update ---
            # Maximise Q(s, pi(s)) -> minimise its negative.  (Eq. 6 in the DDPG paper)
            actor_loss = -torch.mean(self.critic(states, self._to_env_action(self.actor(states))))
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()

            # Soft-update both target networks.
            self.soft_update(self.actor, self.actor_target)
            self.soft_update(self.critic, self.critic_target)

        elif self.name_algorithm == 'TD3':
            with torch.no_grad():
                # Target policy smoothing: clipped noise makes it harder for the
                # policy to exploit sharp errors in the Q-function.
                noises = (torch.randn_like(actions) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
                # Scale the target-actor output into env range (noise magnitudes
                # are defined in env units) before clamping to the env bounds.
                next_actions = (self._to_env_action(self.actor_target(next_states)) + noises).clamp(self.action_low, self.action_high)

                # Clipped double-Q: the target is the smaller of the two target critics.
                next_q_values = torch.min(self.critic_1_target(next_states, next_actions),
                                          self.critic_2_target(next_states, next_actions))
                q_targets = rewards + self.discount_factor * next_q_values * (1 - dones)

            # Both critics regress onto the same target.
            critic_1_loss = F.mse_loss(self.critic_1(states, actions), q_targets)
            self.critic_1_optimizer.zero_grad()
            critic_1_loss.backward()
            self.critic_1_optimizer.step()

            critic_2_loss = F.mse_loss(self.critic_2(states, actions), q_targets)
            self.critic_2_optimizer.zero_grad()
            critic_2_loss.backward()
            self.critic_2_optimizer.step()

            # Delayed policy updates: the actor changes less often than the critics.
            self.sample_count += 1
            if self.sample_count % self.policy_freq == 0:
                actor_loss = -torch.mean(self.critic_1(states, self._to_env_action(self.actor(states))))
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                self.actor_optimizer.step()
                # Soft-update all target networks.
                self.soft_update(self.actor, self.actor_target)
                self.soft_update(self.critic_1, self.critic_1_target)
                self.soft_update(self.critic_2, self.critic_2_target)