import numpy as np
import torch
import torch.nn.functional as F

from model.DDPG.v1.ddpg_net import PolicyNet, QValueNet


class DDPG:
    """Deep Deterministic Policy Gradient (DDPG) agent.

    Maintains a deterministic policy (actor) and a Q-value network (critic)
    for the thermal ("fire") generator, plus slowly-tracking target copies
    of both, updated via Polyak averaging. Hydro ("water") networks are not
    implemented yet; their dimensions are accepted for interface stability.
    """

    def __init__(self, state_dim, hidden_dim, fire_action_dim, fire_action_bound,
                 water_action_dim, water_action_bound,
                 sigma, actor_lr, critic_lr, tau, gamma, device):
        """Build online and target networks and their optimizers.

        Args:
            state_dim: dimensionality of the environment state.
            hidden_dim: sequence of two hidden-layer sizes (hidden_dim[0], hidden_dim[1]).
            fire_action_dim: dimensionality of the thermal-generation action.
            fire_action_bound: scaling bound applied by the actor's output layer.
            water_action_dim / water_action_bound: reserved for the hydro model
                (currently unused beyond storing the dimension).
            sigma: std-dev of the zero-mean Gaussian exploration noise.
            actor_lr / critic_lr: AdamW learning rates.
            tau: soft-update (Polyak) coefficient for the target networks.
            gamma: discount factor.
            device: torch device the networks and tensors live on.
        """
        self.fire_actor = PolicyNet(state_dim, hidden_dim[0], hidden_dim[1],
                                    fire_action_dim, fire_action_bound).to(device)
        self.fire_critic = QValueNet(state_dim, hidden_dim[0], hidden_dim[1],
                                     fire_action_dim).to(device)

        self.fire_target_actor = PolicyNet(state_dim, hidden_dim[0], hidden_dim[1],
                                           fire_action_dim, fire_action_bound).to(device)
        self.fire_target_critic = QValueNet(state_dim, hidden_dim[0], hidden_dim[1],
                                            fire_action_dim).to(device)
        # BUG FIX: target networks must start as exact copies of the online
        # networks; otherwise early TD targets come from unrelated random weights.
        self.fire_target_actor.load_state_dict(self.fire_actor.state_dict())
        self.fire_target_critic.load_state_dict(self.fire_critic.state_dict())

        self.fire_actor_optimizer = torch.optim.AdamW(self.fire_actor.parameters(), lr=actor_lr)
        self.fire_critic_optimizer = torch.optim.AdamW(self.fire_critic.parameters(), lr=critic_lr)
        self.fire_criterion = torch.nn.MSELoss()  # kept for external compatibility

        self.gamma = gamma
        self.sigma = sigma  # Gaussian exploration noise std-dev (mean fixed at 0)
        self.tau = tau      # target-network soft-update coefficient
        self.count = 0      # number of completed update() calls
        self.fire_action_dim = fire_action_dim
        self.water_action_dim = water_action_dim
        self.device = device

    def take_action(self, state):
        """Return the actor's action for `state` with exploration noise added.

        Note: the returned action is NOT clipped, so noise may push it past
        fire_action_bound — presumably the environment clips it; TODO confirm.
        """
        state_tensor = torch.tensor(state, dtype=torch.float).to(self.device)
        fire_action = self.fire_actor(state_tensor).cpu().detach().numpy()
        # Zero-mean Gaussian noise encourages exploration.
        return fire_action + self.sigma * np.random.randn(self.fire_action_dim)

    def soft_update(self, net, target_net):
        """Polyak-average `net` into `target_net`: θ' ← (1 - τ)·θ' + τ·θ."""
        for param_target, param in zip(target_net.parameters(), net.parameters()):
            param_target.data.copy_(param_target.data * (1.0 - self.tau) + param.data * self.tau)

    def update(self, transition_dict):
        """Run one DDPG gradient step from a batch of transitions.

        `transition_dict` must provide 'states', 'fire-actions', 'rewards',
        'next_states', and 'dones' as array-likes of equal batch length.
        """
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        fire_actions = torch.tensor(transition_dict['fire-actions'], dtype=torch.float).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)

        # Critic step: regress Q(s, a) toward the bootstrapped TD target.
        # no_grad stops gradients from leaking into the target networks.
        with torch.no_grad():
            next_q_values = self.fire_target_critic(next_states, self.fire_target_actor(next_states))
            q_targets = rewards + self.gamma * next_q_values * (1 - dones)
        # F.mse_loss already reduces to the mean; no extra torch.mean needed.
        critic_loss = F.mse_loss(self.fire_critic(states, fire_actions), q_targets)
        self.fire_critic_optimizer.zero_grad()
        critic_loss.backward()
        self.fire_critic_optimizer.step()

        # Actor step: ascend the critic's value of the actor's own actions.
        actor_loss = -torch.mean(self.fire_critic(states, self.fire_actor(states)))
        self.fire_actor_optimizer.zero_grad()
        actor_loss.backward()
        self.fire_actor_optimizer.step()

        self.soft_update(self.fire_actor, self.fire_target_actor)    # policy target
        self.soft_update(self.fire_critic, self.fire_target_critic)  # value target
        self.count += 1  # BUG FIX: counter was initialized but never advanced

