from agents.actor import ActorNetwork
from agents.critic import CriticNetwork
import torch.optim as optim
import torch
import numpy as np

import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class MADDPGAgent:
    """MADDPG agent: an actor/critic pair plus slowly-tracking target copies.

    The target networks are initialized to the online networks' weights and
    thereafter follow them via Polyak (soft) updates in ``soft_update``.
    """

    def __init__(self, state_dim, action_dim):
        self.actor = ActorNetwork(state_dim, action_dim).to(device)
        self.critic = CriticNetwork(state_dim, action_dim).to(device)
        self.target_actor = ActorNetwork(state_dim, action_dim).to(device)
        self.target_critic = CriticNetwork(state_dim, action_dim).to(device)

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=0.001)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=0.001)

        # Initialize target networks with the online networks' weights.
        self.target_actor.load_state_dict(self.actor.state_dict())
        self.target_critic.load_state_dict(self.critic.state_dict())

    def update(self, replay_buffer, uav, gamma=0.99, tau=0.01):
        """Run one training pass over a sampled batch, one transition at a time.

        For each sampled transition: update the critic toward the TD target
        built from the target networks, update the actor to maximize the
        critic's Q-value, then soft-update both target networks.

        Args:
            replay_buffer: object exposing ``sample()`` returning
                ``(states, actions, rewards, next_states)`` array-likes with
                a shared first (batch) dimension.
            uav: extra context forwarded to the actor networks
                (semantics defined by ActorNetwork — TODO confirm).
            gamma: discount factor for the TD target.
            tau: interpolation coefficient for the target-network soft update.
        """
        # Sample a batch of experience.
        states, actions, rewards, next_states = replay_buffer.sample()

        # Move all batch data to the configured device.
        # as_tensor avoids an extra copy when the input is already a tensor.
        states = torch.as_tensor(states, dtype=torch.float32, device=device)
        actions = torch.as_tensor(actions, dtype=torch.float32, device=device)
        rewards = torch.as_tensor(rewards, dtype=torch.float32, device=device)
        next_states = torch.as_tensor(next_states, dtype=torch.float32, device=device)

        # Hoisted: one loss module for the whole batch instead of one per step.
        mse_loss = torch.nn.MSELoss()

        # Fix: iterate the actual batch size instead of a hard-coded 64,
        # so the agent works with any replay-buffer sample size.
        for i in range(states.shape[0]):
            # --- Critic update: minimize TD error against the target nets ---
            next_state = next_states[i]
            best_action = self.target_actor(next_state, uav)
            target_q = rewards[i] + gamma * self.target_critic(next_state, best_action)
            # states[i]/actions[i] are already tensors on `device`; the old
            # torch.tensor(...) re-wrap was a redundant copy (and warns).
            critic_loss = mse_loss(self.critic(states[i], actions[i]), target_q.detach())
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()

            # --- Actor update: maximize Q, i.e. minimize -Q ---
            actor_loss = -self.critic(states[i], self.actor(states[i], uav)).mean()
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()

            # --- Soft-update the target networks toward the online ones ---
            self.soft_update(self.actor, self.target_actor, tau)
            self.soft_update(self.critic, self.target_critic, tau)

    def soft_update(self, local_model, target_model, tau):
        """Polyak averaging: target <- tau * local + (1 - tau) * target."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
            
            

