import numpy as np
import torch
import os
from maddpg.maddpg import MADDPG
import numpy as np

import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class Agent:
    """Single-agent wrapper around a MADDPG policy.

    Provides epsilon-greedy discrete action selection (one-hot over
    ``args.num_position_action`` position actions) and delegates training
    to the underlying MADDPG policy.
    """

    def __init__(self, agent_id, args, num_action):
        """Create the agent's MADDPG policy.

        Args:
            agent_id: index of this agent among all agents.
            args: experiment config; must expose ``num_position_action``.
            num_action: size of this agent's action space, forwarded to MADDPG.
        """
        self.args = args
        self.agent_id = agent_id
        self.policy = MADDPG(args, agent_id, num_action)

    def select_action(self, o, epsilon, uav):
        """Pick an action epsilon-greedily.

        Args:
            o: local observation, convertible to a float32 tensor.
            epsilon: exploration probability in [0, 1].
            uav: extra argument forwarded to the actor network
                 (semantics defined by MADDPG — TODO confirm).

        Returns:
            (action, flag): ``flag`` is 1 when the action was random
            (a one-hot numpy array), 0 when it came from the actor
            network (a flat list of ints).
        """
        if np.random.uniform() < epsilon:
            # Explore: uniformly sample one discrete position action, one-hot encoded.
            flag = 1
            index = np.random.randint(0, self.args.num_position_action)
            u = np.zeros(self.args.num_position_action)
            u[index] = 1
        else:
            flag = 0
            # Build the batch-of-1 input once and move it to the target device.
            # BUG FIX: original wrapped the already-built tensor in torch.tensor(...)
            # again, which copies/detaches and emits a PyTorch UserWarning.
            inputs = torch.tensor(o, dtype=torch.float32).unsqueeze(0).to(device)
            u = self.policy.actor_network(inputs, uav)
            if isinstance(u, torch.Tensor):
                # Detach from the graph before converting to a plain int list.
                u = u.detach().cpu().numpy().astype(int).flatten().tolist()
            else:
                # BUG FIX: original called .clone().cpu() on a non-tensor, which
                # would always raise AttributeError in this branch.
                u = np.asarray(u)
        # Both branches expose .copy() (numpy array or list); return a copy so
        # callers cannot mutate internal state.
        return u.copy(), flag

    def learn(self, transitions, other_agents, uav):
        """Run one MADDPG training step on a batch of transitions."""
        self.policy.train(transitions, other_agents, uav)