import torch.nn as nn
import torch
import numpy as np

class ActorNetwork(nn.Module):
    """Epsilon-greedy actor for a UAV moving on a bounded 2-D area.

    Maps a state vector to Q-values over 4 discrete cruise actions and
    returns the index of the chosen, boundary-legal action.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=128):
        """Build a 2-hidden-layer MLP: state_dim -> hidden -> hidden -> action_dim."""
        super(ActorNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.out = nn.Linear(hidden_dim, action_dim)
        self.activation = nn.ReLU()

    def forward(self, state, uav):
        """Score `state` and return an action index in {0, 1, 2, 3}.

        Args:
            state: array-like state vector of length `state_dim`.
            uav: object exposing `position` as an (x, y) pair; used to
                mask actions that would leave the map.
        """
        # as_tensor reuses the underlying buffer when possible and avoids
        # the UserWarning torch.tensor raises when given a tensor input.
        state = torch.as_tensor(state, dtype=torch.float)
        x = self.activation(self.fc1(state))
        x = self.activation(self.fc2(x))
        # Cache the UAV coordinates consulted by the boundary mask below.
        self.x, self.y = uav.position
        return self.select_cruise_action(self.out(x))

    def select_cruise_action(self, action_values):
        """Epsilon-greedy selection restricted to in-bounds actions.

        Direction convention is inferred from the bounds checks — TODO
        confirm against the environment: action 1 decreases y, 0 increases
        y, 2 decreases x, 3 increases x; coordinates live in [25, 975].
        """
        actions = [0, 1, 2, 3]
        # Collect the moves that would push the UAV past a map edge.
        invalid = set()
        if self.y <= 25:
            invalid.add(1)
        if self.y >= 975:
            invalid.add(0)
        if self.x <= 25:
            invalid.add(2)
        if self.x >= 975:
            invalid.add(3)
        for a in invalid:
            # Keep the -10000 penalty so callers inspecting action_values
            # see the same masking the original applied.
            action_values[a] = -10000

        # BUG FIX: the original explored over ALL four actions, so with
        # probability epsilon it could pick a masked (off-map) move.
        # Explore only among valid actions; fall back to all actions in
        # the (geometrically impossible) case that everything is masked.
        valid = [a for a in actions if a not in invalid] or actions
        if np.random.uniform(0, 1) < 0.1:
            return np.random.choice(valid)  # random exploration

        # Greedy branch: argmax of the Q-values over the valid actions.
        best_action = valid[0]
        max_q_value = action_values[best_action]
        for a in valid:
            if action_values[a] > max_q_value:
                best_action = a
                max_q_value = action_values[a]
        return best_action


