import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


# define the actor network
class Actor(nn.Module):
    """Policy network mapping a UAV's state to a one-hot cruise action.

    A 4-layer MLP with dropout produces action logits; a Gumbel-Softmax
    sample (with straight-through gradients) selects a discrete action,
    after masking moves that would leave the 200x200 map or revisit a
    position already present in the UAV's flight history.
    """

    def __init__(self, args, agent_id, num_action, tau=1.0):
        """
        Args:
            args: config object; only ``args.num_states`` (input width) is read.
            agent_id: agent identifier (kept for interface compatibility;
                not used inside the network).
            num_action: number of discrete actions. NOTE(review): the cruise
                mask in ``select_cruise_action`` assumes exactly 4 actions.
            tau: initial Gumbel-Softmax temperature; decayed per training step.
        """
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(args.num_states, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 32)
        self.action_out = nn.Linear(32, num_action)
        self.dropout = nn.Dropout(0.1)
        # Temperature annealing schedule for the Gumbel-Softmax sampler.
        self.initial_tau = tau
        self.min_tau = 0.1
        self.tau_decay = 0.999

    def forward(self, x, uav, training_step=0):
        """Compute a one-hot action for the given state.

        Args:
            x: state tensor of shape (batch, args.num_states).
            uav: object exposing ``position`` (an (x, y) pair) and
                ``history_position`` (container of past positions); both
                feed the invalid-action mask.
            training_step: current training step; drives temperature decay.

        Returns:
            One-hot action tensor; gradients flow through the soft
            probabilities via the straight-through estimator.
        """
        # Cache the UAV's coordinates and visited cells; the Gumbel-Softmax
        # masking below reads them from ``self``.
        self.x, self.y = uav.position
        self.history_track = uav.history_position
        # Exponentially anneal the temperature, floored at min_tau.
        self.tau = max(self.min_tau, self.initial_tau * (self.tau_decay ** training_step))

        # MLP trunk with dropout between the hidden layers.
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = F.relu(self.fc3(x))
        x = self.dropout(x)
        x = F.relu(self.fc4(x))
        logits = self.action_out(x)
        # Hard (one-hot) Gumbel-Softmax sample.
        return self.gumbel_softmax(logits, hard=True)

    def gumbel_softmax(self, logits, hard=False):
        """Gumbel-Softmax sampling with optional hard (one-hot) output.

        Invalid cruise actions (off-map or already-visited positions) are
        masked out before the argmax, so the hard action is always valid.
        """
        # Sample Gumbel(0, 1) noise as -log(Exp(1)).
        gumbels = -torch.empty_like(logits).exponential_().log()
        y = logits + gumbels
        soft_action = F.softmax(y / self.tau, dim=-1)

        # Mask invalid actions (boundary moves / revisited positions).
        cruise_action = self.select_cruise_action().squeeze()
        # Clamp away exact zeros so the masked probabilities cannot all
        # underflow to zero before the argmax.
        soft_action = torch.clamp(soft_action, min=0.0000001, max=1)
        masked_action = torch.mul(soft_action, cruise_action)

        if hard:
            # argmax over the *masked* probabilities picks a valid action.
            index = masked_action.max(dim=-1, keepdim=True)[1]
            hard_action = torch.zeros_like(soft_action).scatter_(-1, index, 1.0)
            # Straight-through estimator: forward value is the one-hot
            # action, backward gradients flow through soft_action.
            return (hard_action - soft_action).detach() + soft_action

        # NOTE(review): the soft path returns the *unmasked* probabilities;
        # the mask only affects the hard argmax. Confirm this is intended.
        return soft_action

    def select_cruise_action(self):
        """Build a (1, 4) mask of valid cruise actions.

        An entry is 1 when the corresponding action (0=up, 1=down, 2=left,
        3=right) keeps the UAV on the 200x200 map and does not revisit a
        position from the flight history; otherwise 0.
        """
        # Create the mask on the same device as the network parameters.
        device = next(self.parameters()).device

        actions = [0, 1, 2, 3]
        action_values = torch.ones(4, 1, device=device)

        # Zero out moves that would leave the map (each step covers 25 units).
        for possible_action in actions:
            if self.y <= 25 and possible_action == 1:
                action_values[possible_action] = 0
            if self.y >= 175 and possible_action == 0:
                action_values[possible_action] = 0
            if self.x <= 25 and possible_action == 2:
                action_values[possible_action] = 0
            if self.x >= 175 and possible_action == 3:
                action_values[possible_action] = 0

        # Zero out moves that would land on an already-visited position.
        position = (self.x, self.y)
        for possible_action in actions:
            position_next = self.execute_cruise_action(position, possible_action)
            if position_next in self.history_track:
                action_values[possible_action] = 0

        return torch.reshape(action_values, (1, 4))

    def execute_cruise_action(self, position, action):
        """Return the position reached from ``position`` by ``action``.

        Actions: 0=up (+y), 1=down (-y), 2=left (-x), 3=right (+x); any
        other value leaves the position unchanged (hover). Each move covers
        25 units on a 200x200 grid.
        """
        x, y = position
        distance_per_time = 25
        grid_size = 200

        if action == 0 and y < grid_size - 1:  # up
            y += distance_per_time
        elif action == 1:  # down
            y -= distance_per_time
        elif action == 2:  # left
            x -= distance_per_time
        elif action == 3:  # right
            x += distance_per_time
        # NOTE(review): only action 0 carries a bounds check here; the
        # boundary mask in select_cruise_action is what keeps moves on-map.

        return (x, y)
    


class Critic(nn.Module):
    """Centralized Q-network mapping all agents' states and actions to a scalar value."""

    def __init__(self, args):
        """
        Args:
            args: config object providing ``num_uavs``, ``num_states`` and
                ``num_position_action`` (used both for the input width and
                as the action-normalization scale).
        """
        super(Critic, self).__init__()
        self.max_action = args.num_position_action
        self.fc1 = nn.Linear(args.num_uavs * args.num_states + args.num_uavs * args.num_position_action, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 64)
        self.q_out = nn.Linear(64, 1)

    def forward(self, state, action):
        """Return Q(state, action) for the joint state and action.

        Args:
            state: list of per-agent state tensors, each (batch, num_states).
            action: list of per-agent action tensors, each
                (batch, num_position_action); scaled by 1/max_action.

        Returns:
            Tensor of shape (batch, 1) with the Q-value.
        """
        state = torch.cat(state, dim=1)
        # Normalize actions WITHOUT mutating the caller's tensors. The
        # previous in-place ``action[i] /= self.max_action`` silently
        # rescaled the caller's action list on every call (and raises on
        # leaf tensors with requires_grad=True).
        action = torch.cat([a / self.max_action for a in action], dim=1)
        x = torch.cat([state, action], dim=1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        q_value = self.q_out(x)
        return q_value



