import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from collections import deque
import random
import math


class ActorNetwork(nn.Module):
    """Hybrid-action policy network.

    Maps a flat state vector to two sigmoid-bounded heads:
      * a continuous head of ``continuous_action_dim`` values in (0, 1),
        later rescaled by the caller to heading angle and flight distance;
      * a discrete head of ``discrete_action_dim`` values in (0, 1), later
        thresholded/sampled by the caller into binary decisions (b, x, y, z).

    With M=10 sub-regions and N=3 UAVs the default state dimension is
    3 (positions) + 10 (AoI) + 30 (connectivity) + 3 (battery)
    + 3 (harvested energy) + 9 (data volumes) = 58; the continuous head is
    3x2=6 wide and the discrete head 3x4=12 wide.
    """

    def __init__(self, state_dim=58, continuous_action_dim=6, discrete_action_dim=12):
        super(ActorNetwork, self).__init__()
        # Shared feature extractor feeding both action heads.
        self.fc_shared = nn.Sequential(
            nn.Linear(state_dim, 256), nn.ReLU(),
            nn.Linear(256, 128), nn.ReLU(),
        )
        # Continuous head: (theta, d) pairs, squashed into (0, 1).
        self.continuous_head = nn.Sequential(
            nn.Linear(128, 64), nn.ReLU(),
            nn.Linear(64, continuous_action_dim), nn.Sigmoid(),
        )
        # Discrete head: per-decision values in (0, 1).
        self.discrete_head = nn.Sequential(
            nn.Linear(128, 64), nn.ReLU(),
            nn.Linear(64, discrete_action_dim), nn.Sigmoid(),
        )

    def forward(self, state):
        """Return ``(continuous_actions, discrete_logits)`` for a state batch."""
        features = self.fc_shared(state)
        return self.continuous_head(features), self.discrete_head(features)


class CriticNetwork(nn.Module):
    """State-action value network: maps (state, action) to a scalar Q-value."""

    def __init__(self, state_dim, action_dim):
        super(CriticNetwork, self).__init__()
        # The state and the flattened hybrid action are scored jointly.
        self.q_network = nn.Sequential(
            nn.Linear(state_dim + action_dim, 256), nn.ReLU(),
            nn.Linear(256, 128), nn.ReLU(),
            nn.Linear(128, 1),
        )

    def forward(self, state, action):
        """Concatenate state and action along the feature axis and score them."""
        joint = torch.cat([state, action], dim=1)
        return self.q_network(joint)


class MDC2_DRL_Agent:
    """DDPG-style agent with a hybrid (continuous + discrete) action space.

    Continuous actions per UAV: heading angle theta in [0, 2*pi] and flight
    distance d in [0, l_max].  Discrete actions per UAV: four binary
    decisions (b, x, y, z).  A primary actor/critic pair is trained; a
    target pair is tracked with soft updates (paper Eq. 32).

    Bug fix vs. the original: the replay buffer stores continuous actions
    already scaled to physical units (see ``get_action``), but the critic
    updates previously fed raw [0, 1] actor outputs to the critic, so
    Q(s, a) was trained on two incompatible action spaces.  The actor and
    target-actor outputs are now scaled identically via
    ``_scale_continuous`` before reaching the critic.
    """

    def __init__(self, M=10, N=3):
        """
        Args:
            M: number of sub-regions.
            N: number of UAVs.
        """
        self.M = M
        self.N = N
        self.l_max = 5.0  # maximum flight distance per slot (km)
        self.H = 0.1      # flight altitude (km)
        self.T = 100      # time slots per episode

        # State layout: positions(N) + AoI(M) + connectivity(M*N)
        # + battery(N) + harvested energy(N) + data volumes(3N).
        self.state_dim = N + M + M * N + N + N + 3 * N
        self.continuous_action_dim = 2 * N  # (theta, d) per UAV
        self.discrete_action_dim = 4 * N    # (b, x, y, z) per UAV

        # Primary and target networks.
        self.p_actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.p_critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)
        self.t_actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.t_critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)

        # Targets start as exact copies of the primaries.
        self.hard_update(self.t_actor, self.p_actor)
        self.hard_update(self.t_critic, self.p_critic)

        self.actor_optimizer = optim.Adam(self.p_actor.parameters(), lr=0.0001)
        self.critic_optimizer = optim.Adam(self.p_critic.parameters(), lr=0.0002)

        self.gamma = 0.95  # discount factor
        self.tau = 0.01    # soft-update coefficient
        self.memory = deque(maxlen=100000)
        self.batch_size = 64

    def hard_update(self, target, source):
        """Copy every parameter of ``source`` into ``target``."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(param.data)

    def _scale_continuous(self, continuous):
        """Map raw actor outputs in [0, 1] to physical action ranges.

        Even indices (headings) are scaled to [0, 2*pi]; odd indices
        (distances) are scaled to [0, l_max].  Accepts either a torch
        tensor (kept differentiable) or a numpy array.
        """
        if torch.is_tensor(continuous):
            scale = torch.ones_like(continuous)
            scale[..., 0::2] = 2 * math.pi
            scale[..., 1::2] = self.l_max
            return continuous * scale
        scaled = np.array(continuous, dtype=np.float32)
        scaled[..., 0::2] *= 2 * math.pi
        scaled[..., 1::2] *= self.l_max
        return scaled

    def get_action(self, state, epsilon):
        """Select one hybrid action for ``state``.

        Returns:
            (continuous_actions, discrete_actions): continuous actions in
            physical units (theta in [0, 2*pi], d in [0, l_max]); discrete
            actions as a binary float32 vector chosen greedily with
            probability 1 - epsilon, uniformly at random otherwise.
        """
        state_tensor = torch.FloatTensor(state).unsqueeze(0)

        with torch.no_grad():
            continuous_raw, discrete_probs = self.p_actor(state_tensor)

        continuous_actions = self._scale_continuous(continuous_raw.numpy()[0])

        if random.random() > epsilon:
            # Deterministic policy: threshold the head outputs at 0.5.
            discrete_actions = (discrete_probs.numpy()[0] > 0.5).astype(np.float32)
        else:
            # Uniform random exploration over binary decisions.
            discrete_actions = np.random.randint(0, 2, size=self.discrete_action_dim).astype(np.float32)

        return continuous_actions, discrete_actions

    def store_transition(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition to the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def update_networks(self):
        """Run one critic and one actor gradient step from a replay minibatch.

        No-op until the buffer holds at least ``batch_size`` transitions.
        """
        if len(self.memory) < self.batch_size:
            return

        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states))
        actions = torch.FloatTensor(np.array(actions))
        rewards = torch.FloatTensor(np.array(rewards)).unsqueeze(1)
        next_states = torch.FloatTensor(np.array(next_states))
        dones = torch.FloatTensor(np.array(dones)).unsqueeze(1)

        # --- Critic update (paper Eq. 29): TD target from target networks.
        with torch.no_grad():
            next_continuous, next_discrete = self.t_actor(next_states)
            # Scale to physical units so the critic always sees actions in
            # the same space as the env-scaled actions stored in replay.
            next_actions = torch.cat([self._scale_continuous(next_continuous), next_discrete], dim=1)
            next_q = self.t_critic(next_states, next_actions)
            target_q = rewards + (1 - dones) * self.gamma * next_q

        current_q = self.p_critic(states, actions)
        critic_loss = nn.MSELoss()(current_q, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.p_critic.parameters(), 1.0)
        self.critic_optimizer.step()

        # --- Actor update (paper Eq. 31): deterministic policy gradient.
        continuous_actions, discrete_probs = self.p_actor(states)
        # The discrete probabilities are used directly as a differentiable
        # relaxation of the binary decisions taken at execution time.
        policy_actions = torch.cat([self._scale_continuous(continuous_actions), discrete_probs], dim=1)
        actor_loss = -self.p_critic(states, policy_actions).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.p_actor.parameters(), 1.0)
        self.actor_optimizer.step()

        # --- Target-network soft update (paper Eq. 32).
        self.soft_update(self.p_actor, self.t_actor, self.tau)
        self.soft_update(self.p_critic, self.t_critic, self.tau)

    def soft_update(self, local_model, target_model, tau):
        """Polyak-average ``local_model`` into ``target_model`` with weight tau."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)


class UAVEnvironment:
    """Simplified multi-UAV data-collection environment.

    M sub-regions, N UAVs, T time slots per episode.  The flat state vector
    (length N + M + M*N + N + N + 3N) packs, in order: normalized UAV
    positions, per-region AoI, one-hot connectivity, battery levels,
    harvested energy, and per-UAV data volumes.

    Bug fixes vs. the original:
      * per-UAV movement energy now uses that UAV's own flight distance
        (the original reused the first loop's leaked ``d`` — always the
        last UAV's distance — for every UAV);
      * AoI now ages by one slot in *every* region each step and is reset
        only in regions served by a communicating UAV (the original left
        unvisited regions' AoI frozen at 0 forever).
    """

    def __init__(self, M=10, N=3, T=100):
        """
        Args:
            M: number of sub-regions (default 10).
            N: number of UAVs (default 3).
            T: total time slots per episode (default 100).
        """
        self.M = M
        self.N = N
        self.T = T
        self.l_max = 5.0  # maximum flight distance per slot (km)
        self.H = 0.1      # flight altitude (km)
        self.reset()

    def reset(self):
        """Start a new episode and return the initial state vector."""
        self.current_step = 0
        # UAVs start in random sub-regions.
        self.drone_positions = np.random.randint(0, self.M, size=self.N)
        # Age of Information per sub-region (paper Eq. 12).
        self.aoi = np.zeros(self.M)
        # Battery level per UAV, percent (paper Sec. 2.6).
        self.battery = np.full(self.N, 100.0)
        # Energy harvested per UAV per slot (paper Sec. 2.6).
        self.energy_harvest = np.random.uniform(0, 10, size=self.N)
        # Data volume per UAV: local / U2U / U2B (paper Eq. 4).
        self.data_volume = np.random.uniform(0, 50, size=(self.N, 3))

        return self._get_state()

    def _get_state(self):
        """Assemble the normalized flat state vector (paper Eq. 23)."""
        state = []
        # 1. UAV positions (N dims), normalized to [0, 1).
        state.extend(self.drone_positions / self.M)
        # 2. Per-region AoI (M dims); normalization assumes max AoI of 100.
        state.extend(self.aoi / 100)
        # 3. One-hot connectivity (M*N dims): which region each UAV occupies.
        for i in range(self.N):
            state.extend([1 if k == self.drone_positions[i] else 0 for k in range(self.M)])
        # 4. Battery levels (N dims), normalized.
        state.extend(self.battery / 100)
        # 5. Harvested energy (N dims), normalized.
        state.extend(self.energy_harvest / 10)
        # 6. Data volumes (3N dims: local/U2U/U2B), normalized.
        state.extend(self.data_volume.flatten() / 50)

        return np.array(state, dtype=np.float32)

    def step(self, continuous_actions, discrete_actions):
        """Advance the environment by one time slot.

        Args:
            continuous_actions: length-2N array of (theta, d) per UAV,
                with theta in [0, 2*pi] and d in [0, l_max].
            discrete_actions: length-4N array of (b, x, y, z) per UAV.

        Returns:
            (next_state, reward, done, info) in the Gym convention.
        """
        # 1. Move each UAV (simplified: 1-D projection onto the region ring).
        for i in range(self.N):
            theta = continuous_actions[i * 2]
            d = continuous_actions[i * 2 + 1]
            new_pos = int((self.drone_positions[i] + d * math.cos(theta)) % self.M)
            self.drone_positions[i] = new_pos

        # 2. AoI ages by one slot everywhere; regions served below are reset.
        self.aoi += 1

        # 3. Offloading decisions and energy bookkeeping per UAV.
        for i in range(self.N):
            b = discrete_actions[i * 4]      # communicate this slot?
            x = discrete_actions[i * 4 + 1]  # local-processing share
            y = discrete_actions[i * 4 + 2]  # U2U-offloading share
            z = discrete_actions[i * 4 + 3]  # U2B-offloading share

            # Normalize the processing shares to sum to 1 (eps avoids /0).
            total = x + y + z + 1e-6
            x, y, z = x / total, y / total, z / total

            # A communicating UAV delivers fresh data: reset its region's AoI.
            if b > 0.5:
                self.aoi[self.drone_positions[i]] = 0

            # Energy model (paper Eqs. 14-17), using this UAV's own distance.
            d_i = continuous_actions[i * 2 + 1]
            move_energy = d_i * 0.5
            comm_energy = b * 2.0
            comp_energy = (x * 1.0 + y * 1.2 + z * 1.5) * 10

            # Battery update (paper Sec. 2.6): drain, harvest, clamp to [0, 100].
            self.battery[i] -= (move_energy + comm_energy + comp_energy)
            self.battery[i] = max(0, min(100, self.battery[i] + self.energy_harvest[i]))

            # Fresh data arrives each slot.
            self.data_volume[i] = np.random.uniform(0, 50, size=3)

        # 4. Reward (paper Eq. 13): weighted penalty on AoI and energy drawn.
        aoi_penalty = -np.sum(self.aoi)
        energy_penalty = -np.sum(100 - self.battery)
        reward = aoi_penalty * 0.7 + energy_penalty * 0.3

        # 5. Advance time.
        self.current_step += 1
        done = self.current_step >= self.T
        next_state = self._get_state()

        return next_state, reward, done, {}


# 训练过程
def train(episodes=10000, M=10, N=3, T=100,
          epsilon_start=1.0, epsilon_min=0.01, epsilon_decay=0.995):
    """Train the MDC2-DRL agent on the UAV environment.

    All parameters default to the original script's hard-coded values, so
    ``train()`` behaves exactly as before; they are exposed to allow short
    smoke runs and alternative problem sizes.

    Args:
        episodes: number of training episodes.
        M: number of sub-regions (paper Sec. 2.1).
        N: number of UAVs.
        T: time slots per episode.
        epsilon_start: initial exploration rate.
        epsilon_min: floor on the exploration rate.
        epsilon_decay: per-episode multiplicative decay of epsilon.
    """
    env = UAVEnvironment(M=M, N=N, T=T)
    agent = MDC2_DRL_Agent(M=M, N=N)

    epsilon = epsilon_start

    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        done = False

        while not done:
            # 1. Select a hybrid action (epsilon-greedy on the discrete part).
            continuous_actions, discrete_actions = agent.get_action(state, epsilon)

            # 2. Step the environment.
            next_state, reward, done, _ = env.step(continuous_actions, discrete_actions)

            # 3. Store the transition as one flat action vector.
            action = np.concatenate([continuous_actions, discrete_actions])
            agent.store_transition(state, action, reward, next_state, done)

            # 4. Learn (no-op until the replay buffer is warm).
            agent.update_networks()

            # 5. Advance.
            state = next_state
            total_reward += reward

        # Decay exploration once per episode.
        epsilon = max(epsilon_min, epsilon * epsilon_decay)

        print(f"Episode: {episode + 1}, Total Reward: {total_reward:.2f}, "
              f"Avg AoI: {np.mean(env.aoi):.2f}, "
              f"Avg Battery: {np.mean(env.battery):.2f}%, "
              f"Epsilon: {epsilon:.3f}")


# Script entry point: run a full training session with default parameters.
if __name__ == "__main__":
    train()
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from collections import deque
import random
import math
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms