import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import os

# Environment / training hyper-parameters
USER_COUNT = 10          # total number of ground users in the area
ACTIVE_USERS = 8         # users that actually have data to collect each episode
ENV_SIZE = 100           # side length of the square operating area
MAX_EPISODES = 1000      # total training episodes
EPISODE_LENGTH = 200     # hard step limit per episode
SWITCH_EVERY = 200  # rebuild the environment (fresh user layout) every 200 episodes
BATCH_SIZE = 128         # replay minibatch size
GAMMA = 0.99             # discount factor
LEARNING_RATE = 1e-3     # Adam learning rate
DELAY_WEIGHT = 0.7       # weight of the latency penalty in the reward
ENERGY_WEIGHT = 0.3      # weight of the energy penalty in the reward
ACTION_SPACE = 4  # four movement directions (up / right / down / left)
MAX_SPEED = 5.0          # distance the UAV travels per step
INITIAL_ENERGY = 100     # UAV starting energy budget
MAX_LATENCY = 50         # NOTE(review): defined but never used in this file

# Experience-replay record: one (s, a, s', r) transition.
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))


class UAVEnv:
    """UAV data-collection environment on a 2D square area.

    A single UAV starts at the center of an ENV_SIZE x ENV_SIZE area and must
    fly within range (< 5 units) of each currently-active user to collect its
    data before the step limit is reached. Follows the classic Gym-style
    reset()/step() interface.
    """

    def __init__(self):
        # Fixed user locations, sampled uniformly over the area once per env.
        self.user_positions = np.random.rand(USER_COUNT, 2) * ENV_SIZE
        # Subset of users that currently have data to collect.
        self.active_users = random.sample(range(USER_COUNT), ACTIVE_USERS)
        self.uav_pos = np.array([ENV_SIZE / 2, ENV_SIZE / 2])  # start at center
        self.energy = INITIAL_ENERGY
        self.collected = [False] * USER_COUNT  # per-user collected flag
        self.steps = 0

    def reset(self):
        """Start a new episode: resample active users, recenter the UAV.

        Note: user *positions* are kept; only the active subset changes.
        Returns the initial observation vector.
        """
        self.active_users = random.sample(range(USER_COUNT), ACTIVE_USERS)
        self.uav_pos = np.array([ENV_SIZE / 2, ENV_SIZE / 2])
        self.energy = INITIAL_ENERGY
        self.collected = [False] * USER_COUNT
        self.steps = 0
        return self._get_state()

    def _get_state(self):
        """Observation: UAV (x, y), active users' coordinates, per-user active mask."""
        active_positions = self.user_positions[self.active_users]
        state = np.concatenate([
            self.uav_pos,
            active_positions.flatten(),
            [1 if i in self.active_users else 0 for i in range(USER_COUNT)]
        ])
        return state.astype(np.float32)

    def step(self, action):
        """Advance one timestep.

        action: index into four unit directions (up, right, down, left).
        Returns (state, reward, done, info); info['collected'] is the number
        of users collected so far.
        """
        dx, dy = [(0, -1), (1, 0), (0, 1), (-1, 0)][action]  # up, right, down, left
        self.uav_pos = np.clip(self.uav_pos + MAX_SPEED * np.array([dx, dy]), 0, ENV_SIZE)

        # Movement energy cost proportional to distance flown this step.
        self.energy -= np.linalg.norm(MAX_SPEED * np.array([dx, dy])) * 0.1
        self.steps += 1

        reward = 0
        for i, user in enumerate(self.active_users):
            # Within 5 units of an uncollected active user -> collect it.
            if not self.collected[user] and np.linalg.norm(self.uav_pos - self.user_positions[user]) < 5:
                self.collected[user] = True
                reward += 10  # reward for a successful collection

        # Latency penalty: grows linearly with elapsed steps.
        latency = self.steps / len(self.active_users)
        reward -= DELAY_WEIGHT * latency

        # Energy penalty. NOTE(review): this charges the *cumulative* energy
        # spent so far on every step (not the per-step delta), so energy use is
        # penalized repeatedly — confirm this shaping is intentional.
        reward -= ENERGY_WEIGHT * (INITIAL_ENERGY - self.energy)

        # Penalty for each active user still uncollected at the horizon.
        if self.steps == EPISODE_LENGTH:
            remaining = sum(1 for u in self.active_users if not self.collected[u])
            reward -= remaining * 5

        # Episode ends when every active user is collected or time runs out.
        done = all(self.collected[u] for u in self.active_users) or self.steps >= EPISODE_LENGTH
        return self._get_state(), reward, done, {'collected': sum(self.collected)}


class DQN(nn.Module):
    """Fully connected Q-network: state vector in, one Q-value per action out."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        hidden = 128
        # Two hidden layers of 128 units with ReLU, plus a linear output head.
        # Kept under the attribute name `net` so saved state_dict keys match.
        layers = [
            nn.Linear(input_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, output_dim),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return Q-values of shape (batch, output_dim) for state batch x."""
        return self.net(x)


class CRLAgent:
    """DQN agent with epsilon-greedy exploration, replay memory, and a target network.

    Fixes over the previous version:
    - `select_action` was purely greedy (`steps_done` was tracked but unused),
      so the agent never explored; it now decays epsilon linearly.
    - The target network was never re-synced after construction, leaving the
      bootstrap targets permanently stale; it is now synced periodically.
    - `optimize_model` broadcast a (B,1) reward against a (B,) next-state value
      producing a (B,B) target tensor; shapes are now aligned to (B,1).
    """

    EPS_START = 1.0          # initial exploration rate
    EPS_END = 0.05           # final exploration rate
    EPS_DECAY_STEPS = 20000  # env steps over which epsilon decays linearly
    TARGET_SYNC_EVERY = 500  # optimizer steps between target-network syncs

    def __init__(self, state_dim, action_dim):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.policy_net = DQN(state_dim, action_dim).to(self.device)
        self.target_net = DQN(state_dim, action_dim).to(self.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.action_dim = action_dim
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=LEARNING_RATE)
        self.memory = deque(maxlen=100000)  # replay buffer (FIFO when full)

        self.steps_done = 0      # env steps taken (drives epsilon decay)
        self.optimize_steps = 0  # gradient steps taken (drives target sync)
        self.episode_rewards = []
        self.episode_collected = []

    def select_action(self, state):
        """Epsilon-greedy action selection for a single (unbatched) state tensor."""
        frac = min(1.0, self.steps_done / self.EPS_DECAY_STEPS)
        eps = self.EPS_START + frac * (self.EPS_END - self.EPS_START)
        self.steps_done += 1
        if random.random() < eps:
            return random.randrange(self.action_dim)
        with torch.no_grad():
            q_values = self.policy_net(state.unsqueeze(0))  # add batch dimension
            return q_values.argmax().item()

    def store_transition(self, *args):
        """Append one (state, action, next_state, reward) transition to replay memory."""
        self.memory.append(Transition(*args))

    def optimize_model(self):
        """Run one gradient step on a random replay minibatch.

        Returns the scalar loss, or None if the buffer is still too small.
        """
        if len(self.memory) < BATCH_SIZE:
            return None

        transitions = random.sample(self.memory, BATCH_SIZE)
        batch = Transition(*zip(*transitions))

        # Mask of transitions whose next state is non-terminal (not None).
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)),
                                      device=self.device, dtype=torch.bool)

        state_batch = torch.stack(batch.state)
        action_batch = torch.tensor(batch.action, device=self.device).unsqueeze(1)
        # Pin dtype: env rewards are numpy float64; keep everything float32.
        reward_batch = torch.tensor(batch.reward, device=self.device,
                                    dtype=torch.float32).unsqueeze(1)

        # Q(s, a) for the actions actually taken.
        state_action_values = self.policy_net(state_batch).gather(1, action_batch)

        # max_a' Q_target(s', a'); zero for terminal next states. Guarded so an
        # all-terminal batch doesn't crash torch.stack on an empty list.
        next_state_values = torch.zeros(BATCH_SIZE, device=self.device)
        if non_final_mask.any():
            non_final_next_states = torch.stack([s for s in batch.next_state if s is not None])
            with torch.no_grad():
                next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(1)[0]

        # BUG FIX: unsqueeze aligns (B,) values with the (B,1) rewards; the
        # original sum broadcast to (B,B) and produced a wrong MSE target.
        expected_state_action_values = reward_batch + GAMMA * next_state_values.unsqueeze(1)

        loss = nn.MSELoss()(state_action_values, expected_state_action_values)

        self.optimizer.zero_grad()
        loss.backward()
        # Element-wise gradient clamp for stability.
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

        # Hard-sync the target network at a fixed cadence.
        self.optimize_steps += 1
        if self.optimize_steps % self.TARGET_SYNC_EVERY == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())

        return loss.item()


def main():
    """Run the full training loop, plot learning curves, and save the policy net."""
    env = UAVEnv()
    state_dim = len(env.reset())
    agent = CRLAgent(state_dim, ACTION_SPACE)

    print(f"Using device: {agent.device}")

    for episode in range(MAX_EPISODES):
        obs = torch.from_numpy(env.reset()).float().to(agent.device)
        total_reward = 0

        for _ in range(EPISODE_LENGTH):
            action = agent.select_action(obs)
            next_obs, reward, done, info = env.step(action)
            # Terminal transitions are stored with next_state=None.
            next_obs = None if done else torch.from_numpy(next_obs).float().to(agent.device)

            agent.store_transition(obs, action, next_obs, reward)
            agent.optimize_model()

            total_reward += reward
            obs = next_obs
            if done:
                break

        agent.episode_rewards.append(total_reward)
        agent.episode_collected.append(info['collected'])

        if episode % 10 == 0:
            print(f"Episode: {episode} | Reward: {total_reward:.2f} | Collected: {info['collected']}/{ACTIVE_USERS}")

        # Periodically restart with a freshly generated user layout.
        if (episode + 1) % SWITCH_EVERY == 0:
            print(f"Switching to new user set at episode {episode + 1}")
            env = UAVEnv()

    # Plot the two learning curves side by side.
    plt.figure(figsize=(12, 5))
    curves = [
        (agent.episode_rewards, 'Training Rewards', 'Reward'),
        (agent.episode_collected, 'Collected Tasks', 'Count'),
    ]
    for panel, (series, title, ylabel) in enumerate(curves, start=1):
        plt.subplot(1, 2, panel)
        plt.plot(series)
        plt.title(title)
        plt.xlabel('Episode')
        plt.ylabel(ylabel)
    plt.tight_layout()
    plt.savefig('training_results.png')
    plt.show()

    # Persist the trained policy network weights.
    torch.save(agent.policy_net.state_dict(), 'crl_uav.pth')


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()