import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random
from collections import deque
import matplotlib.pyplot as plt
import time
import copy

# --- Hyperparameters ---
# Environment
GRID_SIZE = 100.0  # Side length of the square service area (m)
TOTAL_USERS = 10  # Size of the fixed pool of candidate user locations
ACTIVE_USERS = 8  # Number of users with pending tasks in any one task set
UAV_SPEED = 20.0  # m/s, used to convert flight distance into delay
ENERGY_COEFF = 0.1  # Energy consumption coefficient (e.g., Joules/meter)
W_DELAY = 0.5  # Weight for delay in the cost function
W_ENERGY = 0.5  # Weight for energy in the cost function

# Training
GAMMA = 0.99  # Discount factor
BATCH_SIZE = 128  # Replay minibatch size
LEARNING_RATE = 1e-4  # Adam learning rate
REPLAY_MEMORY_SIZE = 10000  # Replay buffer capacity (transitions)
TARGET_UPDATE_FREQ = 20  # Episodes between target-network syncs
TOTAL_EPISODES = 600  # Total training episodes across all task sets
TASK_CHANGE_EPISODE = 200  # Change task set every 200 episodes

# Epsilon-Greedy
EPSILON_START = 1.0  # Initial exploration rate
EPSILON_END = 0.05  # Floor for the exploration rate
EPSILON_DECAY = 0.995  # Multiplicative decay applied per learning step

# CRL (EWC)
EWC_LAMBDA = 400.0  # Coefficient for EWC penalty, a critical hyperparameter

# Set device (GPU if available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")


class UAVEnv:
    """UAV Mobile Edge Computing environment.

    A single UAV starts at the grid center and must visit each active user
    once; every hop is charged a weighted mix of flight delay and energy.
    """

    def __init__(self):
        self.grid_size = GRID_SIZE
        # Fixed pool of candidate user positions, sampled once and reused
        # across all task sets.
        self.base_user_locations = np.random.rand(TOTAL_USERS, 2) * self.grid_size
        self.active_task_locations = None
        self.task_mask = None
        self.uav_pos = None
        self.current_task_indices = None
        self.change_task_set()

    def change_task_set(self):
        """Draw a fresh set of ACTIVE_USERS task locations from the pool."""
        chosen = np.random.choice(TOTAL_USERS, ACTIVE_USERS, replace=False)
        self.current_task_indices = chosen
        self.active_task_locations = self.base_user_locations[chosen]
        print("\n" + "=" * 50)
        print(f"New task environment generated. Active user indices: {self.current_task_indices}")
        print("=" * 50 + "\n")

    def reset(self):
        """Place the UAV at the grid center and mark every task unvisited."""
        self.uav_pos = np.array([self.grid_size / 2, self.grid_size / 2])
        self.task_mask = np.ones(ACTIVE_USERS)  # 1 = unvisited, 0 = visited
        return self._get_state()

    def _get_state(self):
        """State vector = normalized UAV position ++ task visitation mask."""
        scaled_pos = self.uav_pos / self.grid_size
        state_vec = np.concatenate([scaled_pos, self.task_mask])
        return torch.FloatTensor(state_vec).to(device)

    def step(self, action):
        """Fly to the user indexed by `action` (integer in 0..ACTIVE_USERS-1).

        Revisiting an already-served user ends the episode with a -100
        penalty; otherwise the reward is the negative weighted cost of
        the hop, and the episode finishes once every task is served.
        """
        if self.task_mask[action] == 0:
            # Terminal penalty for choosing a location that was already visited.
            collected = np.sum(1 - self.task_mask)
            return self._get_state(), -100.0, True, {"collected": collected}

        destination = self.active_task_locations[action]
        travel_dist = np.linalg.norm(self.uav_pos - destination)

        # Move the UAV and mark the task as served.
        self.uav_pos = destination
        self.task_mask[action] = 0

        # Weighted cost of the hop: flight time plus propulsion energy.
        delay_cost = travel_dist / UAV_SPEED
        energy_cost = travel_dist * ENERGY_COEFF
        reward = -(W_DELAY * delay_cost + W_ENERGY * energy_cost)

        done = np.all(self.task_mask == 0)  # all tasks served?
        info = {"collected": np.sum(1 - self.task_mask)}
        return self._get_state(), reward, done, info


class QNetwork(nn.Module):
    """MLP approximating Q(s, ·): two hidden ReLU layers of width 128."""

    def __init__(self, state_size, action_size):
        super().__init__()
        hidden = 128
        self.layer1 = nn.Linear(state_size, hidden)
        self.layer2 = nn.Linear(hidden, hidden)
        self.layer3 = nn.Linear(hidden, action_size)

    def forward(self, x):
        # Two ReLU-activated hidden layers; the head emits raw Q-values.
        h = F.relu(self.layer1(x))
        h = F.relu(self.layer2(h))
        return self.layer3(h)


class ReplayBuffer:
    """Fixed-capacity FIFO store of (s, a, r, s', done) transitions."""

    def __init__(self, capacity):
        # deque with maxlen silently evicts the oldest entry when full
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one transition, evicting the oldest if at capacity."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Uniformly sample `batch_size` distinct stored transitions."""
        return random.sample(self.buffer, batch_size)

    def __len__(self):
        return len(self.buffer)


class DQNAgent:
    """DQN agent with Elastic Weight Consolidation (EWC) for continual RL.

    Maintains policy/target networks, an epsilon-greedy behavior policy and a
    replay buffer. After each task, `register_ewc_task` snapshots the learned
    parameters plus a diagonal Fisher estimate; subsequent training pays a
    quadratic penalty (`_ewc_penalty`) for drifting from that snapshot.
    """

    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size

        self.policy_net = QNetwork(state_size, action_size).to(device)
        self.target_net = QNetwork(state_size, action_size).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=LEARNING_RATE)
        self.memory = ReplayBuffer(REPLAY_MEMORY_SIZE)

        self.epsilon = EPSILON_START

        # EWC state: snapshot and Fisher diagonal of the most recently
        # registered task (overwritten on each registration).
        self.ewc_task_count = 0
        self.ewc_star_params = {}
        self.ewc_fisher = {}

    def act(self, state):
        """Epsilon-greedy action selection; returns a (1, 1) long tensor."""
        if random.random() > self.epsilon:
            with torch.no_grad():
                # state.unsqueeze(0) adds a batch dimension
                return self.policy_net(state.unsqueeze(0)).max(1)[1].view(1, 1)
        else:
            return torch.tensor([[random.randrange(self.action_size)]], device=device, dtype=torch.long)

    def _ewc_penalty(self):
        """Quadratic penalty for drifting from the stored task parameters."""
        if self.ewc_task_count == 0:
            return 0.0

        penalty = 0.0
        for name, param in self.policy_net.named_parameters():
            if param.requires_grad:
                star_param = self.ewc_star_params[name]
                fisher = self.ewc_fisher[name]
                penalty += (fisher * (param - star_param).pow(2)).sum()
        return penalty

    def learn(self):
        """Sample a minibatch and take one gradient step on DQN loss + EWC."""
        if len(self.memory) < BATCH_SIZE:
            return

        transitions = self.memory.sample(BATCH_SIZE)
        batch = list(zip(*transitions))  # Transpose the batch

        # BUG FIX: states are stored as 1-D tensors of shape (state_size,),
        # so they must be STACKED into a (batch, state_size) matrix.
        # torch.cat would flatten them into one (batch*state_size,) vector
        # and crash inside the Linear layers with a shape mismatch.
        state_batch = torch.stack(batch[0])
        next_state_batch = torch.stack(batch[3])
        # Actions are (1, 1) and rewards/dones are (1,), so cat correctly
        # yields (batch, 1) and (batch,) respectively.
        action_batch = torch.cat(batch[1])
        reward_batch = torch.cat(batch[2])
        done_batch = torch.cat(batch[4])

        # Q(s, a) from the policy network for the actions actually taken
        q_values = self.policy_net(state_batch).gather(1, action_batch)

        # max_a' Q_target(s', a'); detached so no gradient flows to the target
        next_q_values = self.target_net(next_state_batch).max(1)[0].detach()

        # Bellman target; terminal transitions contribute only the reward
        expected_q_values = reward_batch + (GAMMA * next_q_values * (1 - done_batch.float()))

        # Huber loss between predicted and target Q-values
        loss = F.smooth_l1_loss(q_values, expected_q_values.unsqueeze(1))

        # Add the EWC penalty to resist forgetting previously learned tasks
        ewc_penalty = self._ewc_penalty()
        total_loss = loss + EWC_LAMBDA * ewc_penalty

        # Optimize the model with element-wise gradient clipping for stability
        self.optimizer.zero_grad()
        total_loss.backward()
        for param in self.policy_net.parameters():
            if param.grad is not None:
                param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

        # Decay epsilon once per learning step (i.e. per environment step)
        if self.epsilon > EPSILON_END:
            self.epsilon *= EPSILON_DECAY

    def _compute_fisher(self):
        """Estimate the diagonal Fisher information from replayed transitions.

        Treats softmax(Q) as a policy and accumulates squared gradients of the
        log-likelihood of the actions actually taken.
        """
        fisher = {}
        for name, param in self.policy_net.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param.data)

        self.policy_net.eval()
        # Use a bounded subset of memory to estimate Fisher
        n_samples = min(len(self.memory), 500)
        for state, action, _, _, _ in random.sample(self.memory.buffer, n_samples):
            self.optimizer.zero_grad()
            q_values = self.policy_net(state.unsqueeze(0))
            log_q_values = F.log_softmax(q_values, dim=1)
            log_likelihood = log_q_values.gather(1, action)
            log_likelihood.backward()

            for name, param in self.policy_net.named_parameters():
                # grad can be None for a parameter untouched by this backward
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.data.pow(2)

        # Average the squared gradients over the sampled transitions
        for name in fisher:
            fisher[name] /= n_samples

        self.policy_net.train()
        return fisher

    def register_ewc_task(self):
        """To be called after a task is learned: snapshot params + Fisher.

        Also clears the replay buffer so the next task does not train on
        stale transitions from the previous environment.
        """
        print(f"--- Registering EWC task {self.ewc_task_count + 1} ---")
        # Store optimal parameters for the completed task
        star_params = {}
        for name, param in self.policy_net.named_parameters():
            if param.requires_grad:
                star_params[name] = param.data.clone()
        self.ewc_star_params = star_params

        # Compute and store the (diagonal) Fisher Information Matrix
        self.ewc_fisher = self._compute_fisher()
        self.ewc_task_count += 1
        # Clear replay buffer for the new task to avoid task confusion
        self.memory = ReplayBuffer(REPLAY_MEMORY_SIZE)


if __name__ == '__main__':
    env = UAVEnv()
    # State = normalized UAV (x, y) followed by one mask entry per task.
    state_size = 2 + ACTIVE_USERS  # UAV_x, UAV_y, task_mask
    action_size = ACTIVE_USERS
    agent = DQNAgent(state_size, action_size)

    rewards_history = []
    trajectory_history = []

    start_time = time.time()

    for i_episode in range(1, TOTAL_EPISODES + 1):
        # --- Task Switching Logic ---
        # At each task boundary, consolidate the finished task via EWC
        # before switching the environment to a new task set.
        if (i_episode - 1) % TASK_CHANGE_EPISODE == 0 and i_episode > 1:
            agent.register_ewc_task()
            env.change_task_set()

        state = env.reset()
        episode_reward = 0
        episode_trajectory = [env.uav_pos.copy()]

        for t in range(ACTIVE_USERS * 2):  # Max steps to avoid infinite loops
            action = agent.act(state)
            next_state, reward, done, info = env.step(action.item())

            episode_reward += reward
            episode_trajectory.append(env.uav_pos.copy())

            # Store transition (reward/done wrapped as 1-element tensors)
            reward_tensor = torch.tensor([reward], device=device)
            done_tensor = torch.tensor([done], device=device)
            agent.memory.push(state, action, reward_tensor, next_state, done_tensor)

            state = next_state

            # Perform learning step
            agent.learn()

            if done:
                break

        rewards_history.append(episode_reward)
        if i_episode == TOTAL_EPISODES:  # Save last episode's trajectory
            trajectory_history = episode_trajectory

        print(f"Episode {i_episode}/{TOTAL_EPISODES} | Reward: {episode_reward:.2f} | "
              f"Tasks Collected: {info['collected']:.0f} | Epsilon: {agent.epsilon:.3f}")

        # Periodically sync the target network with the policy network
        if i_episode % TARGET_UPDATE_FREQ == 0:
            agent.target_net.load_state_dict(agent.policy_net.state_dict())

    end_time = time.time()
    print(f"\nTraining finished in {end_time - start_time:.2f} seconds.")

    # --- Plotting Results ---
    # 1. Reward Curve
    plt.figure(figsize=(12, 6))
    plt.plot(rewards_history)
    plt.title('Reward per Episode')
    plt.xlabel('Episode')
    plt.ylabel('Total Reward')
    # Add vertical lines for task changes (label only the first so the
    # legend does not repeat)
    for i in range(TASK_CHANGE_EPISODE, TOTAL_EPISODES, TASK_CHANGE_EPISODE):
        plt.axvline(x=i, color='r', linestyle='--', label=f'Task Change at Ep {i}' if i == TASK_CHANGE_EPISODE else "")
    plt.legend()
    plt.grid(True)
    plt.savefig('reward_curve.png')
    plt.show()

    # 2. UAV Trajectory
    plt.figure(figsize=(8, 8))
    trajectory = np.array(trajectory_history)
    start_point = trajectory[0, :]
    task_locations = env.active_task_locations

    plt.plot(task_locations[:, 0], task_locations[:, 1], 'ro', markersize=10, label='Task Locations')
    plt.plot(start_point[0], start_point[1], 'ks', markersize=12, label='Start (Center)')
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-o', label='UAV Trajectory')

    # BUG FIX: label each visited waypoint with its visit order. The old loop
    # put label k+1 on waypoint k, so the start point was labeled "1" and the
    # final waypoint received no label at all.
    for step_idx in range(1, len(trajectory)):
        plt.annotate(step_idx, (trajectory[step_idx, 0], trajectory[step_idx, 1]),
                     textcoords="offset points", xytext=(0, 5), ha='center')

    plt.title(f'UAV Trajectory for Last Episode (Episode {TOTAL_EPISODES})')
    plt.xlabel('X Coordinate (m)')
    plt.ylabel('Y Coordinate (m)')
    plt.xlim(0, GRID_SIZE)
    plt.ylim(0, GRID_SIZE)
    plt.legend()
    plt.grid(True)
    plt.gca().set_aspect('equal', adjustable='box')
    plt.savefig('trajectory.png')
    plt.show()