import copy
import random
import time
from collections import deque, namedtuple

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# Runtime configuration: prefer the GPU when CUDA is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Environment parameters
AREA_SIZE = 100  # side length of the square service area (m): 100m x 100m
NUM_TOTAL_USERS = 10  # Total number of fixed users
NUM_ACTIVE_USERS = 8  # Number of active users in each episode (one "task")
UAV_SPEED = 5  # m/s; each step also moves the UAV this many metres
UAV_COLLECTION_RADIUS = 5  # m; a user's task is collected within this distance
UAV_ENERGY_PER_METER = 0.1  # energy cost per metre flown (subtracted from reward)
MAX_STEPS_PER_EPISODE = 100

# CRL (continual reinforcement learning) hyper-parameters
MEMORY_SIZE = 50000  # replay capacity (per task; see TaskAwareReplayMemory)
BATCH_SIZE = 64
GAMMA = 0.99  # discount factor
EPSILON_START = 1.0  # epsilon-greedy schedule: start fully random ...
EPSILON_END = 0.01  # ... decay down to 1% random actions
EPSILON_DECAY = 0.995  # multiplicative decay applied once per episode
TARGET_UPDATE = 10  # episodes between target-network syncs
LEARNING_RATE = 0.001
TASK_SWITCH_EPISODES = 200  # Switch tasks every 200 episodes
NUM_EPISODES = 2000

# One replayable transition.
Experience = namedtuple('Experience', ('state', 'action', 'reward', 'next_state', 'done'))


class TaskAwareReplayMemory:
    """Replay buffer that keeps one bounded deque of experiences per task id.

    Sampling and length queries always refer to the *current* task's buffer,
    so switching tasks isolates each task's experience from the others.
    """

    def __init__(self, capacity):
        self.memory = {}  # task_id -> deque of Experience tuples
        self.capacity = capacity  # max experiences kept per task
        self.current_task = 0

    def push(self, state, action, reward, next_state, done, task_id=None):
        """Append one transition to the buffer of task_id (default: current task)."""
        tid = self.current_task if task_id is None else task_id
        buf = self.memory.setdefault(tid, deque(maxlen=self.capacity))
        buf.append(Experience(state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample batch_size transitions from the current task.

        Returns [] when the current task has no buffer or too few entries.
        """
        buf = self.memory.get(self.current_task)
        if buf is None or len(buf) < batch_size:
            return []
        return random.sample(buf, batch_size)

    def set_task(self, task_id):
        """Point subsequent push/sample/len operations at task_id."""
        self.current_task = task_id

    def __len__(self):
        buf = self.memory.get(self.current_task)
        return 0 if buf is None else len(buf)


# DQN network
class DQN(nn.Module):
    """Fully-connected Q-network: state_dim -> 128 -> 128 -> action_dim.

    Attribute names (fc1/fc2/fc3) are kept so saved state_dict keys stay stable.
    """

    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, action_dim)

    def forward(self, x):
        # Two ReLU hidden layers, then a linear head of Q-values per action.
        for hidden in (self.fc1, self.fc2):
            x = torch.relu(hidden(x))
        return self.fc3(x)


# UAV Environment
class UAVEnvironment:
    """Square data-collection arena.

    A UAV moves with 4 discrete actions (up/right/down/left) and collects the
    task of any active user within UAV_COLLECTION_RADIUS. A "task" (in the CRL
    sense) is a particular subset of active users; switch_task() resamples it.
    """

    def __init__(self):
        # Fixed user locations, shared across all tasks.
        self.all_user_positions = np.random.uniform(0, AREA_SIZE, (NUM_TOTAL_USERS, 2))
        self.active_user_indices = None
        self.active_users = None
        self.task_sizes = None
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])  # start at centre
        self.collected_tasks = []
        self.step_count = 0
        self.task_id = 0
        self.select_active_users()

    def select_active_users(self):
        """Resample the active-user subset and fresh task sizes."""
        chosen = np.random.choice(NUM_TOTAL_USERS, NUM_ACTIVE_USERS, replace=False)
        self.active_user_indices = chosen
        self.active_users = self.all_user_positions[chosen]
        # Random task sizes in [1, 10).
        # NOTE(review): task_sizes is never read by step()/_get_state() —
        # confirm whether it was meant to influence reward or collection time.
        self.task_sizes = np.random.uniform(1, 10, NUM_ACTIVE_USERS)
        self.collected_tasks = [False] * NUM_ACTIVE_USERS

    def switch_task(self):
        """Advance to the next task id and draw a new active-user set."""
        self.task_id += 1
        self.select_active_users()

    def reset(self):
        """Recentre the UAV and mark all tasks uncollected; return the state."""
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])
        self.step_count = 0
        self.collected_tasks = [False] * NUM_ACTIVE_USERS
        return self._get_state()

    def step(self, action):
        """Apply one movement action. Returns (state, reward, done, info)."""
        # 0: up, 1: right, 2: down, 3: left
        deltas = {
            0: np.array([0, UAV_SPEED]),
            1: np.array([UAV_SPEED, 0]),
            2: np.array([0, -UAV_SPEED]),
            3: np.array([-UAV_SPEED, 0]),
        }

        old_position = self.uav_position.copy()
        self.uav_position = np.clip(old_position + deltas[action], 0, AREA_SIZE)

        # Energy is proportional to the distance actually flown
        # (clipping at the boundary can shorten the move).
        flown = np.linalg.norm(self.uav_position - old_position)
        energy = flown * UAV_ENERGY_PER_METER

        # Collect every uncollected task within radius of the new position.
        already_collected = sum(self.collected_tasks)
        for idx, user_pos in enumerate(self.active_users):
            if self.collected_tasks[idx]:
                continue
            if np.linalg.norm(self.uav_position - user_pos) <= UAV_COLLECTION_RADIUS:
                self.collected_tasks[idx] = True

        total_collected = sum(self.collected_tasks)
        newly_collected = total_collected - already_collected

        # Dense shaping: completion ratio minus energy and a per-step penalty,
        # plus a bonus per fresh collection and a terminal full-sweep bonus.
        reward = 10 * (total_collected / NUM_ACTIVE_USERS) - energy - 0.1
        if newly_collected > 0:
            reward += 5 * newly_collected
        if all(self.collected_tasks):
            reward += 20

        self.step_count += 1
        done = self.step_count >= MAX_STEPS_PER_EPISODE or all(self.collected_tasks)

        return self._get_state(), reward, done, {"tasks_completed": total_collected}

    def _get_state(self):
        """Flat observation: normalised UAV xy, then (x, y, collected) per user."""
        obs = np.zeros(2 + NUM_ACTIVE_USERS * 3)
        obs[0:2] = self.uav_position / AREA_SIZE

        for i in range(NUM_ACTIVE_USERS):
            base = 2 + 3 * i
            obs[base:base + 2] = self.active_users[i] / AREA_SIZE
            obs[base + 2] = float(self.collected_tasks[i])

        return obs


# CRL Agent
class CRLAgent:
    """DQN agent for continual RL: task-aware replay plus per-task weight snapshots.

    The agent trains a policy network against a periodically-synced target
    network, and can freeze/restore a copy of its weights per task id.
    """

    def __init__(self, state_dim, action_dim):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.policy_net = DQN(state_dim, action_dim).to(device)
        self.target_net = DQN(state_dim, action_dim).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=LEARNING_RATE)
        self.memory = TaskAwareReplayMemory(MEMORY_SIZE)
        self.epsilon = EPSILON_START

        # task_id -> independent snapshot of the policy weights.
        self.task_models = {}

    def select_action(self, state):
        """Epsilon-greedy: random action with prob epsilon, else argmax Q."""
        if random.random() < self.epsilon:
            return random.randint(0, self.action_dim - 1)

        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
            return self.policy_net(state_tensor).argmax().item()

    def optimize_model(self):
        """Run one DQN gradient step on a batch from the current task's buffer.

        Returns the scalar loss, or None when too few samples are stored yet.
        """
        if len(self.memory) < BATCH_SIZE:
            return None

        experiences = self.memory.sample(BATCH_SIZE)
        if not experiences:
            return None

        # Transpose list-of-Experience into Experience-of-lists.
        batch = Experience(*zip(*experiences))

        # np.stack first: building a tensor from a list of numpy arrays
        # element-wise is much slower than one bulk conversion.
        state_batch = torch.from_numpy(np.stack(batch.state)).float().to(device)
        action_batch = torch.LongTensor(batch.action).unsqueeze(1).to(device)
        reward_batch = torch.FloatTensor(batch.reward).to(device)

        # Terminal states contribute no bootstrapped value.
        non_final_mask = torch.BoolTensor([not d for d in batch.done]).to(device)
        non_final_next = [s for s, d in zip(batch.next_state, batch.done) if not d]

        # Q(s_t, a) for the actions actually taken.
        state_action_values = self.policy_net(state_batch).gather(1, action_batch)

        # V(s_{t+1}) from the target network; zeros for terminal transitions.
        next_state_values = torch.zeros(BATCH_SIZE, device=device)
        if non_final_next:
            next_states = torch.from_numpy(np.stack(non_final_next)).float().to(device)
            with torch.no_grad():
                next_state_values[non_final_mask] = self.target_net(next_states).max(1)[0]

        expected_values = reward_batch + GAMMA * next_state_values

        loss = nn.MSELoss()(state_action_values, expected_values.unsqueeze(1))

        self.optimizer.zero_grad()
        loss.backward()
        # Element-wise gradient clipping to [-1, 1]; equivalent to the manual
        # clamp loop but safely skips parameters without gradients.
        nn.utils.clip_grad_value_(self.policy_net.parameters(), 1)
        self.optimizer.step()

        return loss.item()

    def update_epsilon(self):
        """Decay exploration multiplicatively, floored at EPSILON_END."""
        self.epsilon = max(EPSILON_END, self.epsilon * EPSILON_DECAY)

    def save_task_model(self, task_id):
        """Save an independent snapshot of the current policy for task_id.

        BUG FIX: the previous ``state_dict().copy()`` was a *shallow* copy —
        the tensors inside still aliased the live parameters, so continued
        training silently overwrote every "saved" snapshot (and
        adapt_to_new_task then restored corrupted weights). Deep-copy so the
        snapshot is frozen at save time.
        """
        self.task_models[task_id] = {
            'state_dict': copy.deepcopy(self.policy_net.state_dict()),
        }

    def adapt_to_new_task(self, task_id):
        """Point the replay buffer at task_id; restore its weights if known.

        Unseen tasks keep the current weights as a warm start (forward
        transfer from previously learned tasks).
        """
        if task_id in self.task_models:
            self.policy_net.load_state_dict(self.task_models[task_id]['state_dict'])
        # else: deliberately keep the current model for the new task.

        self.memory.set_task(task_id)


def train_crl():
    """Main continual-RL loop: train a CRLAgent across a sequence of tasks.

    Every TASK_SWITCH_EPISODES episodes the environment resamples its active
    users (a new task); the agent snapshots the old task's weights first.
    Returns the trained agent; also saves the final model and result plots.
    """
    env = UAVEnvironment()
    obs_dim = 2 + NUM_ACTIVE_USERS * 3
    n_actions = 4  # up / right / down / left

    agent = CRLAgent(obs_dim, n_actions)

    episode_rewards = []
    episode_tasks = []
    step_losses = []

    for ep in range(NUM_EPISODES):
        # Rotate to a new task on schedule, snapshotting the current one first.
        if ep > 0 and ep % TASK_SWITCH_EPISODES == 0:
            agent.save_task_model(env.task_id)
            env.switch_task()
            agent.adapt_to_new_task(env.task_id)
            print(f"Switched to task {env.task_id} at episode {ep}")

        obs = env.reset()
        ep_return = 0
        done = False

        while not done:
            act = agent.select_action(obs)
            next_obs, rew, done, info = env.step(act)

            agent.memory.push(obs, act, rew, next_obs, done)

            obs = next_obs
            ep_return += rew

            # One gradient step per environment step (once the buffer fills).
            loss = agent.optimize_model()
            if loss is not None:
                step_losses.append(loss)

        # Hard-sync the target network periodically.
        if ep % TARGET_UPDATE == 0:
            agent.target_net.load_state_dict(agent.policy_net.state_dict())

        agent.update_epsilon()

        episode_rewards.append(ep_return)
        episode_tasks.append(info["tasks_completed"])

        if ep % 10 == 0:
            avg_reward = np.mean(episode_rewards[-10:])
            avg_tasks = np.mean(episode_tasks[-10:])
            print(
                f"Episode: {ep}, Avg Reward: {avg_reward:.2f}, Avg Tasks: {avg_tasks:.2f}, Tasks Completed: {info['tasks_completed']}/{NUM_ACTIVE_USERS}")

    torch.save(agent.policy_net.state_dict(), "uav_crl_model.pth")

    plot_results(episode_rewards, episode_tasks, step_losses)

    return agent


def plot_results(rewards, tasks, losses):
    """Plot per-episode rewards, task completions, and the training loss.

    Saves the figure to 'uav_crl_training_results.png' and shows it.

    Fix: ``losses`` was accepted but never plotted, so the loss history
    collected during training was silently dropped; it now gets its own
    subplot (left empty if no losses were recorded).
    """
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 3, 1)
    plt.plot(rewards)
    plt.title('Rewards per Episode')
    plt.xlabel('Episode')
    plt.ylabel('Reward')

    plt.subplot(1, 3, 2)
    plt.plot(tasks)
    plt.title('Tasks Completed per Episode')
    plt.xlabel('Episode')
    plt.ylabel('Tasks Completed')

    plt.subplot(1, 3, 3)
    if losses:  # may be empty if training never filled the replay buffer
        plt.plot(losses)
    plt.title('Training Loss')
    plt.xlabel('Optimization Step')
    plt.ylabel('Loss')

    plt.tight_layout()
    plt.savefig('uav_crl_training_results.png')
    plt.show()


def visualize_trajectory(agent, env):
    """Roll out one episode with the given agent and plot the UAV flight path.

    Saves the figure to 'uav_trajectory.png' and shows it.

    NOTE(review): select_action is still epsilon-greedy here, so the rollout
    may include exploratory moves unless epsilon has decayed.
    """
    obs = env.reset()
    done = False

    path = [env.uav_position.copy()]

    while not done:
        chosen = agent.select_action(obs)
        obs, _, done, info = env.step(chosen)
        path.append(env.uav_position.copy())
        if done:
            print(f"Mission complete: Collected {info['tasks_completed']}/{NUM_ACTIVE_USERS} tasks")

    path = np.array(path)

    plt.figure(figsize=(10, 10))
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)

    # Users: green = task collected, red = still pending.
    for i, pos in enumerate(env.active_users):
        marker_color = 'green' if env.collected_tasks[i] else 'red'
        plt.scatter(pos[0], pos[1], color=marker_color, s=100)
        plt.text(pos[0] + 1, pos[1] + 1, f"User {i}")

    # Flight path with start/end markers.
    plt.plot(path[:, 0], path[:, 1], 'b-', alpha=0.6)
    plt.scatter(path[0, 0], path[0, 1], color='blue', s=100, marker='o', label='Start')
    plt.scatter(path[-1, 0], path[-1, 1], color='purple', s=100, marker='x', label='End')

    plt.title('UAV Trajectory')
    plt.legend()
    plt.grid(True)
    plt.savefig('uav_trajectory.png')
    plt.show()


if __name__ == "__main__":
    # Train, report wall-clock time, then visualise the learned policy.
    t_start = time.time()
    agent = train_crl()
    t_end = time.time()
    print(f"Training time: {t_end - t_start:.2f} seconds")

    # Fresh environment (new random users) for the rollout.
    env = UAVEnvironment()
    visualize_trajectory(agent, env)
