import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import matplotlib.pyplot as plt
import random
from collections import deque, namedtuple
import copy
import time
import gym
from gym import spaces
import matplotlib.animation as animation

# Set random seeds for reproducibility across torch, NumPy, and the stdlib RNG.
# NOTE(review): full determinism on GPU would additionally require cuDNN
# deterministic settings — not configured here.
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

# Define experience tuple structure: one (s, a, r, s', done) transition as
# stored in the replay buffer.
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])


# Experience Replay Buffer
class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions for off-policy learning.

    Once `capacity` is reached, the underlying deque silently discards the
    oldest experience on each new insertion.
    """

    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Append a single (s, a, r, s', done) transition."""
        self.buffer.append(Experience(state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw a uniform random mini-batch and stack it into float tensors.

        Returns a 5-tuple (states, actions, rewards, next_states, dones),
        each of shape (batch_size, field_dim).
        """
        batch = random.sample(self.buffer, batch_size)

        def _stack(field_name):
            # Vertically stack one named field across the sampled batch.
            return np.vstack([getattr(exp, field_name) for exp in batch])

        states = torch.FloatTensor(_stack('state'))
        actions = torch.FloatTensor(_stack('action'))
        rewards = torch.FloatTensor(_stack('reward'))
        next_states = torch.FloatTensor(_stack('next_state'))
        # Booleans are cast to 0/1 so they can be used in (1 - done) masks.
        dones = torch.FloatTensor(_stack('done').astype(np.uint8))

        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


# Actor Network
class Actor(nn.Module):
    """Deterministic policy network: maps a state to a bounded action.

    The tanh squash keeps raw outputs in [-1, 1]; multiplying by
    `max_action` rescales them to the environment's action range.
    """

    def __init__(self, state_dim, action_dim, max_action, hidden_dim=256):
        super(Actor, self).__init__()
        self.layer1 = nn.Linear(state_dim, hidden_dim)
        self.layer2 = nn.Linear(hidden_dim, hidden_dim)
        self.layer3 = nn.Linear(hidden_dim, action_dim)
        self.max_action = max_action

    def forward(self, state):
        """Return an action tensor in [-max_action, max_action]."""
        hidden = F.relu(self.layer1(state))
        hidden = F.relu(self.layer2(hidden))
        squashed = torch.tanh(self.layer3(hidden))
        return self.max_action * squashed


# Critic Network
class Critic(nn.Module):
    """State-action value network: Q(s, a) -> scalar per batch row."""

    def __init__(self, state_dim, action_dim, hidden_dim=256):
        super(Critic, self).__init__()
        # The state and action are concatenated at the input layer.
        self.layer1 = nn.Linear(state_dim + action_dim, hidden_dim)
        self.layer2 = nn.Linear(hidden_dim, hidden_dim)
        self.layer3 = nn.Linear(hidden_dim, 1)

    def forward(self, state, action):
        """Return the estimated Q-value, shape (batch, 1)."""
        joint = torch.cat([state, action], dim=1)
        hidden = F.relu(self.layer1(joint))
        hidden = F.relu(self.layer2(hidden))
        return self.layer3(hidden)


# UAV Environment
class UAVEnv(gym.Env):
    """2-D data-collection environment.

    A UAV flies over a square area of side `area_size` and must pass within
    `d_collect` metres of every *active* ground user to collect its task
    before the step budget `t_max` runs out. User positions are fixed at
    construction; which users are active is re-randomised on every reset.

    State: [uav_x, uav_y, user1_x, user1_y, ..., userN_x, userN_y,
            active_1, ..., active_N]
    Action: 2-D velocity command in [-v_max, v_max], applied for `dt` seconds.
    """

    def __init__(self, area_size=100, num_users=10, active_users=8,
                 v_max=5, d_collect=5, t_max=200, dt=1.0,
                 fixed_start=True, start_position=None):
        super(UAVEnv, self).__init__()

        # Environment parameters
        self.area_size = area_size        # side length of the square area (m)
        self.num_users = num_users        # total ground users
        self.active_users = active_users  # users activated per episode
        self.v_max = v_max                # max UAV speed per axis (m/s)
        self.d_collect = d_collect        # task-collection radius (m)
        self.t_max = t_max                # episode step budget
        self.dt = dt                      # simulation time step (s)
        self.fixed_start = fixed_start
        # Default start is the centre of the area when none is given.
        self.start_position = start_position if start_position is not None else np.array([area_size / 2, area_size / 2])

        # Action and observation spaces
        self.action_space = spaces.Box(low=-v_max, high=v_max, shape=(2,), dtype=np.float32)

        # State: [uav_x, uav_y, user1_x, user1_y, ..., user10_x, user10_y, active1, ..., active10]
        self.observation_space = spaces.Box(
            low=np.array([0, 0] + [0, 0] * num_users + [0] * num_users),
            high=np.array([area_size, area_size] + [area_size, area_size] * num_users + [1] * num_users),
            dtype=np.float32
        )

        # Initialize fixed user positions (drawn once; shared by all episodes)
        self.user_positions = np.random.uniform(0, area_size, (num_users, 2))

        # Reset to initialize state
        self.reset()

    def _activate_random_users(self):
        """Return a 0/1 vector with `active_users` randomly chosen entries set."""
        active = np.zeros(self.num_users)
        active_indices = np.random.choice(self.num_users, self.active_users, replace=False)
        active[active_indices] = 1
        return active

    def reset(self):
        """Start a new episode and return the initial state.

        NOTE: a fresh random subset of users is activated on every reset,
        so episodes within the "same" environment differ in task layout.
        """
        # Set UAV starting position (center or random)
        if self.fixed_start:
            self.uav_position = self.start_position.copy()
        else:
            self.uav_position = np.random.uniform(0, self.area_size, 2)

        # Activate random users
        self.active_status = self._activate_random_users()

        # Reset episode state
        self.steps = 0
        self.collected_tasks = 0
        self.prev_collected_tasks = 0

        return self._get_state()

    def _get_state(self):
        """Concatenate UAV position, all user positions, and active status."""
        state = np.concatenate([
            self.uav_position,
            self.user_positions.flatten(),
            self.active_status
        ])
        return state

    def _calculate_reward(self, new_collected_tasks):
        """Reward = task bonus for newly collected tasks minus a constant
        per-step energy penalty (encourages short trajectories)."""
        # Reward parameters
        lambda_task = 1.0
        lambda_energy = 0.1
        reward_per_task = 100
        penalty_per_step = -1

        # Calculate reward components
        task_reward = lambda_task * new_collected_tasks * reward_per_task
        energy_penalty = lambda_energy * penalty_per_step

        return task_reward + energy_penalty

    def step(self, action):
        """Advance one time step.

        Returns (state, reward, done, info); `done` is True when every
        active task is collected or the step budget is exhausted.
        """
        self.steps += 1

        # BUGFIX: enforce the declared action space. Without this clip an
        # out-of-range action would move the UAV faster than v_max per axis.
        action = np.clip(action, -self.v_max, self.v_max)

        # Update UAV position
        next_position = self.uav_position + action * self.dt

        # Boundary handling - clamp to area boundaries
        next_position = np.clip(next_position, 0, self.area_size)
        self.uav_position = next_position

        # Check for task collection
        self.prev_collected_tasks = self.collected_tasks
        for i in range(self.num_users):
            if self.active_status[i] == 1:  # If user is active
                # Calculate distance to user
                distance = np.linalg.norm(self.uav_position - self.user_positions[i])

                # Collect task if within range
                if distance <= self.d_collect:
                    self.active_status[i] = 0  # Deactivate user (task collected)
                    self.collected_tasks += 1

        # Calculate new tasks collected in this step
        new_collected_tasks = self.collected_tasks - self.prev_collected_tasks

        # Calculate reward
        reward = self._calculate_reward(new_collected_tasks)

        # Check if episode is done
        all_tasks_collected = np.sum(self.active_status) == 0
        timeout = self.steps >= self.t_max

        done = all_tasks_collected or timeout

        # Additional info
        info = {
            'collected_tasks': self.collected_tasks,
            'active_tasks_remaining': np.sum(self.active_status),
            'all_tasks_collected': all_tasks_collected,
            'timeout': timeout
        }

        return self._get_state(), reward, done, info

    def switch_environment(self):
        """Switch the environment by activating a new set of users."""
        self.active_status = self._activate_random_users()
        return self._get_state()

    def render(self, mode='human', trajectory=None):
        """Render the environment (for visualization).

        Active users are red, already-served users grey, the UAV a blue star.
        In 'human' mode the figure is shown; otherwise it is returned.
        """
        plt.figure(figsize=(10, 10))
        plt.xlim(0, self.area_size)
        plt.ylim(0, self.area_size)

        # Plot inactive users
        for i in range(self.num_users):
            if self.active_status[i] == 0:
                plt.scatter(self.user_positions[i, 0], self.user_positions[i, 1],
                            color='gray', s=100, alpha=0.5)

        # Plot active users
        for i in range(self.num_users):
            if self.active_status[i] == 1:
                plt.scatter(self.user_positions[i, 0], self.user_positions[i, 1],
                            color='red', s=100)

        # Plot UAV
        plt.scatter(self.uav_position[0], self.uav_position[1],
                    color='blue', s=200, marker='*')

        # Plot trajectory if provided
        if trajectory is not None:
            traj = np.array(trajectory)
            plt.plot(traj[:, 0], traj[:, 1], 'b--')

        plt.grid(True)
        plt.title('UAV Environment')
        plt.xlabel('X position (m)')
        plt.ylabel('Y position (m)')

        if mode == 'human':
            plt.show()
        else:
            return plt.gcf()


# CRL Agent with DDPG and EWC
class CRLAgent:
    """DDPG agent augmented with Elastic Weight Consolidation (EWC).

    Standard DDPG (actor/critic with target networks and a replay buffer)
    plus a quadratic EWC penalty on the actor so that it retains competence
    on previously registered environments when the task switches.
    """

    def __init__(self, state_dim, action_dim, max_action,
                 buffer_size=100000, batch_size=64, gamma=0.99,
                 tau=0.001, actor_lr=1e-4, critic_lr=1e-3,
                 ewc_lambda=100.0, device='cpu'):

        self.device = device
        self.gamma = gamma            # discount factor
        self.tau = tau                # soft target-update coefficient
        self.max_action = max_action
        self.batch_size = batch_size
        self.ewc_lambda = ewc_lambda  # strength of the EWC penalty

        # Actor and Critic networks, each with a slowly tracking target copy
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr)

        # Experience Replay Buffer
        self.replay_buffer = ReplayBuffer(buffer_size)

        # EWC components, keyed by task id
        self.fisher_dict = {}       # diagonal Fisher information per task
        self.optim_param_dict = {}  # actor parameter snapshot per task
        self.task_count = 0         # counter for different tasks/environments

    def select_action(self, state, add_noise=True):
        """Select action from policy with optional exploration noise.

        Noise is zero-mean Gaussian with std = 0.1 * max_action; the result
        is clipped back into the valid action range.
        """
        state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)

        with torch.no_grad():
            action = self.actor(state).cpu().data.numpy().flatten()

        # Add exploration noise if requested
        if add_noise:
            noise = np.random.normal(0, self.max_action * 0.1, size=action.shape)
            action = np.clip(action + noise, -self.max_action, self.max_action)

        return action

    def store_transition(self, state, action, reward, next_state, done):
        """Store experience in replay buffer"""
        self.replay_buffer.add(state, action, reward, next_state, done)

    def compute_fisher_information(self, env, num_samples=1000):
        """Compute the diagonal Fisher information of the actor for this task.

        The policy is approximated as a Normal around the deterministic actor
        output with fixed std 0.1; squared log-prob gradients are averaged
        over sampled states.

        NOTE(review): states are drawn via env.reset(), so only the
        *initial-state* distribution is covered, not states visited along
        trajectories — confirm this is the intended approximation.
        """
        # Initialize Fisher information dictionary
        fisher_dict = {n: torch.zeros_like(p, device=self.device)
                       for n, p in self.actor.named_parameters()}

        # Sample states from environment for Fisher computation
        states = []
        for _ in range(num_samples):
            state = env.reset()
            states.append(state)

        states = torch.FloatTensor(np.array(states)).to(self.device)

        # Compute log probabilities for sampled states
        for state in states:
            state = state.unsqueeze(0)  # Add batch dimension
            self.actor.zero_grad()

            # For continuous actions, we use a Normal distribution approximation
            action_mean = self.actor(state)
            action_std = torch.ones_like(action_mean) * 0.1  # Fixed standard deviation

            # Create normal distribution
            normal = Normal(action_mean, action_std)

            # Sample action and compute log probability
            action = normal.sample()
            log_prob = normal.log_prob(action).sum(dim=-1)

            # Compute gradient of log probability
            log_prob.backward()

            # Accumulate squared gradients (diagonal Fisher approximation)
            for n, p in self.actor.named_parameters():
                if p.grad is not None:
                    fisher_dict[n] += (p.grad ** 2) / num_samples

        return fisher_dict

    def save_optimal_params(self):
        """Snapshot the actor's current parameters (theta*) for EWC anchoring."""
        optim_params = {}
        for n, p in self.actor.named_parameters():
            optim_params[n] = p.data.clone()
        return optim_params

    def register_environment_switch(self, env):
        """Called when environment switches to compute and store Fisher information"""
        print(f"Registering environment switch for task {self.task_count}")

        # Compute Fisher information for current task
        fisher_dict = self.compute_fisher_information(env)
        self.fisher_dict[self.task_count] = fisher_dict

        # Save optimal parameters for current task
        self.optim_param_dict[self.task_count] = self.save_optimal_params()

        # Increment task counter
        self.task_count += 1

    def compute_ewc_loss(self):
        """Compute the EWC regularization loss over all registered tasks.

        Returns lambda/2 * sum_task sum_param F * (theta - theta*)^2, or a
        zero tensor when no task has been registered yet.
        """
        if len(self.fisher_dict) == 0:
            return torch.tensor(0.0).to(self.device)  # No previous tasks yet

        ewc_loss = torch.tensor(0.0).to(self.device)

        # Compute EWC loss for all previous tasks
        for task_id in range(self.task_count):
            for n, p in self.actor.named_parameters():
                # Get Fisher information and optimal parameters for this task and parameter
                fisher = self.fisher_dict[task_id][n]
                optim_param = self.optim_param_dict[task_id][n]

                # Add EWC penalty: λ/2 * F * (θ - θ*)^2
                ewc_loss += (fisher * (p - optim_param).pow(2)).sum()

        return 0.5 * self.ewc_lambda * ewc_loss

    def update(self):
        """One DDPG gradient step on a sampled mini-batch.

        Returns a dict of losses, or None when the buffer is still too small.
        The actor loss additionally includes the EWC penalty.
        """
        # Skip update if not enough samples in buffer
        if len(self.replay_buffer) < self.batch_size:
            return

        # Sample batch of experiences
        states, actions, rewards, next_states, dones = self.replay_buffer.sample(self.batch_size)
        states = states.to(self.device)
        actions = actions.to(self.device)
        rewards = rewards.to(self.device)
        next_states = next_states.to(self.device)
        dones = dones.to(self.device)

        # ---------- Update Critic ----------
        # Compute target Q value; (1 - done) zeroes the bootstrap at terminals
        with torch.no_grad():
            next_actions = self.actor_target(next_states)
            target_q = self.critic_target(next_states, next_actions)
            target_q = rewards + (1 - dones) * self.gamma * target_q

        # Compute current Q value
        current_q = self.critic(states, actions)

        # Compute critic loss
        critic_loss = F.mse_loss(current_q, target_q)

        # Update critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # ---------- Update Actor ----------
        # Deterministic policy gradient: maximize Q(s, pi(s))
        actor_loss = -self.critic(states, self.actor(states)).mean()

        # Add EWC regularization loss for continual learning
        ewc_loss = self.compute_ewc_loss()
        total_actor_loss = actor_loss + ewc_loss

        # Update actor
        self.actor_optimizer.zero_grad()
        total_actor_loss.backward()
        self.actor_optimizer.step()

        # ---------- Update Target Networks ----------
        # Soft (Polyak) update of target networks toward the online networks
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

        for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

        return {
            'actor_loss': actor_loss.item(),
            'critic_loss': critic_loss.item(),
            'ewc_loss': ewc_loss.item() if ewc_loss != 0 else 0
        }

    def save(self, filename):
        """Save networks, optimizers, and all EWC state to `filename`."""
        torch.save({
            'actor': self.actor.state_dict(),
            'actor_target': self.actor_target.state_dict(),
            'critic': self.critic.state_dict(),
            'critic_target': self.critic_target.state_dict(),
            'actor_optimizer': self.actor_optimizer.state_dict(),
            'critic_optimizer': self.critic_optimizer.state_dict(),
            'fisher_dict': self.fisher_dict,
            'optim_param_dict': self.optim_param_dict,
            'task_count': self.task_count
        }, filename)

    def load(self, filename):
        """Load model parameters.

        BUGFIX: map_location remaps stored tensors to this agent's device,
        so a checkpoint saved on GPU can be restored on a CPU-only machine
        (and vice versa); without it torch.load fails on a machine lacking
        the original device.
        """
        checkpoint = torch.load(filename, map_location=self.device)
        self.actor.load_state_dict(checkpoint['actor'])
        self.actor_target.load_state_dict(checkpoint['actor_target'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.critic_target.load_state_dict(checkpoint['critic_target'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])
        self.fisher_dict = checkpoint['fisher_dict']
        self.optim_param_dict = checkpoint['optim_param_dict']
        self.task_count = checkpoint['task_count']


# Evaluation function
def evaluate_policy(agent, env, eval_episodes=10, render=False):
    """Run the deterministic policy for several episodes and report metrics.

    An episode counts as a success when it terminates with
    info['all_tasks_collected'] set. Returns a dict with the success rate,
    average reward, average episode length, and all UAV trajectories.
    """
    successes = 0
    episode_rewards = []
    lengths = []
    trajectories = []

    for ep in range(eval_episodes):
        state = env.reset()
        total_reward = 0
        path = [env.uav_position.copy()]
        steps_taken = 0

        for t in range(env.t_max):
            # Greedy action: exploration noise disabled during evaluation
            next_state, reward, done, info = env.step(
                agent.select_action(state, add_noise=False))

            path.append(env.uav_position.copy())
            total_reward += reward
            state = next_state
            steps_taken = t + 1

            if done:
                if info['all_tasks_collected']:
                    successes += 1
                break

        episode_rewards.append(total_reward)
        lengths.append(steps_taken)
        trajectories.append(path)

        # Visualize only the first evaluation episode when requested
        if render and ep == 0:
            env.render(trajectory=path)

    return {
        'success_rate': successes / eval_episodes,
        'avg_reward': np.mean(episode_rewards),
        'avg_episode_length': np.mean(lengths),
        'trajectories': trajectories,
    }


# Main training function
def train_crl_agent():
    """Train the CRL (DDPG + EWC) agent on the UAV environment.

    Every `episodes_per_switch` episodes the environment's active-user set
    is switched; before each switch the agent registers Fisher information
    and parameter snapshots for the old task, and the policy is evaluated
    on both the new and the old task configuration.

    Returns (agent, env, final_metrics).
    """
    # Hyperparameters
    area_size = 100  # Size of the 2D area (m)
    num_users = 10  # Total number of users
    active_users = 8  # Number of active users per episode
    v_max = 5.0  # Maximum UAV velocity (m/s)
    d_collect = 5.0  # Task collection distance (m)
    t_max = 200  # Maximum episode length (steps)

    # Training hyperparameters
    num_episodes = 1000  # Total number of episodes
    episodes_per_switch = 100  # K: Number of episodes before environment switch
    buffer_size = 100000  # Replay buffer capacity
    batch_size = 64  # Batch size for training
    gamma = 0.99  # Discount factor
    tau = 0.001  # Soft update coefficient
    actor_lr = 3e-4  # Actor learning rate
    critic_lr = 3e-3  # Critic learning rate
    ewc_lambda = 100.0  # EWC regularization strength
    updates_per_step = 1  # Number of gradient updates per step

    # Use GPU if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Create environment
    env = UAVEnv(
        area_size=area_size,
        num_users=num_users,
        active_users=active_users,
        v_max=v_max,
        d_collect=d_collect,
        t_max=t_max
    )

    # Create agent
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    agent = CRLAgent(
        state_dim=state_dim,
        action_dim=action_dim,
        max_action=v_max,
        buffer_size=buffer_size,
        batch_size=batch_size,
        gamma=gamma,
        tau=tau,
        actor_lr=actor_lr,
        critic_lr=critic_lr,
        ewc_lambda=ewc_lambda,
        device=device
    )

    # Lists to store metrics
    episode_rewards = []
    episode_success_rates = []
    episode_lengths = []
    environment_switches = []

    # Training loop
    task_id = 0  # Current environment/task ID

    for episode in range(1, num_episodes + 1):
        # Reset environment at the start of each episode
        state = env.reset()
        episode_reward = 0
        steps = 0

        # Run episode
        while True:
            # Select action with exploration noise
            action = agent.select_action(state, add_noise=True)

            # Take action in environment
            next_state, reward, done, info = env.step(action)

            # Store transition in replay buffer
            agent.store_transition(state, action, reward, next_state, done)

            # Update agent
            for _ in range(updates_per_step):
                agent.update()

            # Update state and counters
            state = next_state
            episode_reward += reward
            steps += 1

            if done:
                break

        # Store metrics
        episode_rewards.append(episode_reward)
        episode_success_rates.append(1 if info['all_tasks_collected'] else 0)
        episode_lengths.append(steps)

        # Print progress
        if episode % 10 == 0:
            print(f"Episode {episode}/{num_episodes}, Reward: {episode_reward:.2f}, " +
                  f"Steps: {steps}, Tasks: {info['collected_tasks']}/{active_users}, " +
                  f"Success: {'Yes' if info['all_tasks_collected'] else 'No'}")

        # Check for environment switch
        if episode % episodes_per_switch == 0:
            # Snapshot the task layout that was active BEFORE the switch.
            # Must happen before register_environment_switch(), whose Fisher
            # computation resets the env repeatedly and re-randomises it.
            prev_active_status = env.active_status.copy()

            # Register current environment before switching
            agent.register_environment_switch(env)

            # Switch to new environment
            env.switch_environment()
            environment_switches.append(episode)

            # Evaluate on both old and new environment
            print(f"\nEnvironment switch at episode {episode}")

            # Evaluate on current (new) environment
            metrics = evaluate_policy(agent, env, eval_episodes=10)
            print(f"New Environment - Success Rate: {metrics['success_rate']:.2f}, " +
                  f"Avg Reward: {metrics['avg_reward']:.2f}")

            # Evaluate on the previous task configuration.
            # BUGFIX: the old code looked up 'active_status' in
            # agent.optim_param_dict[task_id], which holds the actor's
            # *network parameters* keyed by layer name — the lookup always
            # fell back to a fresh random activation, so the "old
            # environment" was never actually evaluated.
            # NOTE(review): UAVEnv.reset() re-randomises active users each
            # episode, so evaluate_policy still draws fresh task layouts;
            # confirm whether reset() should preserve active_status for a
            # faithful old-task evaluation.
            temp_env = copy.deepcopy(env)
            temp_env.active_status = prev_active_status

            metrics_old = evaluate_policy(agent, temp_env, eval_episodes=10)
            print(f"Old Environment - Success Rate: {metrics_old['success_rate']:.2f}, " +
                  f"Avg Reward: {metrics_old['avg_reward']:.2f}\n")

            task_id += 1

    # Final evaluation
    print("\nFinal Evaluation")
    final_metrics = evaluate_policy(agent, env, eval_episodes=20, render=True)
    print(f"Success Rate: {final_metrics['success_rate']:.2f}, " +
          f"Avg Reward: {final_metrics['avg_reward']:.2f}, " +
          f"Avg Episode Length: {final_metrics['avg_episode_length']:.2f}")

    # Save the trained agent
    agent.save("crl_uav_agent.pth")

    # Plot training curves
    plot_training_curves(episode_rewards, episode_success_rates, episode_lengths, environment_switches)

    # Create animation of final trajectory
    create_trajectory_animation(env, final_metrics['trajectories'][0])

    return agent, env, final_metrics


def plot_training_curves(rewards, success_rates, lengths, switches):
    """Plot per-episode reward, smoothed success rate, and episode length.

    Environment switches are marked with dashed red vertical lines on every
    panel. The figure is saved to 'training_curves.png' and then shown.
    """
    fig, (ax_reward, ax_success, ax_length) = plt.subplots(
        3, 1, figsize=(12, 10), sharex=True)

    # Panel 1: raw episode rewards
    ax_reward.plot(rewards)
    ax_reward.set_ylabel('Episode Reward')
    for switch in switches:
        ax_reward.axvline(x=switch, color='r', linestyle='--', alpha=0.5)

    # Panel 2: success rate, smoothed with a trailing moving average
    window_size = 20
    if len(success_rates) >= window_size:
        smoothed = [np.mean(success_rates[max(0, i - window_size):i + 1])
                    for i in range(len(success_rates))]
        ax_success.plot(smoothed)
    else:
        # Too few episodes to smooth; plot the raw values
        ax_success.plot(success_rates)
    ax_success.set_ylabel('Success Rate (Moving Avg)')
    for switch in switches:
        ax_success.axvline(x=switch, color='r', linestyle='--', alpha=0.5)

    # Panel 3: episode lengths
    ax_length.plot(lengths)
    ax_length.set_xlabel('Episode')
    ax_length.set_ylabel('Episode Length')
    for switch in switches:
        ax_length.axvline(x=switch, color='r', linestyle='--', alpha=0.5)

    plt.tight_layout()
    plt.savefig('training_curves.png')
    plt.show()


def create_trajectory_animation(env, trajectory):
    """Save an animated GIF of the UAV following `trajectory`.

    The static scene (area bounds, served users in grey, active users in
    red) is drawn once; each frame extends the flown path and moves the
    blue star marking the UAV. Output is written to 'uav_trajectory.gif'.
    """
    fig, ax = plt.subplots(figsize=(10, 10))

    def init():
        # Draw the static background shared by all frames.
        ax.clear()
        ax.set_xlim(0, env.area_size)
        ax.set_ylim(0, env.area_size)
        ax.grid(True)
        ax.set_title('UAV Trajectory')
        ax.set_xlabel('X position (m)')
        ax.set_ylabel('Y position (m)')

        # Served users first (grey), then active users (red) on top.
        for pos, active in zip(env.user_positions, env.active_status):
            if active == 0:
                ax.scatter(pos[0], pos[1], color='gray', s=100, alpha=0.5)
        for pos, active in zip(env.user_positions, env.active_status):
            if active == 1:
                ax.scatter(pos[0], pos[1], color='red', s=100)

        return []

    def animate(frame):
        # Frame 0 shows only the static scene.
        if frame > 0:
            visited = np.array(trajectory[:frame + 1])
            path_line, = ax.plot(visited[:, 0], visited[:, 1], 'b-')
            uav_marker = ax.scatter(visited[-1, 0], visited[-1, 1],
                                    color='blue', s=200, marker='*')
            return [path_line, uav_marker]
        return []

    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=len(trajectory), interval=200, blit=True)

    # Save animation (requires the pillow writer)
    anim.save('uav_trajectory.gif', writer='pillow')
    plt.close()


if __name__ == "__main__":
    # Entry point: run the full continual-learning training loop, then keep
    # the trained agent, environment, and final evaluation metrics around.
    agent, env, metrics = train_crl_agent()
