import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.utils as utils
from torchvision import transforms

def gpdc(model, dataloader, epochs, validation_loader=None, clipping_percentile=0.9, noise_scale=0.01, lr=0.001,
         weight_decay=1e-4, patience=5, grad_clip_value=1.0, augment_data=False):
    """
    Train `model` with Gradient Perturbation and Dynamic Clipping (GPDC).

    Each step: compute per-parameter gradient norms, derive a dynamic clipping
    threshold from the running history of norms, clamp the gradients to that
    threshold, inject gradient-proportional Gaussian noise, then apply a global
    norm clip for stability before the optimizer step.

    Args:
        model: nn.Module to train (moved to CUDA if available, else CPU).
        dataloader: iterable of (data, target) training batches.
        epochs: number of training epochs.
        validation_loader: optional iterable of (data, target) batches; when
            given, enables early stopping on validation loss.
        clipping_percentile: percentile of historical gradient norms used as
            the dynamic clamp threshold.
        noise_scale: base scale of the injected Gaussian noise.
        lr: learning rate for AdamW.
        weight_decay: L2 regularization strength (AdamW decoupled decay).
        patience: epochs without validation improvement before early stop.
        grad_clip_value: max global gradient norm for the final safety clip.
        augment_data: if True, build an augmentation transform pipeline.

    Returns:
        None. The model is trained in place.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    # NOTE(review): this transform pipeline is constructed but never applied to
    # the batches below — presumably the dataloader is expected to consume it,
    # or this is unfinished wiring. TODO: confirm and hook it up or remove.
    transform = None
    if augment_data:
        transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.RandomResizedCrop(224),
            transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    else:
        transform = transforms.Compose([transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    # AdamW gives decoupled weight decay (true L2-style regularization).
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)

    # Reduce LR when the (average) training loss plateaus.
    scheduler = ReduceLROnPlateau(optimizer, 'min', patience=2, verbose=True)

    # Historical gradient norms, kept as plain Python floats so we never pin
    # GPU tensors for the lifetime of training (the original tensors would
    # accumulate device memory across every step of every epoch).
    historical_grad_norms = []

    # Early stopping state (only used when validation_loader is provided).
    best_loss = float('inf')
    epochs_since_improvement = 0

    for epoch in range(epochs):
        model.train()  # Switch to training mode
        total_loss = 0

        for data, target in dataloader:
            data, target = data.to(device), target.to(device)  # Move to device
            optimizer.zero_grad()

            # Forward pass
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()

            # Only parameters that actually received a gradient this step.
            params_with_grad = [param for param in model.parameters() if param.grad is not None]
            gradients = [param.grad.data for param in params_with_grad]
            norm_gradients = [torch.norm(grad) for grad in gradients]

            # Compute dynamic clipping threshold from history + current norms.
            threshold = compute_clipping_threshold(historical_grad_norms, norm_gradients, clipping_percentile)
            # Store detached scalars, not tensors (see note on the list above).
            historical_grad_norms.extend(norm.item() for norm in norm_gradients)

            # Clip gradients element-wise to the dynamic threshold.
            clipped_gradients = [torch.clamp(grad, -threshold, threshold) for grad in gradients]

            # Inject noise proportional to each gradient's norm.
            noisy_gradients = inject_noise(clipped_gradients, noise_scale, threshold)

            # Install the noisy gradients FIRST. Zipping over params_with_grad
            # keeps gradients aligned even when some parameters had no grad
            # (the original zip over all parameters could misassign them).
            for param, noisy_grad in zip(params_with_grad, noisy_gradients):
                param.grad = noisy_grad

            # Global-norm safety clip applied to the gradients the optimizer
            # will actually use (previously this ran before the noisy grads
            # were installed, so it had no effect).
            utils.clip_grad_norm_(model.parameters(), grad_clip_value)

            # Optimizer step to update the model
            optimizer.step()
            total_loss += loss.item()

        avg_train_loss = total_loss / len(dataloader)

        # Step the scheduler on the average loss — the same quantity we print,
        # so its plateau detection is independent of dataloader length.
        scheduler.step(avg_train_loss)

        print(f'Epoch {epoch + 1}/{epochs}, Loss: {avg_train_loss}')
        print(f"Current learning rate: {optimizer.param_groups[0]['lr']}")

        # Early stopping on validation loss.
        if validation_loader:
            val_loss = validate(model, validation_loader, device)
            if val_loss < best_loss:
                best_loss = val_loss
                epochs_since_improvement = 0
            else:
                epochs_since_improvement += 1

            if epochs_since_improvement >= patience:
                print(f"Early stopping triggered at epoch {epoch + 1}")
                break

def compute_clipping_threshold(historical_grad_norms, current_grad_norms, clipping_percentile):
    """
    Compute the dynamic clipping threshold from gradient-norm history.

    Args:
        historical_grad_norms: list of past gradient norms (floats or 0-dim
            tensors). When non-empty, the threshold is their
            `clipping_percentile` order statistic.
        current_grad_norms: list of 0-dim tensors; used (via torch.quantile)
            only on the very first call when there is no history yet.
        clipping_percentile: quantile in [0, 1].

    Returns:
        The threshold (a float or 0-dim tensor, matching the source list).
    """
    if len(historical_grad_norms) == 0:
        threshold = torch.quantile(torch.stack(current_grad_norms), clipping_percentile)
    else:
        sorted_norms = sorted(historical_grad_norms)
        # Clamp the index: int(len * p) equals len when p == 1.0 (and can hit
        # it through rounding), which raised IndexError in the original.
        index = min(int(len(sorted_norms) * clipping_percentile), len(sorted_norms) - 1)
        threshold = sorted_norms[index]
    return threshold


def inject_noise(clipped_gradients, noise_scale, threshold):
    """
    Add gradient-proportional Gaussian noise to each clipped gradient.

    The per-tensor noise scale is `noise_scale * ||grad|| / threshold`, so
    larger gradients receive proportionally larger perturbations.

    Args:
        clipped_gradients: list of gradient tensors (already clamped).
        noise_scale: base noise multiplier.
        threshold: clipping threshold (float or 0-dim tensor).

    Returns:
        New list of tensors: each gradient plus its scaled Gaussian noise.
    """
    # Floor the denominator: a zero threshold previously produced NaN
    # (0-norm grad) or inf noise scales, poisoning the update.
    safe_threshold = max(float(threshold), 1e-12)
    noisy_gradients = []
    for grad in clipped_gradients:
        noise_scale_adjusted = noise_scale * (torch.norm(grad) / safe_threshold)
        noisy_gradients.append(grad + torch.randn_like(grad) * noise_scale_adjusted)
    return noisy_gradients


def validate(model, validation_loader, device):
    """
    Run one full pass over the validation set and return the mean batch loss.

    Puts the model in eval mode, computes cross-entropy per batch under
    torch.no_grad(), prints the average loss, and returns it.

    Args:
        model: the nn.Module to evaluate.
        validation_loader: iterable of (data, target) batches.
        device: torch.device the batches should be moved to.

    Returns:
        float: average cross-entropy loss across all validation batches.
    """
    model.eval()
    loss_fn = nn.CrossEntropyLoss()
    running_loss = 0.0

    with torch.no_grad():
        for inputs, labels in validation_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            running_loss += loss_fn(model(inputs), labels).item()

    avg_loss = running_loss / len(validation_loader)
    print(f'Validation Loss: {avg_loss}')
    return avg_loss

# Example usage
# Assuming `train_loader` and `validation_loader` are defined, and model is ready
# gpdc(model, train_loader, epochs=10, validation_loader=validation_loader, augment_data=True)
