import os
from time import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import transforms
from rich.console import Console
from rich.traceback import install
from warnings import filterwarnings
from utils import (
    DEVICE,
    Model,
    Logger
)

# Enable rich-formatted tracebacks for nicer error reporting
install()
# Suppress library warnings to keep training output clean
filterwarnings('ignore')
# Shared console used for all styled terminal output in this module
console = Console()

def load_cifar10(root: str, base_folder: str = 'cifar-10', device: torch.device = DEVICE) -> tuple[CIFAR10, CIFAR10]:
    """
    Load CIFAR-10 train/test datasets with preprocessing and augmentation.

    Side effect: mutates the class attribute ``CIFAR10.base_folder`` so the
    dataset is read from ``root/base_folder`` instead of torchvision's
    default subdirectory. This affects every ``CIFAR10`` instance created
    afterwards, not just the two returned here.

    Args:
        root (str): Root directory for dataset storage.
        base_folder (str): Subdirectory name for CIFAR-10 data.
        device (torch.device): Kept for backward compatibility; unused.
            Transforms run on CPU (the augmentations operate on PIL images
            before ``ToTensor``), and batches are moved to the device in the
            train/test loops instead.

    Returns:
        tuple[CIFAR10, CIFAR10]: (train_dataset, test_dataset).
    """
    # Redirect the dataset subdirectory (class-level attribute)
    CIFAR10.base_folder = base_folder
    # Standard CIFAR-10 per-channel statistics
    mean = (0.4914, 0.4822, 0.4465)
    std = (0.2470, 0.2435, 0.2616)
    # Training transforms with data augmentation.
    # NOTE: the previous `.to(device)` calls on these transforms were no-ops
    # (parameterless modules; `.to()` returns self unchanged) and implied GPU
    # preprocessing that never happened, so they have been removed.
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),     # Random crop with padding
        transforms.RandomHorizontalFlip(),        # 50% horizontal flip
        transforms.ColorJitter(0.2, 0.2, 0.2),    # Color variation augmentation
        transforms.ToTensor(),                    # Convert to [0,1] range tensor
        transforms.Normalize(mean=mean, std=std)  # Per-channel normalization
    ])
    # Validation transforms (no augmentation)
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])
    # Load datasets with their respective transforms
    cifar10_train = CIFAR10(
        root=root,
        train=True,
        transform=train_transform  # Apply training augmentations
    )
    cifar10_test = CIFAR10(
        root=root,
        train=False,
        transform=test_transform   # Apply validation transforms
    )
    return cifar10_train, cifar10_test

def cifar_image_layers() -> nn.Sequential:
    """
    Build the CIFAR-10 CNN as a flat sequential model.

    Architecture:
    - Input: 32x32 RGB images (3 channels).
    - Three double-conv blocks (64 -> 128 -> 256 channels), each conv
      followed by BatchNorm and ReLU; the first two blocks end in a 2x2
      max-pool, the last in global average pooling.
    - Classification head: Flatten -> Linear(256, 512) -> Dropout(0.5)
      -> Linear(512, 10).

    Returns:
        nn.Sequential: Complete model architecture (25 modules, flat).
    """
    def conv_unit(c_in: int, c_out: int) -> list:
        # One Conv -> BatchNorm -> ReLU step; 3x3 kernel with padding 1
        # keeps spatial dimensions unchanged.
        return [
            nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
            nn.BatchNorm2d(c_out),
            nn.ReLU(inplace=True),
        ]

    modules: list[nn.Module] = []
    # Block 1: two convs at 64 channels, then downsample 32x32 -> 16x16
    modules += conv_unit(3, 64) + conv_unit(64, 64) + [nn.MaxPool2d(2)]
    # Block 2: two convs at 128 channels, then downsample 16x16 -> 8x8
    modules += conv_unit(64, 128) + conv_unit(128, 128) + [nn.MaxPool2d(2)]
    # Block 3: two convs at 256 channels, then global feature aggregation
    modules += conv_unit(128, 256) + conv_unit(256, 256) + [nn.AdaptiveAvgPool2d((1, 1))]
    # Classification head.
    # NOTE(review): there is no activation between the two Linear layers, so
    # in eval mode they compose to a single affine map — possibly intentional,
    # but worth confirming (fixing it would shift state_dict indices and
    # break already-saved checkpoints).
    modules += [
        nn.Flatten(),          # (N, 256, 1, 1) -> (N, 256)
        nn.Linear(256, 512),   # Expand features for better separation
        nn.Dropout(0.5),       # Regularization for the FC head
        nn.Linear(512, 10),    # Final class scores (10 classes)
    ]
    # Flat Sequential preserves the original module order and state_dict keys
    return nn.Sequential(*modules)

def model_train(data: CIFAR10, model, logger: Logger | None = None) -> None:
    """
    Train the CIFAR-10 classifier and save its weights.

    Workflow:
    1. Build a shuffled DataLoader over the training data.
    2. Configure cross-entropy loss and the Adam optimizer.
    3. Run the epoch loop with forward/backward passes.
    4. Log per-epoch loss / accuracy / wall time.
    5. Save the final weights to ``./model/cifar10_image.pth``.

    Args:
        data (CIFAR10): Training dataset (50k images).
        model (nn.Module): Model instance to train (assumed already on DEVICE).
        logger (Logger, optional): Logger instance; console output is used
            when no logger is provided.
    """
    # Small batch size for noisier (more exploratory) gradient estimates
    dl = DataLoader(data, batch_size=8, shuffle=True)
    # Cross entropy loss and Adam optimizer with standard learning rate
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    num_epochs = 100  # Sufficient for convergence with this architecture
    # Training mode is loop-invariant: set it once, nothing toggles eval() here
    model.train()
    for epoch in range(num_epochs):
        # Per-epoch metric accumulators
        total_loss, correct, total_num, start = 0.0, 0, 0, time()
        for x, y in dl:
            x, y = x.to(DEVICE), y.to(DEVICE)
            # Forward pass
            y_predict = model(x)
            loss = criterion(y_predict, y)
            # Backward pass and optimization
            optimizer.zero_grad()  # Clear previous gradients
            loss.backward()        # Calculate new gradients
            optimizer.step()       # Update weights
            # .item() converts the 0-dim tensors to plain Python numbers so
            # the accumulators are ints/floats, not device tensors
            correct += (torch.argmax(y_predict, dim=-1) == y).sum().item()
            total_num += len(y)
            total_loss += loss.item() * len(y)
        # Epoch statistics
        accuracy = correct / total_num
        if logger is not None:
            logger.info('epoch: %03d, loss: %.2f, acc: %.5f, time: %.2fs'
                        % (epoch + 1, total_loss / total_num, accuracy, time() - start))
        else:
            console.print(
                '[bold green]epoch: [bold cyan]%03d[/],' % (epoch + 1),
                '[bold green]loss: [bold cyan]%.2f[/],' % (total_loss / total_num),
                '[bold green]acc: [bold cyan]%.5f[/],' % accuracy,
                '[bold green]time: [bold cyan]%.2fs[/]' % (time() - start)
            )
    # Persist final weights (fixed filename; `run` checks for its existence)
    torch.save(model.state_dict(), './model/cifar10_image.pth')
    info = 'Model `cifar10_image.pth` saved successfully!'
    if logger is not None:
        logger.info(info)
    else:
        console.print(info, style='bold green')

def model_test(data: CIFAR10, model) -> None:
    """
    Evaluate model accuracy on the CIFAR-10 test dataset.

    Workflow:
    1. Create a test DataLoader without shuffling.
    2. Switch the model to evaluation mode (disables dropout, uses BN
       running statistics).
    3. Accumulate correct predictions over all batches under no_grad.
    4. Print the final accuracy.

    Args:
        data (CIFAR10): Test dataset containing 10k images.
        model (nn.Module): Trained model for evaluation (assumed on DEVICE).
    """
    # Keep original test order (shuffle=False)
    dl = DataLoader(data, batch_size=8, shuffle=False)
    # Evaluation mode is loop-invariant: set it once before iterating
    model.eval()
    correct, total_num = 0, 0
    # Inference only — no_grad avoids building autograd graphs and saves memory
    with torch.no_grad():
        for x, y in dl:
            x, y = x.to(DEVICE), y.to(DEVICE)
            y_predict = model(x)
            # argmax output is already on DEVICE; .item() yields a plain int
            correct += (torch.argmax(y_predict, dim=-1) == y).sum().item()
            total_num += len(y)
    # Final accuracy over the whole test set
    accuracy = correct / total_num
    console.print('[bold green]Accuracy: [bold cyan]%.5f[/]' % accuracy)

def run(data_root: str, logger: Logger | None = None) -> None:
    """
    Main pipeline for CIFAR-10 image classification training and evaluation.

    Workflow:
    1. Load and preprocess CIFAR-10 dataset.
    2. Initialize CNN model architecture.
    3. Train the model and save its weights (skipped if weights exist).
    4. Load saved weights and evaluate on the test set.

    Args:
        data_root (str): Root directory for dataset storage.
        logger (Logger, optional): Custom logger instance for tracking.
    """
    os.makedirs('./model', exist_ok=True)
    model_name = './model/cifar10_image.pth'
    # Load training and test datasets with augmentation
    train_data, test_data = load_cifar10(data_root)
    # Initialize CNN architecture for 32x32 RGB images
    layers = cifar_image_layers()
    # Create model instance (Model wrapper presumably moves it to DEVICE)
    model = Model(layers)
    if not os.path.exists(model_name):
        # Train model and save weights
        model_train(train_data, model, logger)
    else:
        warning = f'Model `{os.path.basename(model_name)}` already exists!'
        if logger is not None:
            logger.warning(warning)
        else:
            console.print(warning, style='bold red')
    # map_location makes a GPU-saved checkpoint loadable on a CPU-only host
    model.load_state_dict(torch.load(model_name, map_location=DEVICE))
    # Evaluate model accuracy on the untouched test dataset
    model_test(test_data, model)
