import os
from time import time
import pandas as pd
import torch
from torch.utils.data import (
    TensorDataset,
    DataLoader
)
import torch.nn as nn
import torch.optim as optim
from rich.console import Console
from rich.traceback import install
from warnings import filterwarnings
from utils import (
    DEVICE,
    Model,
    data_preprocessing,
    Logger
)

# Install rich's traceback handler for readable, colorized error output
install()
# Suppress library warnings (keeps training logs clean)
filterwarnings('ignore')
# Shared console used for styled progress/status output throughout this module
console = Console()

def mobile_price_layers(input_dim: int, output_dim: int) -> nn.Sequential:
    """
    Build the feed-forward classifier used for mobile price prediction.

    Args:
        input_dim: Number of input features.
        output_dim: Number of output classes (price ranges).

    Returns:
        nn.Sequential: The stacked layers; the final layer emits raw logits.
    """
    stack = [
        # Project the input features up to the first hidden width
        nn.Linear(input_dim, 256),
        nn.BatchNorm1d(256),
        nn.ReLU(),
        nn.Dropout(0.3),
        # Widen the representation
        nn.Linear(256, 512),
        nn.BatchNorm1d(512),
        nn.ReLU(),
        nn.Dropout(0.4),
        # Widest block, kept at the same size
        nn.Linear(512, 512),
        nn.BatchNorm1d(512),
        nn.ReLU(),
        # Start compressing the features back down
        nn.Linear(512, 256),
        nn.BatchNorm1d(256),
        nn.ReLU(),
        # Further compression before the classification head
        nn.Linear(256, 128),
        nn.ReLU(),
        # Head: raw logits, no activation (CrossEntropyLoss applies softmax)
        nn.Linear(128, output_dim),
    ]
    return nn.Sequential(*stack)

def model_train(data: TensorDataset, model, logger: Logger | None=None) -> None:
    """
    Train the neural network model and save its weights to disk.

    Args:
        data: Training dataset containing feature-target pairs.
        model: Initialized neural network model to train.
        logger: Optional logging component for tracking progress.
    """
    # Fix the RNG seed so shuffling and dropout are reproducible
    torch.manual_seed(0)
    # Data loader with shuffling for SGD
    dl = DataLoader(data, batch_size=8, shuffle=True)
    # Training components
    criterion = nn.CrossEntropyLoss().to(DEVICE)         # Classification loss
    optimizer = optim.SGD(model.parameters(), lr=0.001)  # Stochastic gradient descent
    num_epochs = 100  # Total training cycles
    # Enable training-mode behavior (dropout, batch-norm batch statistics)
    # once up front — the original re-set it on every batch for no benefit.
    model.train()
    for epoch in range(num_epochs):
        # Per-epoch metrics: cumulative loss, correct count, batch count, timer
        total_loss, acc, total_num, start = 0.0, 0, 0, time()
        for x, y in dl:
            x, y = x.to(DEVICE), y.to(DEVICE)
            # Forward pass
            output = model(x)
            # Count correct predictions for the epoch accuracy
            acc += (torch.argmax(output, dim=1).to(DEVICE) == y).sum().item()
            loss = criterion(output, y)
            # Standard backprop step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_num += 1
            total_loss += loss.item()
        # Epoch accuracy over the whole dataset
        accuracy = acc / len(data)
        # Progress reporting
        if logger is not None:
            logger.info('epoch: %03d, loss: %.2f, acc: %.5f, time: %.2fs'
                        % (epoch + 1, total_loss / total_num, accuracy, time() - start))
        else:
            console.print(
                '[bold green]epoch: [bold cyan]%03d[/],' % (epoch + 1),
                '[bold green]loss: [bold cyan]%.2f[/],' % (total_loss / total_num),
                '[bold green]acc: [bold cyan]%.5f[/],' % accuracy,
                '[bold green]time: [bold cyan]%.2fs[/]' % (time() - start)
            )
    # Persist the trained weights (run() creates ./model before calling us)
    torch.save(model.state_dict(), './model/mobile_price.pth')
    info = 'Model `mobile_price.pth` saved successfully!'  # typo fixed: "successfuly"
    if logger is not None:
        logger.info(info)
    else:
        console.print(info, style='bold green')

def model_test(data: TensorDataset, model) -> None:
    """
    Evaluate model performance on the test dataset and print the accuracy.

    Args:
        data: Test dataset containing feature-target pairs.
        model: Trained model to evaluate.
    """
    # Test data loader without shuffling
    dl = DataLoader(data, batch_size=8, shuffle=False)
    # Bug fix: the original docstring promised eval mode but never set it.
    # Without this, Dropout stays active and BatchNorm uses batch statistics,
    # skewing the reported accuracy.
    model.eval()
    acc = 0
    # No gradients needed for inference
    with torch.no_grad():
        for x, y in dl:
            x, y = x.to(DEVICE), y.to(DEVICE)
            output = model(x)
            # Accumulate correct predictions
            acc += (torch.argmax(output, dim=1).to(DEVICE) == y).sum().item()
    # Final accuracy over the whole test set
    accuracy = acc / len(data)
    console.print('[bold green]Accuracy:[/] [bold white]%.5f[/]' % accuracy)

def run(data: pd.DataFrame, logger: Logger | None=None) -> None:
    """
    Main pipeline execution for the mobile price prediction model.

    Args:
        data: Raw dataset containing mobile features and price ranges.
        logger: Optional logging instance for tracking pipeline progress.
    """
    os.makedirs('./model', exist_ok=True)
    model_name = './model/mobile_price.pth'
    # Split/scale the data and discover input/output dimensionality
    train_data, test_data, input_dim, class_num = data_preprocessing(
        data, label_col='price_range'
    )
    # Build the architecture and wrap it in the project Model
    layers = mobile_price_layers(input_dim, class_num)
    model = Model(layers)
    if not os.path.exists(model_name):
        # model_train saves the weights itself and returns None, so its
        # result is not bound (the original assigned it pointlessly).
        model_train(train_data, model, logger)
    else:
        error = f'Model `{os.path.basename(model_name)}` already exists!'
        if logger is not None:
            logger.warning(error)
        else:
            console.print(error, style='bold red')
    # map_location makes the checkpoint loadable even when it was saved on a
    # different device (e.g. trained on GPU, evaluated on CPU).
    model.load_state_dict(torch.load(model_name, map_location=DEVICE))
    # Final evaluation on the held-out test set
    model_test(test_data, model)
