'''
This script trains a Swin Transformer model with Feature Pyramid Network (FPN)
for multi-scale feature extraction and classification.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
from torchvision.models import swin_v2_s, Swin_V2_S_Weights
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from torch.nn import CrossEntropyLoss
from tqdm.auto import tqdm
from timeit import default_timer as timer
from typing import Callable, Dict, List
from torch.utils.data import DataLoader

from data_torch_process import train_loader, val_loader, full_dataset

# Path for the resumable training state (model/optimiser/scheduler/results).
CHECKPOINT_PATH = "checkpoint6.pth"
# Path where the best-by-validation-accuracy weights are saved.
BEST_MODEL_PATH = "flower_model_v6.pth"

# Prefer GPU when available; used for the model, batches and checkpoint restore.
device = "cuda" if torch.cuda.is_available() else "cpu"


class FeaturePyramidNetwork(nn.Module):
    """Feature Pyramid Network that fuses multi-scale feature maps.

    Every input level is projected to a common channel width by a 1x1
    lateral conv, enriched top-down with resized coarser levels, and
    finally smoothed by a 3x3 conv to reduce upsampling aliasing.
    """

    def __init__(self, in_channels_list, out_channels=256):
        super(FeaturePyramidNetwork, self).__init__()
        self.out_channels = out_channels

        # 1x1 lateral projections: unify every level to `out_channels`.
        self.lateral_convs = nn.ModuleList([
            nn.Conv2d(channels, out_channels, kernel_size=1)
            for channels in in_channels_list
        ])

        # 3x3 smoothing convs applied after the top-down fusion.
        self.output_convs = nn.ModuleList([
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
            for _ in in_channels_list
        ])

    def forward(self, features):
        """Fuse a list of feature maps into pyramid outputs.

        Args:
            features: list of NCHW feature maps, one per pyramid level,
                ordered so that features[0] has the largest spatial size
                (the coarser levels are upsampled into the finer ones).
        Returns:
            List of fused feature maps, same length and spatial sizes as
            the inputs, each with `out_channels` channels.
        """
        fused = [conv(feat) for conv, feat in zip(self.lateral_convs, features)]

        # Top-down pathway: merge each coarser level into the next finer one.
        for level in reversed(range(1, len(fused))):
            resized = F.interpolate(fused[level], size=fused[level - 1].shape[-2:],
                                    mode='bilinear', align_corners=False)
            fused[level - 1] = fused[level - 1] + resized

        # Smooth every fused level before returning.
        return [conv(level_map) for conv, level_map in zip(self.output_convs, fused)]


class SwinFPN(nn.Module):
    """Swin-V2-S backbone with an FPN neck and an MLP classification head.

    Intermediate feature maps are tapped from the backbone, fused by a
    FeaturePyramidNetwork, globally pooled per pyramid level, concatenated
    and passed to a small classifier.
    """

    def __init__(self, num_classes, fpn_out_channels=256):
        super(SwinFPN, self).__init__()

        # Pre-trained Swin-V2-S backbone.
        self.swin = swin_v2_s(weights=Swin_V2_S_Weights.DEFAULT)

        # Freeze the entire backbone first...
        for param in self.swin.parameters():
            param.requires_grad = False

        # ...then re-enable gradients for the last three feature modules
        # (the two deepest block stages plus the downsampling layer
        # between them — presumably the most task-specific features).
        if hasattr(self.swin, 'features') and isinstance(self.swin.features, nn.Sequential):
            for module in self.swin.features[-3:]:
                for param in module.parameters():
                    param.requires_grad = True

        # Channel widths of the tapped feature maps, fine to coarse.
        # Taps at features[3], [5], [7] yield 192ch@28x28, 384ch@14x14,
        # 768ch@7x7 for a 224x224 input (C3, C4, C5).
        self.feature_channels = [192, 384, 768]

        # FPN neck fusing the tapped multi-scale features.
        self.fpn = FeaturePyramidNetwork(
            in_channels_list=self.feature_channels,
            out_channels=fpn_out_channels
        )

        # Collapse each pyramid level to a single feature vector.
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

        # Classifier over the concatenation of all three pooled levels.
        classifier_in_features = fpn_out_channels * 3
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(classifier_in_features, classifier_in_features // 2),
            nn.GELU(),
            nn.Dropout(p=0.5),
            nn.Linear(classifier_in_features // 2, num_classes)
        )

    def forward(self, x):
        """Classify a batch of images; returns raw logits [batch, num_classes]."""
        tapped = []

        # Run through the backbone, collecting the multi-scale taps.
        # swin.features has 8 modules; indices 3, 5 and 7 emit the
        # 192/384/768-channel maps the FPN was sized for.
        for idx, stage in enumerate(self.swin.features):
            x = stage(x)
            if idx in [3, 5, 7]:
                # Swin emits NHWC; the FPN's Conv2d layers expect NCHW.
                tapped.append(x.permute(0, 3, 1, 2))

        # Fuse, pool each level to 1x1, concatenate along channels.
        pyramid = self.fpn(tapped)
        pooled = [self.global_pool(level) for level in pyramid]
        fused = torch.cat(pooled, dim=1)

        return self.classifier(fused)


def rand_bbox(size, lam):
    """Sample a random CutMix patch for a batch of NCHW images.

    Args:
        size: tensor size tuple; dims 2 and 3 give the spatial extent.
        lam: mixing coefficient in [0, 1]; the patch covers roughly a
            (1 - lam) fraction of the image area.
    Returns:
        (bbx1, bby1, bbx2, bby2): patch corners, clipped to the image.
    """
    width, height = size[2], size[3]
    # Side length scales with sqrt(1 - lam) so the AREA scales with 1 - lam.
    side_ratio = np.sqrt(1. - lam)
    half_w = int(width * side_ratio) // 2
    half_h = int(height * side_ratio) // 2

    # Random patch centre, uniform over the image.
    centre_x = np.random.randint(width)
    centre_y = np.random.randint(height)

    # Clip the corners so the box stays inside the image.
    bbx1 = np.clip(centre_x - half_w, 0, width)
    bby1 = np.clip(centre_y - half_h, 0, height)
    bbx2 = np.clip(centre_x + half_w, 0, width)
    bby2 = np.clip(centre_y + half_h, 0, height)

    return bbx1, bby1, bbx2, bby2


def accuracy_fn(y_pred: torch.Tensor, y_true: torch.Tensor) -> float:
    """Return the percentage (0-100) of predictions equal to the truth.

    Raises:
        ValueError: if the two tensors differ in length along dim 0.
    """
    if y_pred.size(0) != y_true.size(0):
        raise ValueError("[ERROR] Prediction and truth tensors must have the same length")
    matches = torch.eq(y_pred, y_true).sum().item()
    return (matches / len(y_true)) * 100


def train_step(
    model: nn.Module,
    train_dataloader: DataLoader,
    criterion: nn.Module,
    acc_fn: Callable[[torch.Tensor, torch.Tensor], float],
    optimiser: torch.optim.Optimizer,
    cutmix_prob: float,
    beta_alpha: float,
    device: str = device
) -> tuple[float, float]:
    """Run one training epoch with optional CutMix augmentation.

    With probability `cutmix_prob` (and `beta_alpha` > 0) a batch is
    augmented by pasting a random patch from a shuffled copy of itself,
    and the loss is interpolated between the two label sets. Accuracy is
    always measured against the original labels, so it is only
    approximate on CutMix batches.

    Returns:
        (mean loss, mean accuracy) over the epoch's batches.
    """
    model.train()
    running_loss, running_acc = 0.0, 0.0
    for X, y in tqdm(train_dataloader):
        X, y = X.to(device), y.to(device)
        # Draw unconditionally so the RNG stream is batch-aligned.
        r = np.random.rand(1)
        if beta_alpha > 0 and r < cutmix_prob:
            # Mixing ratio and a shuffled pairing of the batch.
            lam = np.random.beta(beta_alpha, beta_alpha)
            shuffle = torch.randperm(X.size()[0]).to(device)
            labels_a, labels_b = y, y[shuffle]
            # Paste a random patch from the shuffled batch in place.
            x1, y1, x2, y2 = rand_bbox(X.size(), lam)
            X[:, :, x1:x2, y1:y2] = X[shuffle, :, x1:x2, y1:y2]
            # Recompute lam from the actual (clipped) patch area.
            lam = 1 - ((x2 - x1) * (y2 - y1) / (X.size()[-1] * X.size()[-2]))
            logits = model(X)
            loss = criterion(logits, labels_a) * lam + criterion(logits, labels_b) * (1. - lam)
        else:
            logits = model(X)
            loss = criterion(logits, y)
        predictions = torch.argmax(torch.softmax(logits, dim=1), dim=1)
        running_loss += loss.item()
        running_acc += acc_fn(predictions, y)
        optimiser.zero_grad()
        loss.backward()
        # Clip gradients to stabilise transformer fine-tuning.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimiser.step()
    batch_count = len(train_dataloader)
    return running_loss / batch_count, running_acc / batch_count


def val_step(
    model: nn.Module,
    val_dataloader: DataLoader,
    criterion: nn.Module,
    acc_fn: Callable[[torch.Tensor, torch.Tensor], float],
    device: str = device
) -> tuple[float, float]:
    """Evaluate the model over the validation set (no gradient tracking).

    Returns:
        (mean loss, mean accuracy) over the validation batches.
    """
    model.eval()
    total_loss, total_acc = 0.0, 0.0
    # inference_mode disables autograd and view tracking for speed.
    with torch.inference_mode():
        for X, y in val_dataloader:
            X, y = X.to(device), y.to(device)
            logits = model(X)
            total_loss += criterion(logits, y).item()
            preds = torch.argmax(torch.softmax(logits, dim=1), dim=1)
            total_acc += acc_fn(preds, y)
    batch_count = len(val_dataloader)
    return total_loss / batch_count, total_acc / batch_count


def save_checkpoint(epoch, model, optimizer, scheduler, best_val_acc, results):
    """Persist the full training state to CHECKPOINT_PATH for resuming."""
    print("--- Saving checkpoint ---")
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict(),
        'best_val_acc': best_val_acc,
        'results': results,
    }
    torch.save(state, CHECKPOINT_PATH)


def train_model(
    model: nn.Module,
    train_dataloader: DataLoader,
    val_dataloader: DataLoader,
    criterion: nn.Module,
    acc_fn: Callable[[torch.Tensor, torch.Tensor], float],
    optimiser: torch.optim.Optimizer,
    scheduler,
    num_epochs: int,
    start_epoch: int,
    best_val_acc: float,
    results: Dict[str, List[float]],
    cutmix_prob: float = 0.5,
    beta_alpha: float = 1.0
) -> Dict[str, List[float]]:
    """Main training loop: train, validate, track the best model, checkpoint.

    Each epoch runs a training and a validation pass, saves the weights to
    BEST_MODEL_PATH whenever validation accuracy improves, steps the LR
    scheduler and writes a resumable checkpoint.

    Returns:
        The `results` dict with per-epoch loss/accuracy histories appended.
    """
    for epoch in range(start_epoch, num_epochs):
        print(f'Epoch {epoch+1}/{num_epochs}')

        train_loss, train_acc = train_step(
            model, train_dataloader, criterion, acc_fn, optimiser, cutmix_prob, beta_alpha, device
        )
        val_loss, val_acc = val_step(model, val_dataloader, criterion, acc_fn, device)

        # Accuracies are percentages; divide by 100 for fraction display.
        print(f'Train Acc: {train_acc/100:.4f}, Val Acc: {val_acc/100:.4f}')

        # Keep the best-performing weights separately from the checkpoint.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), BEST_MODEL_PATH)
            print(f"New best model saved with validation accuracy: {val_acc/100:.4f}")

        epoch_metrics = {
            "train_loss": train_loss,
            "train_acc": train_acc,
            "val_loss": val_loss,
            "val_acc": val_acc,
        }
        for key, value in epoch_metrics.items():
            results[key].append(value)

        scheduler.step()
        lr_string = ", ".join([f"{g['lr']:.6f}" for g in optimiser.param_groups])
        print(f"Current learning rates: [{lr_string}]")

        # Checkpoint after every epoch so a crash loses at most one epoch.
        save_checkpoint(epoch + 1, model, optimiser, scheduler, best_val_acc, results)

    return results


if __name__ == '__main__':
    # Reproducibility: seed both CPU and CUDA RNGs.
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)

    # 1. Model Definition
    num_classes = len(full_dataset.classes)
    model = SwinFPN(num_classes=num_classes, fpn_out_channels=256).to(device)

    # 2. Loss, Optimizer, Scheduler
    # Label smoothing regularises the classifier; discriminative learning
    # rates: small for deep pretrained stages, larger for the new FPN/head.
    criterion = CrossEntropyLoss(label_smoothing=0.1)
    optimiser = AdamW([
        {'params': model.swin.features[-3].parameters(), 'lr': 1e-5},
        {'params': model.swin.features[-2].parameters(), 'lr': 1e-4},
        {'params': model.swin.features[-1].parameters(), 'lr': 1e-4},
        {'params': model.fpn.parameters(), 'lr': 5e-4},
        {'params': model.classifier.parameters(), 'lr': 1e-3}
    ], weight_decay=1e-2)
    scheduler = CosineAnnealingWarmRestarts(optimiser, T_0=10, T_mult=2, eta_min=1e-6)

    # 3. Load Checkpoint if exists
    start_epoch = 0
    best_val_acc = 0.0
    results = {"train_loss": [], "train_acc": [], "val_loss": [], "val_acc": []}
    if os.path.exists(CHECKPOINT_PATH):
        print("--- Loading checkpoint ---")
        # map_location ensures a checkpoint saved on GPU can be restored on a
        # CPU-only machine (and vice versa); without it torch.load tries to
        # deserialize tensors onto the device they were saved from.
        checkpoint = torch.load(CHECKPOINT_PATH, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimiser.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        start_epoch = checkpoint['epoch']
        best_val_acc = checkpoint['best_val_acc']
        results = checkpoint['results']
        print(f"Resuming training from epoch {start_epoch + 1}")

    # 4. Start Training
    start_time = timer()
    train_model(
        model=model,
        train_dataloader=train_loader,
        val_dataloader=val_loader,
        criterion=criterion,
        acc_fn=accuracy_fn,
        optimiser=optimiser,
        scheduler=scheduler,
        num_epochs=500,
        start_epoch=start_epoch,
        best_val_acc=best_val_acc,
        results=results,
        cutmix_prob=0.2
    )
    end_time = timer()
    print(f"Total training time: {end_time - start_time:.3f} seconds")
