"""
Training script for DAT-SNet model using component-wise training strategy
"""
import os
import argparse
import torch
import numpy as np
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR
import time
from datetime import datetime
import json
import random

from config import ModelConfig, TrainingConfig, SLEEP_EDF_DIR, CHECKPOINT_DIR, LOGS_DIR, RESULTS_DIR, RUN_ID
from dataloaders.dataset import create_kfold_splits, prepare_dataloaders, SleepEDFAugmentation
from models.dat_snet import DATSNet, FeatureExtractionLoss, SequenceLearningLoss
from utils import (
    TrainingLogger,
    ResultsLogger,
    evaluate_model,
    generate_evaluation_report,
    print_evaluation_report,
    plot_confusion_matrix
)


def set_seed(seed):
    """Seed every RNG source (Python, NumPy, Torch, CUDA) for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if not torch.cuda.is_available():
        return
    # CUDA-specific determinism: seed all devices and pin cuDNN to
    # deterministic kernels (disables autotuning for reproducibility).
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def train_feature_extraction(model, train_loader, val_loader, criterion, optimizer,
                             scheduler, device, epochs, logger, patience=10, save_path=None):
    """
    Train the feature extraction module of DAT-SNet.

    Each batch must provide 'eeg' and 'label' tensors; the model is invoked
    with mode='feature'. Early stopping monitors validation macro-F1, and the
    best-scoring weights are restored into `model` before returning.

    Args:
        model (nn.Module): DAT-SNet model
        train_loader (DataLoader): Training data loader
        val_loader (DataLoader): Validation data loader
        criterion (nn.Module): Loss function
        optimizer (Optimizer): Optimizer
        scheduler (Scheduler): Learning rate scheduler; a ReduceLROnPlateau
            instance is stepped with the validation loss, any other scheduler
            is stepped unconditionally
        device (torch.device): Device to use
        epochs (int): Maximum number of epochs
        logger (TrainingLogger): Logger
        patience (int): Early stopping patience (epochs without val-F1 gain)
        save_path (str, optional): Path to save the best model checkpoint

    Returns:
        dict: Training history with per-epoch train/val loss, accuracy,
            macro-F1, kappa, and learning rate.
    """
    history = {
        'train_loss': [],
        'train_acc': [],
        'train_f1': [],
        'train_kappa': [],
        'val_loss': [],
        'val_acc': [],
        'val_f1': [],
        'val_kappa': [],
        'lr': []
    }

    best_val_f1 = 0
    no_improvement = 0
    best_model_state = None

    for epoch in range(1, epochs + 1):
        # ---- Training phase ----
        model.train()
        train_loss = 0
        train_preds = []
        train_targets = []

        start_time = time.time()

        for batch in train_loader:
            eeg = batch['eeg'].to(device)
            labels = batch['label'].to(device)

            # Forward pass (feature-extraction head only)
            logits = model(eeg, mode='feature')
            loss = criterion(logits, labels)

            # Backward pass and optimization
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Track statistics
            train_loss += loss.item()
            _, preds = torch.max(logits, dim=1)
            train_preds.extend(preds.cpu().numpy())
            train_targets.extend(labels.cpu().numpy())

        # Calculate training metrics
        train_loss /= len(train_loader)
        train_metrics = generate_evaluation_report(
            np.array(train_targets),
            np.array(train_preds)
        )

        # ---- Validation phase ----
        model.eval()
        val_loss = 0
        val_preds = []
        val_targets = []

        with torch.no_grad():
            for batch in val_loader:
                eeg = batch['eeg'].to(device)
                labels = batch['label'].to(device)

                logits = model(eeg, mode='feature')
                loss = criterion(logits, labels)

                val_loss += loss.item()
                _, preds = torch.max(logits, dim=1)
                val_preds.extend(preds.cpu().numpy())
                val_targets.extend(labels.cpu().numpy())

        # Calculate validation metrics
        val_loss /= len(val_loader)
        val_metrics = generate_evaluation_report(
            np.array(val_targets),
            np.array(val_preds)
        )

        # Update learning rate (plateau schedulers need the monitored metric)
        if scheduler is not None:
            if isinstance(scheduler, ReduceLROnPlateau):
                scheduler.step(val_loss)
            else:
                scheduler.step()

        # Log epoch wall-clock duration
        logger.log_message(f"Epoch {epoch} finished in {time.time() - start_time:.1f}s")

        # Log metrics
        logger.log_epoch(
            epoch, 'train',
            {
                'loss': train_loss,
                'accuracy': train_metrics['overall']['accuracy'],
                'macro_f1': train_metrics['overall']['macro_f1'],
                'kappa': train_metrics['overall']['kappa']
            },
            optimizer.param_groups[0]['lr']
        )

        logger.log_epoch(
            epoch, 'val',
            {
                'loss': val_loss,
                'accuracy': val_metrics['overall']['accuracy'],
                'macro_f1': val_metrics['overall']['macro_f1'],
                'kappa': val_metrics['overall']['kappa']
            }
        )

        # Update history
        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_metrics['overall']['accuracy'])
        history['train_f1'].append(train_metrics['overall']['macro_f1'])
        history['train_kappa'].append(train_metrics['overall']['kappa'])
        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_metrics['overall']['accuracy'])
        history['val_f1'].append(val_metrics['overall']['macro_f1'])
        history['val_kappa'].append(val_metrics['overall']['kappa'])
        history['lr'].append(optimizer.param_groups[0]['lr'])

        # Check for improvement
        current_val_f1 = val_metrics['overall']['macro_f1']

        if current_val_f1 > best_val_f1:
            best_val_f1 = current_val_f1
            no_improvement = 0

            # BUGFIX: model.state_dict() returns live references to the
            # parameter tensors, so a plain assignment would silently track
            # later epochs instead of freezing this one. Clone each tensor
            # so the snapshot really is the best epoch's weights.
            best_model_state = {
                k: v.detach().clone() for k, v in model.state_dict().items()
            }

            if save_path:
                torch.save(model.state_dict(), save_path)
                logger.log_message(f"Model saved to {save_path} (F1: {best_val_f1:.4f})")
        else:
            no_improvement += 1
            logger.log_message(f"No improvement for {no_improvement} epochs")

        # Early stopping
        if no_improvement >= patience:
            logger.log_message(f"Early stopping at epoch {epoch}")
            break

    # Restore the best-scoring weights before handing the model back
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return history


def train_sequence_learning(model, train_loader, val_loader, criterion, optimizer,
                            scheduler, device, epochs, logger, patience=10, save_path=None):
    """
    Train the sequence learning module of DAT-SNet.

    Each batch must provide 'eeg_seq' (batch, seq_len, 1, signal_length) and
    'target' (batch,) tensors; the model is invoked with mode='full'. Early
    stopping monitors validation macro-F1, and the best-scoring weights are
    restored into `model` before returning.

    Args:
        model (nn.Module): DAT-SNet model
        train_loader (DataLoader): Training data loader
        val_loader (DataLoader): Validation data loader
        criterion (nn.Module): Loss function
        optimizer (Optimizer): Optimizer
        scheduler (Scheduler): Learning rate scheduler; a ReduceLROnPlateau
            instance is stepped with the validation loss, any other scheduler
            is stepped unconditionally
        device (torch.device): Device to use
        epochs (int): Maximum number of epochs
        logger (TrainingLogger): Logger
        patience (int): Early stopping patience (epochs without val-F1 gain)
        save_path (str, optional): Path to save the best model checkpoint

    Returns:
        dict: Training history with per-epoch train/val loss, accuracy,
            macro-F1, kappa, and learning rate.
    """
    history = {
        'train_loss': [],
        'train_acc': [],
        'train_f1': [],
        'train_kappa': [],
        'val_loss': [],
        'val_acc': [],
        'val_f1': [],
        'val_kappa': [],
        'lr': []
    }

    best_val_f1 = 0
    no_improvement = 0
    best_model_state = None

    for epoch in range(1, epochs + 1):
        # ---- Training phase ----
        model.train()
        train_loss = 0
        train_preds = []
        train_targets = []

        start_time = time.time()

        for batch in train_loader:
            eeg_seq = batch['eeg_seq'].to(device)  # (batch, seq_len, 1, signal_length)
            target = batch['target'].to(device)  # (batch,)

            # Forward pass (full model; auxiliary outputs are discarded here)
            logits, _, _ = model(eeg_seq, mode='full')
            loss = criterion(logits, target)

            # Backward pass and optimization
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Track statistics
            train_loss += loss.item()
            _, preds = torch.max(logits, dim=1)
            train_preds.extend(preds.cpu().numpy())
            train_targets.extend(target.cpu().numpy())

        # Calculate training metrics
        train_loss /= len(train_loader)
        train_metrics = generate_evaluation_report(
            np.array(train_targets),
            np.array(train_preds)
        )

        # ---- Validation phase ----
        model.eval()
        val_loss = 0
        val_preds = []
        val_targets = []

        with torch.no_grad():
            for batch in val_loader:
                eeg_seq = batch['eeg_seq'].to(device)
                target = batch['target'].to(device)

                logits, _, _ = model(eeg_seq, mode='full')
                loss = criterion(logits, target)

                val_loss += loss.item()
                _, preds = torch.max(logits, dim=1)
                val_preds.extend(preds.cpu().numpy())
                val_targets.extend(target.cpu().numpy())

        # Calculate validation metrics
        val_loss /= len(val_loader)
        val_metrics = generate_evaluation_report(
            np.array(val_targets),
            np.array(val_preds)
        )

        # Update learning rate (plateau schedulers need the monitored metric)
        if scheduler is not None:
            if isinstance(scheduler, ReduceLROnPlateau):
                scheduler.step(val_loss)
            else:
                scheduler.step()

        # Log epoch wall-clock duration
        logger.log_message(f"Epoch {epoch} finished in {time.time() - start_time:.1f}s")

        # Log metrics
        logger.log_epoch(
            epoch, 'train',
            {
                'loss': train_loss,
                'accuracy': train_metrics['overall']['accuracy'],
                'macro_f1': train_metrics['overall']['macro_f1'],
                'kappa': train_metrics['overall']['kappa']
            },
            optimizer.param_groups[0]['lr']
        )

        logger.log_epoch(
            epoch, 'val',
            {
                'loss': val_loss,
                'accuracy': val_metrics['overall']['accuracy'],
                'macro_f1': val_metrics['overall']['macro_f1'],
                'kappa': val_metrics['overall']['kappa']
            }
        )

        # Update history
        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_metrics['overall']['accuracy'])
        history['train_f1'].append(train_metrics['overall']['macro_f1'])
        history['train_kappa'].append(train_metrics['overall']['kappa'])
        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_metrics['overall']['accuracy'])
        history['val_f1'].append(val_metrics['overall']['macro_f1'])
        history['val_kappa'].append(val_metrics['overall']['kappa'])
        history['lr'].append(optimizer.param_groups[0]['lr'])

        # Check for improvement
        current_val_f1 = val_metrics['overall']['macro_f1']

        if current_val_f1 > best_val_f1:
            best_val_f1 = current_val_f1
            no_improvement = 0

            # BUGFIX: model.state_dict() returns live references to the
            # parameter tensors, so a plain assignment would silently track
            # later epochs instead of freezing this one. Clone each tensor
            # so the snapshot really is the best epoch's weights.
            best_model_state = {
                k: v.detach().clone() for k, v in model.state_dict().items()
            }

            if save_path:
                torch.save(model.state_dict(), save_path)
                logger.log_message(f"Model saved to {save_path} (F1: {best_val_f1:.4f})")
        else:
            no_improvement += 1
            logger.log_message(f"No improvement for {no_improvement} epochs")

        # Early stopping
        if no_improvement >= patience:
            logger.log_message(f"Early stopping at epoch {epoch}")
            break

    # Restore the best-scoring weights before handing the model back
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return history


def train_fold(fold_id, train_files, val_files, test_files, args, model_config, training_config):
    """
    Train DAT-SNet on a single fold: feature-extraction phase first, then the
    sequence-learning phase with the feature extractor frozen, followed by
    test-set evaluation and result/plot persistence.

    Args:
        fold_id (int): Fold ID (used in run IDs, log/checkpoint paths)
        train_files (list): List of training file paths
        val_files (list): List of validation file paths
        test_files (list): List of test file paths
        args (Namespace): Command line arguments (num_workers is read here)
        model_config (ModelConfig): Model configuration
        training_config (TrainingConfig): Training configuration

    Returns:
        dict: Test-set evaluation report for this fold (as produced by
            generate_evaluation_report).
    """
    # Setup logging: each fold gets its own run ID and log directory
    run_id = f"{RUN_ID}_{fold_id}"
    train_logger = TrainingLogger(
        log_dir=os.path.join(LOGS_DIR, run_id),
        run_id=run_id,
        config={
            'model': vars(model_config),
            'training': vars(training_config),
            'fold_id': fold_id
        }
    )

    results_logger = ResultsLogger(
        results_dir=RESULTS_DIR,
        run_id=run_id
    )

    # Create checkpoint directory
    checkpoint_dir = os.path.join(CHECKPOINT_DIR, run_id)
    os.makedirs(checkpoint_dir, exist_ok=True)

    # Prepare data: separate loaders for the per-epoch (feature) and
    # sequence-of-epochs (sequence) training phases
    train_logger.log_message(f"Preparing data for fold {fold_id}")
    dataloader_config = prepare_dataloaders(
        train_files=train_files,
        val_files=val_files,
        test_files=test_files,
        batch_size=training_config.batch_size,
        seq_length=model_config.sequence_length,
        num_workers=args.num_workers
    )

    feature_dataloaders = dataloader_config['feature_extraction']
    sequence_dataloaders = dataloader_config['sequence_learning']
    # NOTE(review): class_weights is never used below — the feature criterion
    # reads training_config.class_weights instead. Confirm which source of
    # weights is intended.
    class_weights = dataloader_config['class_weights']

    train_loader, val_loader, test_loader = feature_dataloaders
    seq_train_loader, seq_val_loader, seq_test_loader = sequence_dataloaders

    # Initialize model
    train_logger.log_message("Initializing model")
    model = DATSNet(model_config).to(model_config.device)

    # Component-wise training
    train_logger.log_message("Starting feature extraction training phase")

    # Feature extraction phase: weighted loss, Adam on the feature-extraction
    # sub-module only
    feature_criterion = FeatureExtractionLoss(
        class_weights=torch.tensor(training_config.class_weights, device=model_config.device)
    )

    feature_optimizer = optim.Adam(
        model.feature_extraction.parameters(),
        lr=training_config.learning_rate,
        weight_decay=training_config.weight_decay
    )

    # NOTE(review): the `verbose` argument of ReduceLROnPlateau is deprecated
    # in newer PyTorch releases — confirm the pinned torch version accepts it.
    feature_scheduler = ReduceLROnPlateau(
        feature_optimizer,
        mode='min',
        factor=training_config.lr_scheduler_factor,
        patience=training_config.lr_scheduler_patience,
        verbose=True
    )

    feature_checkpoint_path = os.path.join(checkpoint_dir, "feature_extraction_best.pth")

    feature_history = train_feature_extraction(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        criterion=feature_criterion,
        optimizer=feature_optimizer,
        scheduler=feature_scheduler,
        device=model_config.device,
        epochs=training_config.feature_extraction_epochs,
        logger=train_logger,
        patience=training_config.early_stopping_patience,
        save_path=feature_checkpoint_path
    )

    # Load best feature extraction model.
    # NOTE(review): train_feature_extraction already restores its best
    # weights in place, so this reload looks redundant; strict=False also
    # masks any key mismatch silently — confirm both are intended.
    model.load_state_dict(torch.load(feature_checkpoint_path), strict=False)
    train_logger.log_message("Feature extraction training phase completed")

    # Sequence learning phase
    train_logger.log_message("Starting sequence learning training phase")

    # Freeze feature extraction module so only the sequence module trains
    for param in model.feature_extraction.parameters():
        param.requires_grad = False

    sequence_criterion = SequenceLearningLoss()

    # Optimizer only covers the sequence-learning parameters; the frozen
    # feature extractor receives no updates
    sequence_optimizer = optim.Adam(
        model.sequence_learning.parameters(),
        lr=training_config.learning_rate,
        weight_decay=training_config.weight_decay
    )

    sequence_scheduler = ReduceLROnPlateau(
        sequence_optimizer,
        mode='min',
        factor=training_config.lr_scheduler_factor,
        patience=training_config.lr_scheduler_patience,
        verbose=True
    )

    sequence_checkpoint_path = os.path.join(checkpoint_dir, "sequence_learning_best.pth")

    sequence_history = train_sequence_learning(
        model=model,
        train_loader=seq_train_loader,
        val_loader=seq_val_loader,
        criterion=sequence_criterion,
        optimizer=sequence_optimizer,
        scheduler=sequence_scheduler,
        device=model_config.device,
        epochs=training_config.sequence_learning_epochs,
        logger=train_logger,
        patience=training_config.early_stopping_patience,
        save_path=sequence_checkpoint_path
    )

    # Load best full model (checkpoint saved during the sequence phase)
    model.load_state_dict(torch.load(sequence_checkpoint_path))
    train_logger.log_message("Sequence learning training phase completed")

    # Evaluate on test set using the sequence-learning test loader
    train_logger.log_message("Evaluating model on test set")
    model.eval()

    y_true, y_pred, logits = evaluate_model(
        model=model,
        dataloader=seq_test_loader,
        device=model_config.device,
        mode='full'
    )

    test_report = generate_evaluation_report(y_true, y_pred)
    print_evaluation_report(test_report)

    # Save test results
    results_logger.save_fold_results(
        fold=fold_id,
        report=test_report,
        y_true=y_true,
        y_pred=y_pred,
        logits=logits
    )

    # Plot confusion matrix for the five sleep stages
    results_logger.save_confusion_matrix_plot(
        fold=fold_id,
        cm=test_report['confusion_matrix'],
        class_names=['W', 'N1', 'N2', 'N3', 'REM']
    )

    # Log test metrics (epoch 0 is used as a sentinel for the test split)
    train_logger.log_epoch(
        0, 'test',
        {
            'loss': 0.0,  # We don't have test loss
            'accuracy': test_report['overall']['accuracy'],
            'macro_f1': test_report['overall']['macro_f1'],
            'kappa': test_report['overall']['kappa']
        }
    )

    # Log best metrics
    train_logger.log_best_metrics()

    # Plot training history
    train_logger.plot_training_history(
        save_path=os.path.join(checkpoint_dir, "training_history.png")
    )

    # Save history to CSV
    train_logger.save_history_to_csv(
        path=os.path.join(checkpoint_dir, "training_history.csv")
    )

    # Close logger
    train_logger.close()

    return test_report


def main(args):
    """Run k-fold cross-validated training and print aggregate metrics."""
    set_seed(args.seed)

    # Build configurations once and share them across folds
    model_config = ModelConfig()
    training_config = TrainingConfig()

    # Subject-level k-fold splits over the dataset directory
    splits = create_kfold_splits(
        data_dir=args.data_dir,
        n_folds=args.n_folds
    )

    fold_reports = []
    banner = "=" * 40

    # Train only the first `num_folds` splits (folds are 1-indexed in logs)
    for idx, (train_files, val_files, test_files) in enumerate(splits[:args.num_folds], start=1):
        print(f"\n{banner}")
        print(f"Training fold {idx}/{args.num_folds}")
        print(f"{banner}")

        fold_reports.append(
            train_fold(
                fold_id=idx,
                train_files=train_files,
                val_files=val_files,
                test_files=test_files,
                args=args,
                model_config=model_config,
                training_config=training_config
            )
        )

    # Aggregate per-fold reports into mean ± std summaries
    results_logger = ResultsLogger(
        results_dir=RESULTS_DIR,
        run_id=RUN_ID
    )
    aggregate_report = results_logger.save_aggregate_results(fold_reports)

    overall = aggregate_report['overall']
    print("\n" + banner)
    print("Aggregate Results")
    print(banner)
    print(f"Accuracy: {overall['accuracy']:.4f} ± {overall['std_accuracy']:.4f}")
    print(f"Macro F1: {overall['macro_f1']:.4f} ± {overall['std_macro_f1']:.4f}")
    print(f"Kappa: {overall['kappa']:.4f} ± {overall['std_kappa']:.4f}")

    # Per-class metrics
    print("\nPer-class F1-scores:")
    for cls, metrics in aggregate_report['per_class'].items():
        print(f"{cls}: {metrics['f1']:.4f} ± {metrics['std_f1']:.4f}")

    print("\nTraining completed successfully!")


if __name__ == "__main__":
    # Command-line interface for the training run
    cli = argparse.ArgumentParser(description="Train DAT-SNet for sleep stage classification")

    cli.add_argument("--data_dir", type=str, default="E:/science/Dataset/Data-EDF-20-npz",
                     help="Directory containing the Sleep-EDF dataset")
    cli.add_argument("--seed", type=int, default=628,
                     help="Random seed for reproducibility")
    cli.add_argument("--n_folds", type=int, default=20,
                     help="Number of folds for cross-validation")
    cli.add_argument("--num_folds", type=int, default=20,
                     help="Number of folds to actually train (for testing with fewer folds)")
    cli.add_argument("--num_workers", type=int, default=4,
                     help="Number of workers for data loading")

    main(cli.parse_args())