#!/usr/bin/env python3
"""
Training script for segmentation models using PyTorch Lightning
Based on camvid_segmentation_multiclass.ipynb
"""

import os
import sys
import argparse
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import CSVLogger

from dataset import SegmentationDataLoader
from model import SegmentationLightningModel
from config_loader import ConfigLoader

def _parse_args():
    """Parse and return command-line arguments for the training script."""
    parser = argparse.ArgumentParser(description='Train segmentation model with PyTorch Lightning')
    parser.add_argument('--config', type=str, required=True,
                       help='Path to configuration file')
    parser.add_argument('--resume', type=str, default=None,
                       help='Path to checkpoint to resume training from')
    parser.add_argument('--epochs', type=int, default=None,
                       help='Number of epochs to train (overrides config)')
    parser.add_argument('--fast_dev_run', action='store_true',
                       help='Run a quick training for debugging')
    parser.add_argument('--gpus', type=int, default=1,
                       help='Number of GPUs to use')
    parser.add_argument('--visualize', action='store_true',
                       help='Automatically visualize training metrics after training')
    return parser.parse_args()


def _build_callbacks(config):
    """Create checkpointing and early-stopping callbacks.

    Args:
        config: Loaded configuration object (provides CHECKPOINT_DIR).

    Returns:
        tuple: (ModelCheckpoint, EarlyStopping), both monitoring 'valid_loss'.
    """
    checkpoint_callback = ModelCheckpoint(
        dirpath=config.CHECKPOINT_DIR,
        # Fix: the filename placeholder must name a *logged* metric. The
        # monitored metric is 'valid_loss'; the old '{val_loss:.4f}' placeholder
        # referenced a metric that is never logged, so checkpoint filenames did
        # not contain the real validation loss.
        filename='{epoch}-{valid_loss:.4f}',
        monitor='valid_loss',
        mode='min',
        save_top_k=3,
        save_last=True,
        verbose=True
    )

    early_stop_callback = EarlyStopping(
        monitor='valid_loss',
        min_delta=0.00,
        patience=10,
        verbose=True,
        mode='min'
    )
    return checkpoint_callback, early_stop_callback


def _run_visualization(log_dir):
    """Best-effort run of the metrics-visualization script against log_dir.

    Temporarily rewrites sys.argv so visualize_metrics.main() picks up the
    right CLI options, and always restores it afterwards. Failures are
    reported but never propagate — visualization is optional.
    """
    print("\n=== Starting Metrics Visualization ===")
    try:
        # Import the visualization module (may not be present in all installs)
        from visualize_metrics import main as visualize_main

        # Create a mock argument list for the visualization script.
        # Fix: restore sys.argv in a finally block so a failing
        # visualize_main() cannot leave the interpreter with clobbered argv.
        old_argv = sys.argv
        sys.argv = ['visualize_metrics.py', '--log-dir', log_dir, '--comprehensive', '--individual']
        try:
            visualize_main()
        finally:
            sys.argv = old_argv

    except ImportError:
        print("Visualization module not available. Please run separately:")
        print(f"python visualize_metrics.py --log-dir {log_dir} --comprehensive --individual")
    except Exception as e:
        print(f"Error during visualization: {e}")
        print("You can run visualization separately:")
        print(f"python visualize_metrics.py --log-dir {log_dir}")


def main():
    """Train a segmentation model with PyTorch Lightning.

    Loads the configuration named on the command line, builds data loaders
    and the Lightning model, trains (optionally resuming from a checkpoint),
    tests the best checkpoint, and optionally visualizes the logged metrics.
    """
    args = _parse_args()

    # Load configuration
    config = ConfigLoader.load_config(args.config)
    config.create_directories()

    # Override epochs if specified on the command line
    if args.epochs is not None:
        config.NUM_EPOCHS = args.epochs

    print(f"Training configuration:")
    print(f"  Model: {config.MODEL_NAME}")
    print(f"  Encoder: {config.ENCODER_NAME}")
    print(f"  Dataset: {config.DATASET_CONFIG.DATASET_NAME}")
    print(f"  Classes: {config.NUM_CLASSES}")
    print(f"  Epochs: {config.NUM_EPOCHS}")
    print(f"  Batch size: {config.BATCH_SIZE}")
    print(f"  Learning rate: {config.LEARNING_RATE}")
    print(f"  Device: {config.DEVICE}")

    # Validate dataset paths; missing data is a warning, not fatal, because
    # the loader falls back to dummy data.
    try:
        config.DATASET_CONFIG.validate_paths()
        print(f"Dataset paths validated successfully")
    except FileNotFoundError as e:
        print(f"Warning: {e}")
        print(f"Using dummy data for training")

    # Create data loaders
    data_loader = SegmentationDataLoader(config)
    train_loader, val_loader = data_loader.get_data_loaders()

    # Store loader length for the LR scheduler (steps-per-epoch)
    config.TRAIN_LOADER_LENGTH = len(train_loader)

    print(f"Data loaders created:")
    print(f"  Training samples: {len(train_loader.dataset)}")
    print(f"  Validation samples: {len(val_loader.dataset)}")
    print(f"  Training batches: {len(train_loader)}")

    # Create model
    model = SegmentationLightningModel(config)

    # Setup callbacks and logger
    checkpoint_callback, early_stop_callback = _build_callbacks(config)
    logger = CSVLogger(config.LOG_DIR, name=f"{config.MODEL_NAME}_{config.ENCODER_NAME}")

    # Setup trainer
    trainer = pl.Trainer(
        max_epochs=config.NUM_EPOCHS,
        accelerator='gpu' if torch.cuda.is_available() else 'cpu',
        devices=args.gpus if torch.cuda.is_available() else 1,
        callbacks=[checkpoint_callback, early_stop_callback],
        logger=logger,
        fast_dev_run=args.fast_dev_run,
        log_every_n_steps=10,
        check_val_every_n_epoch=1,
        enable_progress_bar=True,
        enable_model_summary=True,
        deterministic=False  # Disable deterministic mode to avoid CUDA histc errors
    )

    # Train, resuming from a checkpoint if one was given
    if args.resume:
        print(f"Resuming training from checkpoint: {args.resume}")
        trainer.fit(model, train_loader, val_loader, ckpt_path=args.resume)
    else:
        print("Starting training...")
        trainer.fit(model, train_loader, val_loader)

    print("Training completed!")

    # Load best model for testing (best_model_path is '' if nothing was saved,
    # e.g. under fast_dev_run)
    best_model_path = checkpoint_callback.best_model_path
    if best_model_path:
        print(f"Best model saved at: {best_model_path}")

        best_model = SegmentationLightningModel.load_from_checkpoint(
            best_model_path, config=config
        )

        # Test the model
        print("Testing best model...")
        test_loader = data_loader.get_test_loader()
        trainer.test(best_model, test_loader)

    # Display training results
    print("\n=== Training Results ===")
    print(f"Training logs saved to: {logger.log_dir}")
    print(f"Best model saved to: {best_model_path if best_model_path else 'N/A'}")

    # Automatically visualize metrics if requested
    if args.visualize:
        _run_visualization(logger.log_dir)

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()