#!/usr/bin/env python3
"""
Training metrics visualization script for segmentation models
Visualizes comprehensive training metrics with subplots
"""

import os
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path

def load_and_analyze_metrics(csv_path):
    """Load a metrics CSV into a DataFrame and print a quick summary.

    Args:
        csv_path: Path to the metrics.csv file produced during training.

    Returns:
        pandas.DataFrame containing the raw metrics.

    Raises:
        FileNotFoundError: If ``csv_path`` does not exist.
    """
    if not os.path.exists(csv_path):
        raise FileNotFoundError(f"Metrics file not found: {csv_path}")

    df = pd.read_csv(csv_path)
    print(f"Loaded metrics with {len(df)} rows and {len(df.columns)} columns")
    print(f"Available columns: {list(df.columns)}")

    # Basic statistics
    print("\n=== Basic Statistics ===")
    for col in df.columns:
        # is_numeric_dtype covers float32/int32 etc., not just float64/int64
        # (the previous explicit dtype list silently skipped narrower dtypes).
        # Bool columns are excluded since min/max on them isn't meaningful here.
        if (col != 'epoch'
                and pd.api.types.is_numeric_dtype(df[col])
                and not pd.api.types.is_bool_dtype(df[col])):
            print(f"{col}: min={df[col].min():.4f}, max={df[col].max():.4f}, mean={df[col].mean():.4f}")

    return df

def create_comprehensive_plots(df, output_dir, prefix=""):
    """Render every recognised metric family as stacked subplots in one figure.

    Args:
        df: DataFrame with an 'epoch' column and any subset of metric columns.
        output_dir: Directory the combined PNG is written into (must exist).
        prefix: Optional filename prefix for the output file.
    """
    # Candidate column names grouped by the metric family they belong to.
    metric_families = {
        'loss': ['train_loss', 'valid_loss'],
        'accuracy': ['train_acc', 'valid_acc'],
        'iou': ['train_iou', 'valid_iou'],
        'dice': ['train_dice', 'valid_dice'],
        'learning_rate': ['lr-Adam'],
    }

    # Keep only the families with at least one matching column in the data.
    present = {
        family: [c for c in cols if c in df.columns]
        for family, cols in metric_families.items()
    }
    present = {family: cols for family, cols in present.items() if cols}

    if not present:
        print("No valid metrics found for plotting")
        return

    n_rows = len(present)
    fig, axes = plt.subplots(n_rows, 1, figsize=(12, 4 * n_rows))
    if n_rows == 1:
        axes = [axes]  # plt.subplots returns a bare Axes when there is one row

    for ax, (family, cols) in zip(axes, present.items()):
        for col in cols:
            # Drop rows where this metric wasn't logged so lines stay continuous.
            series = df[['epoch', col]].dropna()
            if not series.empty:
                ax.plot(series['epoch'], series[col],
                        label=col.replace('_', ' ').title(),
                        linewidth=2, alpha=0.8)

        ax.set_xlabel('Epoch')
        ax.set_ylabel(family.title())
        ax.set_title(f'{family.title()} Metrics')
        ax.legend()
        ax.grid(True, alpha=0.3)

        # Anchor bounded metrics at zero so scale differences don't mislead.
        if family in ['loss', 'accuracy', 'iou', 'dice']:
            ax.set_ylim(bottom=0)

    plt.tight_layout()

    output_path = os.path.join(output_dir, f"{prefix}comprehensive_metrics.png")
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    print(f"Comprehensive metrics plot saved to: {output_path}")
    plt.close()

def create_individual_plots(df, output_dir, prefix=""):
    """Create individual plots for each metric family.

    Emits one PNG per family (loss, accuracy, IoU, dice) plus the
    learning-rate schedule — the same families shown in the comprehensive
    plot (dice was previously missing here). Rows with NaN for a metric
    (e.g. train/valid logged on alternating rows) are dropped per series
    so the lines stay continuous, matching the comprehensive plot.

    Args:
        df: DataFrame with an 'epoch' column and any subset of metric columns.
        output_dir: Directory the PNG files are written into (must exist).
        prefix: Optional filename prefix for every plot.
    """
    _plot_metric_family(
        df, output_dir, prefix,
        series=[('train_loss', 'Training Loss', 'blue'),
                ('valid_loss', 'Validation Loss', 'red')],
        ylabel='Loss', title='Training and Validation Loss',
        filename='loss_metrics.png', print_name='Loss', ylim='nonneg')

    _plot_metric_family(
        df, output_dir, prefix,
        series=[('train_acc', 'Training Accuracy', 'green'),
                ('valid_acc', 'Validation Accuracy', 'orange')],
        ylabel='Accuracy', title='Training and Validation Accuracy',
        filename='accuracy_metrics.png', print_name='Accuracy', ylim='unit')

    _plot_metric_family(
        df, output_dir, prefix,
        series=[('train_iou', 'Training IoU', 'purple'),
                ('valid_iou', 'Validation IoU', 'brown')],
        ylabel='IoU Score', title='Training and Validation IoU',
        filename='iou_metrics.png', print_name='IoU', ylim='unit')

    # Dice is part of the comprehensive figure; give it an individual plot too.
    _plot_metric_family(
        df, output_dir, prefix,
        series=[('train_dice', 'Training Dice', 'magenta'),
                ('valid_dice', 'Validation Dice', 'cyan')],
        ylabel='Dice Score', title='Training and Validation Dice',
        filename='dice_metrics.png', print_name='Dice', ylim='unit')

    _plot_metric_family(
        df, output_dir, prefix,
        series=[('lr-Adam', 'Learning Rate', 'teal')],
        ylabel='Learning Rate', title='Learning Rate Schedule',
        filename='learning_rate.png', print_name='Learning rate', log_y=True)


def _plot_metric_family(df, output_dir, prefix, series, ylabel, title,
                        filename, print_name, ylim=None, log_y=False):
    """Plot one family of metric columns to a single PNG; no-op if absent.

    Args:
        df: DataFrame with an 'epoch' column plus the metric columns.
        output_dir: Directory the PNG is written into.
        prefix: Filename prefix.
        series: List of (column, legend label, color) tuples. Columns missing
            from df are skipped; the whole plot is skipped if none exist.
        ylabel: Y-axis label.
        title: Plot title.
        filename: Output filename (appended to prefix).
        print_name: Human-readable name used in the "saved to" message.
        ylim: 'nonneg' anchors the y-axis at 0, 'unit' fixes it to [0, 1],
            None leaves autoscaling.
        log_y: Use a logarithmic y-axis (for learning-rate schedules).
    """
    present = [(col, label, color) for col, label, color in series
               if col in df.columns]
    if not present:
        return

    plt.figure(figsize=(10, 6))
    for col, label, color in present:
        # Drop rows where this metric wasn't logged so lines stay continuous.
        data = df[['epoch', col]].dropna()
        if not data.empty:
            plt.plot(data['epoch'], data[col], label=label,
                     linewidth=2, alpha=0.8, color=color)

    plt.xlabel('Epoch')
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend()
    plt.grid(True, alpha=0.3)
    if ylim == 'nonneg':
        plt.ylim(bottom=0)
    elif ylim == 'unit':
        plt.ylim(0, 1)
    if log_y:
        plt.yscale('log')

    output_path = os.path.join(output_dir, f"{prefix}{filename}")
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    print(f"{print_name} plot saved to: {output_path}")
    plt.close()

def create_summary_statistics(df, output_dir, prefix=""):
    """Print best/final values for validation loss, accuracy and IoU.

    Args:
        df: DataFrame of training metrics; the 'epoch' column (when present)
            is used to report the epoch a best value occurred in.
        output_dir: Unused; kept for interface symmetry with the plot helpers.
        prefix: Unused; kept for interface symmetry with the plot helpers.
    """
    def _epoch_of(idx):
        # Report the actual epoch number when available. The previous
        # ``idx + 1`` mapping was wrong whenever the row index did not equal
        # epoch - 1 (0-based epochs, multiple logged rows per epoch).
        if 'epoch' in df.columns:
            return df.loc[idx, 'epoch']
        return idx + 1

    print("\n=== Detailed Analysis ===")

    # Loss analysis
    if 'valid_loss' in df.columns:
        best_valid_loss = df['valid_loss'].min()
        best_epoch = _epoch_of(df['valid_loss'].idxmin())
        final_valid_loss = df['valid_loss'].iloc[-1]

        print(f"Best Validation Loss: {best_valid_loss:.4f} (Epoch {best_epoch})")
        print(f"Final Validation Loss: {final_valid_loss:.4f}")

        if 'train_loss' in df.columns:
            final_train_loss = df['train_loss'].iloc[-1]
            # Rough generalization-gap indicator (> 1 suggests overfitting).
            overfitting_ratio = final_valid_loss / final_train_loss
            print(f"Final Training Loss: {final_train_loss:.4f}")
            print(f"Overfitting Ratio: {overfitting_ratio:.2f}")

    # Accuracy analysis
    if 'valid_acc' in df.columns:
        best_valid_acc = df['valid_acc'].max()
        best_acc_epoch = _epoch_of(df['valid_acc'].idxmax())
        final_valid_acc = df['valid_acc'].iloc[-1]

        print(f"Best Validation Accuracy: {best_valid_acc:.4f} (Epoch {best_acc_epoch})")
        print(f"Final Validation Accuracy: {final_valid_acc:.4f}")

    # IoU analysis
    if 'valid_iou' in df.columns:
        best_valid_iou = df['valid_iou'].max()
        best_iou_epoch = _epoch_of(df['valid_iou'].idxmax())
        final_valid_iou = df['valid_iou'].iloc[-1]

        print(f"Best Validation IoU: {best_valid_iou:.4f} (Epoch {best_iou_epoch})")
        print(f"Final Validation IoU: {final_valid_iou:.4f}")

def main():
    """CLI entry point: parse arguments, load metrics, and emit plots/stats.

    Exits with status 1 on failure so callers (shell scripts, CI) can
    detect errors; previously any exception was swallowed and the script
    exited 0.
    """
    parser = argparse.ArgumentParser(description='Visualize training metrics for segmentation models')
    parser.add_argument('--log-dir', type=str, required=True,
                       help='Path to directory containing metrics.csv file')
    parser.add_argument('--output-dir', type=str, default=None,
                       help='Output directory for plots (default: same as log-dir)')
    parser.add_argument('--prefix', type=str, default='',
                       help='Prefix for output filenames')
    parser.add_argument('--comprehensive', action='store_true',
                       help='Create comprehensive subplot visualization')
    parser.add_argument('--individual', action='store_true',
                       help='Create individual plots for each metric type')

    args = parser.parse_args()

    # Default the output location to sit next to the logs.
    if args.output_dir is None:
        args.output_dir = args.log_dir

    # Create output directory if it doesn't exist
    os.makedirs(args.output_dir, exist_ok=True)

    # Find metrics.csv file
    csv_path = os.path.join(args.log_dir, 'metrics.csv')

    try:
        # Load and analyze metrics
        df = load_and_analyze_metrics(csv_path)

        # Create plots based on arguments
        if args.comprehensive:
            create_comprehensive_plots(df, args.output_dir, args.prefix)

        if args.individual:
            create_individual_plots(df, args.output_dir, args.prefix)

        # If no specific plot type requested, create both
        if not args.comprehensive and not args.individual:
            create_comprehensive_plots(df, args.output_dir, args.prefix)
            create_individual_plots(df, args.output_dir, args.prefix)

        # Create summary statistics
        create_summary_statistics(df, args.output_dir, args.prefix)

        print(f"\nVisualization completed!")
        print(f"Log directory: {args.log_dir}")
        print(f"Output directory: {args.output_dir}")

    except Exception as e:
        print(f"Error: {e}")
        print("Please check that the log directory contains a valid metrics.csv file")
        # Propagate failure to the shell via a non-zero exit code.
        raise SystemExit(1)

# Allow the module to be imported without side effects; run only as a script.
if __name__ == "__main__":
    main()