#!/usr/bin/env python3

"""
Baseline metrics extraction script for Neural-SLAM project
Extracts and analyzes performance metrics from training/evaluation logs
"""

import ast
import glob
import json
import os
import re
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np

def extract_metrics_from_log_files(log_dir="./tmp/"):
    """Extract training/evaluation metrics from Neural-SLAM logs under ``log_dir``.

    Recursively scans for:
      * ``train.log`` — parsed line-by-line for reward and loss entries.
      * ``explored_area.txt`` / ``explored_ratio.txt`` — expected to contain
        one Python list literal per line (one sequence per episode).

    Args:
        log_dir: Root directory to search recursively.

    Returns:
        Dict of metric name -> list of values. ``explored_area`` and
        ``explored_ratio`` hold one sequence (list) per episode, so
        downstream code can take the final value of each episode.
    """
    print(f"📊 Extracting metrics from: {log_dir}")

    metrics = {
        'episode_rewards': [],
        'step_rewards': [],
        'success_rate': [],
        'spl': [],
        'coverage': [],
        'path_length': [],
        'explored_area': [],
        'explored_ratio': [],
        'slam_losses': {
            'proj_loss': [],
            'exp_loss': [],
            'pose_loss': []
        },
        'policy_losses': {
            'local_loss': [],
            'global_value_loss': [],
            'global_action_loss': [],
            'global_entropy': []
        }
    }

    # Patterns compiled once, outside the per-line loop.
    reward_re = re.compile(r'Global eps mean/med/min/max eps rew: ([0-9.-]+)/([0-9.-]+)/([0-9.-]+)/([0-9.-]+)')
    step_re = re.compile(r'Global step mean/med rew: ([0-9.-]+)/([0-9.-]+)')
    slam_re = re.compile(r'SLAM Loss proj/exp/pose:([0-9.-]+)/([0-9.-]+)/([0-9.-]+)')
    local_re = re.compile(r'Local Loss: ([0-9.-]+)')
    global_re = re.compile(r'Global Loss value/action/dist: ([0-9.-]+)/([0-9.-]+)/([0-9.-]+)')

    # Find all log files
    log_files = glob.glob(os.path.join(log_dir, "**/train.log"), recursive=True)

    for log_file in log_files:
        print(f"Processing: {log_file}")
        try:
            with open(log_file, 'r') as f:
                for line in f:
                    # Episode rewards: only the mean (group 1) is kept.
                    m = reward_re.search(line)
                    if m:
                        metrics['episode_rewards'].append(float(m.group(1)))

                    # Step rewards: only the mean is kept.
                    m = step_re.search(line)
                    if m:
                        metrics['step_rewards'].append(float(m.group(1)))

                    # SLAM losses: projection / exploration / pose.
                    m = slam_re.search(line)
                    if m:
                        metrics['slam_losses']['proj_loss'].append(float(m.group(1)))
                        metrics['slam_losses']['exp_loss'].append(float(m.group(2)))
                        metrics['slam_losses']['pose_loss'].append(float(m.group(3)))

                    # Local policy (imitation) loss.
                    m = local_re.search(line)
                    if m:
                        metrics['policy_losses']['local_loss'].append(float(m.group(1)))

                    # Global policy: value / action losses and entropy.
                    m = global_re.search(line)
                    if m:
                        metrics['policy_losses']['global_value_loss'].append(float(m.group(1)))
                        metrics['policy_losses']['global_action_loss'].append(float(m.group(2)))
                        metrics['policy_losses']['global_entropy'].append(float(m.group(3)))

        except Exception as e:
            print(f"Error processing {log_file}: {str(e)}")

    def _load_sequences(pattern, dest):
        """Append one per-episode sequence per line from each matching file.

        FIX vs original: uses ast.literal_eval instead of eval() (eval on
        file contents allows arbitrary code execution), and append()
        instead of extend() (extend flattened the sequences into scalars,
        so the per-episode final-value extraction downstream never fired).
        """
        for path in glob.glob(os.path.join(log_dir, pattern), recursive=True):
            try:
                with open(path, 'r') as f:
                    for raw in f:
                        raw = raw.strip()
                        if not raw:
                            continue  # tolerate blank lines
                        values = ast.literal_eval(raw)  # safe literal parse only
                        if isinstance(values, (list, tuple)):
                            dest.append(list(values))
            except Exception as e:
                print(f"Error processing {path}: {str(e)}")

    # Extract explored area/ratio data (one sequence per episode).
    _load_sequences("**/explored_area.txt", metrics['explored_area'])
    _load_sequences("**/explored_ratio.txt", metrics['explored_ratio'])

    return metrics

def compute_baseline_statistics(metrics):
    """Compute statistical summaries of baseline performance.

    Args:
        metrics: Dict as produced by ``extract_metrics_from_log_files``.

    Returns:
        Dict mapping metric name -> summary dict. All numeric values are
        plain Python floats/ints — np.mean/np.std return np.float64, which
        json.dump (used by generate_baseline_report) cannot serialize, so
        everything is cast at the source.
    """

    def _summary(values, with_range=False, with_final=False):
        """Mean/std/median/count summary of a non-empty sequence, as JSON-safe floats."""
        stats = {
            'mean': float(np.mean(values)),
            'std': float(np.std(values)),
            'median': float(np.median(values)),
        }
        if with_range:
            stats['min'] = float(np.min(values))
            stats['max'] = float(np.max(values))
        if with_final:
            stats['final'] = float(values[-1]) if len(values) else 0
        stats['count'] = len(values)
        return stats

    def _finals(entries):
        """Final value of each per-episode sequence; bare scalars pass through.

        Handles both formats: one sequence per episode (list/tuple/ndarray)
        and an already-flattened list of scalars.
        """
        finals = []
        for entry in entries:
            if isinstance(entry, (list, tuple, np.ndarray)):
                if len(entry) > 0:
                    finals.append(entry[-1])  # take final value of the episode
            else:
                finals.append(entry)
        return finals

    baseline_stats = {}

    # Process episode rewards (range included for the report's min/max line).
    if metrics['episode_rewards']:
        baseline_stats['episode_rewards'] = _summary(metrics['episode_rewards'], with_range=True)

    # Process step rewards.
    if metrics['step_rewards']:
        baseline_stats['step_rewards'] = _summary(metrics['step_rewards'])

    # Explored area is used as a proxy for coverage.
    final_areas = _finals(metrics['explored_area'])
    if final_areas:
        baseline_stats['coverage'] = _summary(final_areas)

    # Explored ratio is used as a proxy for success rate.
    final_ratios = _finals(metrics['explored_ratio'])
    if final_ratios:
        baseline_stats['success_rate'] = _summary(final_ratios)

    # SLAM losses (final value tracked for convergence reporting).
    for loss_type, loss_values in metrics['slam_losses'].items():
        if loss_values:
            baseline_stats[f'slam_{loss_type}'] = _summary(loss_values, with_final=True)

    # Policy losses.
    for loss_type, loss_values in metrics['policy_losses'].items():
        if loss_values:
            baseline_stats[f'policy_{loss_type}'] = _summary(loss_values, with_final=True)

    return baseline_stats

def generate_baseline_report(baseline_stats, output_file="baseline_performance.json"):
    """Generate comprehensive baseline performance report.

    Writes the full report (metadata + metrics) to ``output_file`` as JSON
    and prints a human-readable summary to stdout.

    Args:
        baseline_stats: Per-metric summary dicts from compute_baseline_statistics.
        output_file: Path of the JSON report to write.

    Returns:
        The report dict that was written to disk.
    """
    divider = "=" * 60

    # Assemble the report with extraction metadata up front.
    report = {
        'metadata': {
            'extraction_date': datetime.now().isoformat(),
            'description': 'Baseline Neural-SLAM performance metrics',
            'model': 'Original Neural-SLAM (2020)',
            'environment': 'AI2-THOR / Habitat'
        },
        'metrics': baseline_stats
    }

    # Persist the detailed report.
    with open(output_file, 'w') as f:
        json.dump(report, f, indent=2)

    # Console summary header.
    print("\n🎯 BASELINE PERFORMANCE SUMMARY")
    print(divider)
    print(f"📅 Generated: {report['metadata']['extraction_date']}")
    print(divider)

    # Headline metrics, in display order.
    headline = [
        ('Episode Rewards', 'episode_rewards'),
        ('Success Rate (proxy)', 'success_rate'),
        ('Coverage', 'coverage'),
        ('Step Rewards', 'step_rewards'),
    ]

    for label, stats_key in headline:
        summary = baseline_stats.get(stats_key, {})
        if not summary:
            continue
        print(f"\n📊 {label}:")
        print(f"   Mean: {summary.get('mean', 0):.4f} ± {summary.get('std', 0):.4f}")
        print(f"   Median: {summary.get('median', 0):.4f}")
        if 'min' in summary and 'max' in summary:
            print(f"   Range: [{summary['min']:.4f}, {summary['max']:.4f}]")
        print(f"   Samples: {summary.get('count', 0)}")

    # SLAM loss convergence: final vs average value per loss head.
    print("\n🔧 Training Convergence:")
    for loss_name in ('slam_proj_loss', 'slam_exp_loss', 'slam_pose_loss'):
        loss_stats = baseline_stats.get(loss_name, {})
        if loss_stats:
            print(f"   {loss_name}: {loss_stats.get('final', 0):.6f} (avg: {loss_stats.get('mean', 0):.6f})")

    print(divider)
    print(f"📄 Detailed report saved to: {output_file}")
    print(divider)

    return report

def create_baseline_visualizations(metrics, output_dir="./baseline_plots/"):
    """Create visualization plots for baseline metrics.

    Writes two PNGs into ``output_dir``: a 2x2 grid of training curves and
    a 1x3 row of SLAM loss histograms. Panels whose series are empty are
    left blank.

    Args:
        metrics: Dict as produced by extract_metrics_from_log_files.
        output_dir: Directory for the output images (created if missing).
    """
    os.makedirs(output_dir, exist_ok=True)

    # --- Training curves: one panel per series --------------------------
    fig, axes = plt.subplots(2, 2, figsize=(15, 10))
    fig.suptitle('Neural-SLAM Baseline Training Curves', fontsize=16)

    # (grid position, series, plot style, title, y-label, log-scale y?)
    curve_panels = [
        ((0, 0), metrics['episode_rewards'], 'b-',
         'Episode Rewards', 'Mean Episode Reward', False),
        ((0, 1), metrics['slam_losses']['proj_loss'], 'r-',
         'SLAM Projection Loss', 'Projection Loss', True),
        ((1, 0), metrics['policy_losses']['local_loss'], 'g-',
         'Local Policy Loss', 'Local Loss', False),
        ((1, 1), metrics['policy_losses']['global_value_loss'], 'purple',
         'Global Policy Value Loss', 'Value Loss', False),
    ]

    for (row, col), series, style, title, ylabel, log_y in curve_panels:
        if not series:
            continue
        ax = axes[row, col]
        ax.plot(series, style, alpha=0.7)
        ax.set_title(title)
        ax.set_xlabel('Training Updates')
        ax.set_ylabel(ylabel)
        if log_y:
            ax.set_yscale('log')
        ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'baseline_training_curves.png'), dpi=300, bbox_inches='tight')
    plt.close()

    # --- SLAM loss distributions: one histogram per loss head -----------
    fig, axes = plt.subplots(1, 3, figsize=(18, 5))

    hist_specs = zip(('proj_loss', 'exp_loss', 'pose_loss'),
                     ('red', 'blue', 'green'))
    for idx, (loss_key, shade) in enumerate(hist_specs):
        series = metrics['slam_losses'][loss_key]
        if not series:
            continue
        axes[idx].hist(series, bins=50, alpha=0.7, color=shade, edgecolor='black')
        axes[idx].set_title(f'SLAM {loss_key.replace("_", " ").title()} Distribution')
        axes[idx].set_xlabel('Loss Value')
        axes[idx].set_ylabel('Frequency')
        axes[idx].grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(os.path.join(output_dir, 'baseline_loss_distributions.png'), dpi=300, bbox_inches='tight')
    plt.close()

    print(f"📈 Baseline visualizations saved to: {output_dir}")

def main(log_directories=None):
    """Main function to extract and analyze baseline metrics.

    Args:
        log_directories: Optional list of directories to scan; defaults to
            the project's usual output locations. Non-existent directories
            are skipped silently.

    Returns:
        The report dict produced by generate_baseline_report.
    """
    if log_directories is None:
        # Default locations to search for logs.
        log_directories = ["./tmp/", "./logs/", "./dump/", "./models/"]

    print("🔍 Neural-SLAM Baseline Metrics Extraction")
    print("=" * 50)

    # Empty skeleton matching the shape returned by the extractor.
    all_metrics = {
        'episode_rewards': [],
        'step_rewards': [],
        'success_rate': [],
        'spl': [],
        'coverage': [],
        'path_length': [],
        'explored_area': [],
        'explored_ratio': [],
        'slam_losses': {
            'proj_loss': [],
            'exp_loss': [],
            'pose_loss': []
        },
        'policy_losses': {
            'local_loss': [],
            'global_value_loss': [],
            'global_action_loss': [],
            'global_entropy': []
        }
    }

    # Accumulate metrics from every directory that actually exists.
    for log_dir in log_directories:
        if not os.path.exists(log_dir):
            continue
        print(f"\n📂 Searching directory: {log_dir}")
        found = extract_metrics_from_log_files(log_dir)

        # Merge: nested dicts hold loss sub-series, everything else is flat.
        for key, bucket in all_metrics.items():
            if isinstance(bucket, dict):
                for sub_key in bucket:
                    bucket[sub_key].extend(found[key][sub_key])
            else:
                bucket.extend(found[key])

    baseline_stats = compute_baseline_statistics(all_metrics)
    report = generate_baseline_report(baseline_stats)
    create_baseline_visualizations(all_metrics)

    print("\n✅ Baseline metrics extraction completed!")
    print("   Use these results as your Table 1 baseline comparison.")

    return report

if __name__ == "__main__":
    import sys

    # Any command-line arguments are treated as custom log directories;
    # with none given, main() falls back to its default search paths.
    cli_dirs = sys.argv[1:]
    if cli_dirs:
        main(cli_dirs)
    else:
        main()
