import os
import sys
import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
import argparse

# Add current directory to Python path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from config_loader import ConfigLoader
from model import SegmentationLightningModel
from dataset import SegmentationDataLoader
import pytorch_lightning as pl

class Evaluator:
    """Evaluator for a trained segmentation model.

    Loads the best available checkpoint and provides three tasks:
    quantitative evaluation (loss, pixel accuracy, per-class IoU,
    confusion matrix, classification report), qualitative prediction
    visualization, and validation-set class-distribution analysis.
    """

    # Label value excluded from the loss and from accuracy/IoU statistics
    # (matches the ignore_index passed to cross_entropy below).
    IGNORE_INDEX = 255

    def __init__(self, config):
        """Store config, resolve the torch device, create output dirs, seed RNGs.

        Args:
            config: project configuration object exposing DEVICE,
                CHECKPOINT_DIR, LOG_DIR, NUM_CLASSES, DATASET_CONFIG, etc.
        """
        self.config = config
        self.device = torch.device(config.DEVICE)

        # Create necessary directories (checkpoints, logs, ...)
        config.create_directories()

        # Fixed seed so repeated evaluation runs are reproducible
        torch.manual_seed(42)
        np.random.seed(42)

    def load_best_model(self):
        """Load the best trained model from CHECKPOINT_DIR.

        Prefers checkpoints whose filename contains 'best'; otherwise falls
        back to any '.ckpt' file. Among candidates, the most recently
        modified checkpoint is used (os.listdir order is arbitrary).

        Returns:
            The loaded model in eval mode, or None if no checkpoint exists
            or loading fails.
        """
        print("Loading best trained model...")

        ckpt_dir = self.config.CHECKPOINT_DIR
        checkpoint_files = [f for f in os.listdir(ckpt_dir)
                            if f.endswith('.ckpt') and 'best' in f]

        if not checkpoint_files:
            # If no 'best' checkpoint, try to find any checkpoint
            checkpoint_files = [f for f in os.listdir(ckpt_dir)
                                if f.endswith('.ckpt')]

        if not checkpoint_files:
            print("Error: No checkpoint files found")
            return None

        # BUG FIX: the original used checkpoint_files[0], which depends on
        # arbitrary directory order. Use the most recently modified file.
        checkpoint_path = max(
            (os.path.join(ckpt_dir, f) for f in checkpoint_files),
            key=os.path.getmtime,
        )

        try:
            # Load model using PyTorch Lightning
            model = SegmentationLightningModel.load_from_checkpoint(
                checkpoint_path, config=self.config
            )
            print(f"Best model loaded successfully: {checkpoint_path}")

            # Set model to evaluation mode
            model.eval()
            return model
        except Exception as e:
            print(f"Error loading model: {e}")
            return None

    def evaluate_model(self):
        """Evaluate model performance on the validation set.

        Runs the Lightning test loop, then a manual pass computing average
        loss, pixel accuracy (over labeled pixels only), per-class IoU, a
        confusion matrix and a classification report. Pixels labeled
        IGNORE_INDEX are excluded from all metrics, matching the loss.

        Returns:
            dict with keys 'loss', 'accuracy', 'mean_iou', 'iou_scores',
            'confusion_matrix', or None on failure.
        """
        print("Evaluating model performance...")

        # Load model
        model = self.load_best_model()
        if model is None:
            return None

        # Validate dataset paths
        try:
            self.config.DATASET_CONFIG.validate_paths()
            print(f"Dataset paths validated successfully")
        except FileNotFoundError as e:
            print(f"Error: {e}")
            print(f"Please ensure the dataset is properly set up at: {self.config.DATASET_CONFIG.DATA_DIR}")
            return None

        # Load validation data
        try:
            data_loader = SegmentationDataLoader(self.config)
            val_loader = data_loader.get_test_loader()
            print(f"Evaluating on {len(val_loader.dataset)} validation samples...")
        except Exception as e:
            print(f"Error: Cannot load validation data: {e}")
            return None

        # Use PyTorch Lightning trainer for evaluation
        trainer = pl.Trainer(
            accelerator='gpu' if torch.cuda.is_available() else 'cpu',
            devices=1,
            logger=False,
            enable_checkpointing=False,
            enable_progress_bar=True,
            enable_model_summary=False
        )

        # Run evaluation (results are printed by Lightning itself)
        trainer.test(model, val_loader)

        # Manual evaluation for detailed metrics.
        # BUG FIX: the original moved inputs to self.device but never the
        # model, risking a CPU/GPU device mismatch in the forward pass.
        model = model.to(self.device)
        model.eval()
        total_loss = 0.0
        correct_pixels = 0
        total_pixels = 0

        # Per-batch flattened arrays of valid (non-ignored) pixels; one
        # np.concatenate at the end avoids a huge list of Python ints.
        pred_chunks = []
        target_chunks = []

        with torch.no_grad():
            for batch_idx, (images, masks) in enumerate(val_loader):
                images = images.to(self.device)
                masks = masks.to(self.device)

                # Forward pass using the model's forward method
                outputs = model(images)

                # Calculate loss
                loss = torch.nn.functional.cross_entropy(
                    outputs, masks, ignore_index=self.IGNORE_INDEX
                )
                total_loss += loss.item()

                # Get predictions
                predicted = outputs.argmax(dim=1)

                # BUG FIX: accuracy previously counted ignore-index pixels
                # as errors even though the loss excluded them.
                valid = masks != self.IGNORE_INDEX
                correct_pixels += (predicted[valid] == masks[valid]).sum().item()
                total_pixels += valid.sum().item()

                # Store valid pixels only, so ignored labels never leak
                # into the confusion matrix / classification report.
                pred_chunks.append(predicted[valid].cpu().numpy().ravel())
                target_chunks.append(masks[valid].cpu().numpy().ravel())

                if batch_idx % 10 == 0:
                    print(f"Batch {batch_idx}/{len(val_loader)}, Loss: {loss.item():.4f}")

        # Calculate metrics (guard against an empty loader / all-ignored masks)
        avg_loss = total_loss / len(val_loader)
        accuracy = correct_pixels / total_pixels if total_pixels > 0 else 0.0

        print(f"\nEvaluation Results:")
        print(f"Average Loss: {avg_loss:.4f}")
        print(f"Pixel Accuracy: {accuracy:.4f}")

        # BUG FIX: the original kept Python lists, so `all_targets == i`
        # compared a list with an int (always False) and every IoU was 0.
        # Work on real numpy arrays instead.
        empty = np.array([], dtype=np.int64)
        all_predictions = np.concatenate(pred_chunks) if pred_chunks else empty
        all_targets = np.concatenate(target_chunks) if target_chunks else empty

        # Fixed label set keeps the matrix shape stable at NUM_CLASSES^2
        labels = list(range(self.config.NUM_CLASSES))

        # Generate confusion matrix
        cm = confusion_matrix(all_targets, all_predictions, labels=labels)

        # Calculate IoU for each class
        iou_scores = []
        for i in labels:
            intersection = np.logical_and(all_targets == i, all_predictions == i).sum()
            union = np.logical_or(all_targets == i, all_predictions == i).sum()
            iou = intersection / union if union > 0 else 0
            iou_scores.append(iou)

        mean_iou = np.mean(iou_scores)
        print(f"Mean IoU: {mean_iou:.4f}")

        # Generate classification report; zero_division=0 avoids warnings
        # for classes absent from this validation split.
        print("\nClassification Report:")
        print(classification_report(
            all_targets, all_predictions,
            labels=labels,
            target_names=[f"Class {i}" for i in labels],
            zero_division=0,
        ))

        return {
            'loss': avg_loss,
            'accuracy': accuracy,
            'mean_iou': mean_iou,
            'iou_scores': iou_scores,
            'confusion_matrix': cm
        }

    def visualize_predictions(self, num_samples=5):
        """Visualize model predictions against ground truth.

        Renders up to num_samples rows of (original image, ground-truth
        mask, predicted mask) and saves the figure to
        LOG_DIR/evaluation_visualization.png.

        Args:
            num_samples: maximum number of validation batches to visualize
                (the first image of each batch is shown).
        """
        print("Generating prediction visualizations...")

        # Load model
        model = self.load_best_model()
        if model is None:
            return

        # Load validation data
        try:
            data_loader = SegmentationDataLoader(self.config)
            val_loader = data_loader.get_test_loader()
        except Exception as e:
            print(f"Error: Cannot load validation data: {e}")
            return

        # Keep model and inputs on the same device
        model = model.to(self.device)

        # Get sample data
        samples = []
        for images, masks in val_loader:
            samples.append((images, masks))
            if len(samples) >= num_samples:
                break

        # The loader may hold fewer batches than requested
        num_rows = len(samples)
        if num_rows == 0:
            print("Error: Cannot load validation data: loader is empty")
            return

        # Create visualization
        fig, axes = plt.subplots(num_rows, 3, figsize=(15, 5 * num_rows))

        for i, (images, masks) in enumerate(samples):
            # Get single sample
            image = images[0].unsqueeze(0).to(self.device)
            mask = masks[0].cpu().numpy()

            # BUG FIX: in the original, the no_grad block was dedented out
            # of this loop, so only the last sample was ever plotted and
            # the other rows were left blank.
            with torch.no_grad():
                output = model(image)
            pred_mask = torch.argmax(output, dim=1).squeeze().cpu().numpy()

            # Original image (undo the mean=0.5/std=0.5 normalization)
            original_image = image.squeeze().permute(1, 2, 0).cpu().numpy()
            original_image = np.clip(original_image * 0.5 + 0.5, 0, 1)  # Denormalize

            # plt.subplots squeezes a single row to a 1-D axes array
            if num_rows == 1:
                ax_img, ax_gt, ax_pred = axes
            else:
                ax_img, ax_gt, ax_pred = axes[i]

            ax_img.imshow(original_image)
            ax_img.set_title(f"Sample {i+1} - Original Image")
            ax_img.axis('off')

            ax_gt.imshow(mask, cmap='tab20')
            ax_gt.set_title(f"Sample {i+1} - Ground Truth")
            ax_gt.axis('off')

            ax_pred.imshow(pred_mask, cmap='tab20')
            ax_pred.set_title(f"Sample {i+1} - Prediction")
            ax_pred.axis('off')

        plt.tight_layout()
        vis_path = os.path.join(self.config.LOG_DIR, 'evaluation_visualization.png')
        plt.savefig(vis_path, dpi=300, bbox_inches='tight')
        plt.close()
        print(f"Visualization saved: {vis_path}")

    def generate_class_distribution(self):
        """Analyze the per-class pixel distribution of the validation set.

        Counts pixels per class id across all validation masks, saves a bar
        chart to LOG_DIR/class_distribution.png and prints summary stats.

        Returns:
            np.ndarray of shape (NUM_CLASSES,) with integer pixel counts,
            or None if the validation data cannot be loaded.
        """
        print("Analyzing class distribution...")

        # Load validation data
        try:
            data_loader = SegmentationDataLoader(self.config)
            val_loader = data_loader.get_test_loader()
        except Exception as e:
            print(f"Error: Cannot load validation data: {e}")
            return None

        # Collect class distribution.
        # Integer dtype so the counts print as ints, not '1,234.0'.
        class_counts = np.zeros(self.config.NUM_CLASSES, dtype=np.int64)

        for _, masks in val_loader:
            unique, counts = np.unique(masks.numpy(), return_counts=True)
            for cls, count in zip(unique, counts):
                # Skip out-of-range ids (e.g. the 255 ignore label)
                if cls < self.config.NUM_CLASSES:
                    class_counts[int(cls)] += count

        # Plot distribution
        plt.figure(figsize=(12, 6))
        plt.bar(range(self.config.NUM_CLASSES), class_counts)
        plt.xlabel('Class ID')
        plt.ylabel('Pixel Count')
        plt.title('Class Distribution in Validation Set')
        plt.xticks(range(self.config.NUM_CLASSES), [f"{i}" for i in range(self.config.NUM_CLASSES)], rotation=45)
        plt.tight_layout()
        dist_path = os.path.join(self.config.LOG_DIR, 'class_distribution.png')
        plt.savefig(dist_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"Total pixels: {class_counts.sum():,}")
        print(f"Most frequent class: {class_counts.argmax()} ({class_counts.max():,} pixels)")
        print(f"Least frequent class: {class_counts.argmin()} ({class_counts.min():,} pixels)")

        return class_counts

    def evaluate(self, evaluate=True, visualize=True, distribution=True):
        """Execute the selected evaluation tasks.

        Args:
            evaluate: run quantitative evaluation (evaluate_model).
            visualize: render prediction visualizations.
            distribution: analyze the class distribution.

        Returns:
            dict with the results of the tasks that were run (keys
            'evaluation' and/or 'distribution').
        """
        print("=" * 60)
        print(f"Starting {self.config.MODEL_NAME} model evaluation")
        print("=" * 60)

        results = {}

        if evaluate:
            print("\n1. Running model evaluation...")
            results['evaluation'] = self.evaluate_model()

        if visualize:
            print("\n2. Generating prediction visualizations...")
            self.visualize_predictions()

        if distribution:
            print("\n3. Analyzing class distribution...")
            results['distribution'] = self.generate_class_distribution()

        print("\nEvaluation completed!")
        print("=" * 60)

        return results

def main():
    """Command-line entry point: parse task flags and run the evaluator."""
    parser = argparse.ArgumentParser(description="Model evaluation")
    parser.add_argument("--config", type=str, required=True, help="Configuration file path")
    # All task switches are boolean flags; declare them in one place.
    for flag, text in (
        ("--evaluate", "Run model evaluation"),
        ("--visualize", "Visualize predictions"),
        ("--distribution", "Analyze class distribution"),
        ("--all", "Run all evaluation tasks"),
    ):
        parser.add_argument(flag, action="store_true", help=text)

    args = parser.parse_args()

    # Require at least one task to be requested.
    if not (args.evaluate or args.visualize or args.distribution or args.all):
        print("Please specify at least one evaluation task")
        return

    # Load configuration and build the evaluator.
    config = ConfigLoader.load_config(args.config)
    evaluator = Evaluator(config)

    # --all enables every task regardless of the individual flags.
    run_all = args.all
    evaluator.evaluate(
        evaluate=run_all or args.evaluate,
        visualize=run_all or args.visualize,
        distribution=run_all or args.distribution,
    )

if __name__ == "__main__":
    main()