import torch
import numpy as np
import time
import csv
import datetime
import tracemalloc
import gc
from pathlib import Path
from tqdm import tqdm
from executorch.exir import to_edge
from executorch.runtime import Runtime
from sklearn.metrics import roc_curve

# Import your model definitions and data preparation functions
from process_data2 import prepare_dataloaders
from model2 import CNN, DeepCNN, MediumCNN, CNN_LSTM, CNN_LSTM_Attention


def export_model_to_executorch(model, example_input, output_path="model.pte"):
    """Export a PyTorch model to ExecuTorch's .pte format.

    Pipeline: torch.export capture -> Edge IR -> ExecuTorch program -> file.

    Args:
        model: Trained torch.nn.Module (switched to eval mode here).
        example_input: Representative input tensor; its shape is captured
            by the exported program (static shapes).
        output_path: Destination for the serialized .pte buffer.

    Returns:
        bool: True if the file was written, False if any stage raised.
    """
    print("Exporting model to ExecuTorch...")
    model.eval()

    try:
        print(f"Using export input shape: {example_input.shape}")

        # Trace with a fixed example input. To allow a dynamic batch
        # dimension instead, pass:
        #   dynamic_shapes={"x": {0: torch.export.Dim("batch_size")}}
        captured = torch.export.export(model, (example_input,))

        # Lower: ATen graph -> Edge IR (mobile-friendly) -> ExecuTorch program.
        executorch_program = to_edge(captured).to_executorch()

        # Persist the serialized program buffer.
        with open(output_path, "wb") as out_file:
            out_file.write(executorch_program.buffer)
    except Exception as e:
        print(f"❌ Export failed: {str(e)}")
        return False

    print(f"✅ Model exported successfully to {output_path}")
    return True


def benchmark_speed(pytorch_model, executorch_path, input_data, num_runs=50, batch_size=32):
    """Benchmark inference latency of the PyTorch model vs its ExecuTorch export.

    Both backends run the same fixed batch; averages are reported per batch
    and per sample.

    Args:
        pytorch_model: Eager torch.nn.Module to time.
        executorch_path: Path to the exported .pte file.
        input_data: Tensor of inputs; the first `batch_size` rows are used.
        num_runs: Number of timed iterations per backend.
        batch_size: Rows taken from input_data (must match the export shape).

    Returns:
        dict with per-sample and per-batch averages (ms), speedup factor
        (>1 means ExecuTorch is faster), and the batch size used.
    """
    print(f"Benchmarking speed over {num_runs} runs with batch size {batch_size}...")

    # Load the ExecuTorch program and grab its forward method.
    runtime = Runtime.get()
    program = runtime.load_program(executorch_path)
    executorch_method = program.load_method("forward")

    # Same batch for both backends so the comparison is apples-to-apples.
    full_batch = input_data[:batch_size]

    # Warm-up runs (caches, allocator pools) are excluded from timing.
    for _ in range(5):
        with torch.no_grad():
            _ = pytorch_model(full_batch)
        _ = executorch_method.execute([full_batch])

    # PyTorch benchmark. perf_counter() is monotonic and high-resolution,
    # unlike time.time(), which has coarse granularity and can jump when
    # the wall clock is adjusted.
    pytorch_times = []
    for _ in range(num_runs):
        start = time.perf_counter()
        with torch.no_grad():
            _ = pytorch_model(full_batch)
        pytorch_times.append(time.perf_counter() - start)

    # ExecuTorch benchmark - full batch processing
    executorch_times = []
    for _ in range(num_runs):
        start = time.perf_counter()
        _ = executorch_method.execute([full_batch])
        executorch_times.append(time.perf_counter() - start)

    # Average latency in milliseconds, per batch and per sample.
    avg_pytorch_batch = sum(pytorch_times) / len(pytorch_times) * 1000  # ms per batch
    avg_executorch_batch = sum(executorch_times) / len(executorch_times) * 1000  # ms per batch

    avg_pytorch_sample = avg_pytorch_batch / batch_size  # ms per sample
    avg_executorch_sample = avg_executorch_batch / batch_size  # ms per sample

    speedup = avg_pytorch_sample / avg_executorch_sample

    print(f"PyTorch average: {avg_pytorch_sample:.2f} ms/sample | {avg_pytorch_batch:.2f} ms/batch")
    print(f"ExecuTorch average: {avg_executorch_sample:.2f} ms/sample | {avg_executorch_batch:.2f} ms/batch")
    print(f"Speedup: {speedup:.2f}x")

    return {
        "pytorch_avg_ms": avg_pytorch_sample,
        "executorch_avg_ms": avg_executorch_sample,
        "pytorch_batch_ms": avg_pytorch_batch,
        "executorch_batch_ms": avg_executorch_batch,
        "speedup": speedup,
        "batch_size": batch_size
    }


def calculate_eer(model, dataloader, device):
    """Calculate the global Equal Error Rate of a PyTorch model.

    Every (sample, class) pair is treated as one binary trial: the score is
    the softmax probability of that class, the label is 1 iff it is the
    sample's true class. The EER is taken at the ROC point where FPR and
    FNR are closest to equal.

    Args:
        model: Classifier producing logits of shape (batch, num_classes).
        dataloader: Yields (inputs, one_hot_labels) batches.
        device: Device to run inference on.

    Returns:
        (eer, eer_threshold): EER in [0, 1] and the score threshold at
        which it occurs.
    """
    model.eval()
    score_chunks = []
    label_chunks = []

    with torch.no_grad():
        for inputs, labels in tqdm(dataloader, desc="Calculating PyTorch EER"):
            inputs = inputs.to(device)
            labels = labels.to(device)
            true_classes = torch.argmax(labels, dim=1).cpu().numpy()

            outputs = model(inputs)
            probabilities = torch.softmax(outputs, dim=1).cpu().numpy()

            # Vectorized replacement for the per-sample/per-class Python
            # loop: flatten all class scores and build matching one-hot
            # binary labels in one shot.
            n, c = probabilities.shape
            onehot = np.zeros((n, c), dtype=np.int64)
            onehot[np.arange(n), true_classes] = 1
            score_chunks.append(probabilities.reshape(-1))
            label_chunks.append(onehot.reshape(-1))

    all_scores = np.concatenate(score_chunks)
    all_labels = np.concatenate(label_chunks)

    # Calculate FPR and TPR at different thresholds
    fpr, tpr, thresholds = roc_curve(all_labels, all_scores)
    fnr = 1 - tpr

    # EER point: where FPR and FNR cross. Average the two rates there
    # (consistent with calculate_per_user_eer in this file) instead of
    # reporting FPR alone, which mis-states the EER when the curves do not
    # cross exactly at a sampled threshold.
    eer_idx = np.nanargmin(np.absolute(fpr - fnr))
    eer = (fpr[eer_idx] + fnr[eer_idx]) / 2.0
    eer_threshold = thresholds[eer_idx]

    return eer, eer_threshold


def calculate_eer_executorch(executorch_path, dataloader):
    """Calculate the global Equal Error Rate of an exported ExecuTorch model.

    Mirrors calculate_eer(): each (sample, class) softmax probability is one
    binary trial. Batches whose size differs from the first batch are
    skipped because the exported program has a fixed input shape.

    Args:
        executorch_path: Path to the .pte file.
        dataloader: Yields (inputs, one_hot_labels) batches.

    Returns:
        EER in [0, 1].
    """
    # Load ExecuTorch model
    runtime = Runtime.get()
    program = runtime.load_program(executorch_path)
    executorch_method = program.load_method("forward")

    # The export captured a fixed batch size; the first batch is the reference.
    batch_size = None
    for inputs, _ in dataloader:
        batch_size = inputs.shape[0]
        break

    score_chunks = []
    label_chunks = []
    for inputs, labels in tqdm(dataloader, desc="Calculating ExecuTorch EER"):
        # Skip batches whose shape does not match the exported program.
        if inputs.shape[0] != batch_size:
            continue

        true_classes = torch.argmax(labels, dim=1).cpu().numpy()

        # Process full batch at once
        output = executorch_method.execute([inputs])[0]
        probs = torch.softmax(output, dim=1).cpu().numpy()

        # Vectorized: flatten all class scores, one-hot the true classes
        # (replaces the per-sample/per-class Python loop).
        n, c = probs.shape
        onehot = np.zeros((n, c), dtype=np.int64)
        onehot[np.arange(n), true_classes] = 1
        score_chunks.append(probs.reshape(-1))
        label_chunks.append(onehot.reshape(-1))

    all_scores = np.concatenate(score_chunks)
    all_labels = np.concatenate(label_chunks)

    # Calculate FPR and TPR at different thresholds
    fpr, tpr, thresholds = roc_curve(all_labels, all_scores)
    fnr = 1 - tpr

    # Average FPR/FNR at the crossing point (consistent with calculate_eer
    # and the per-user EER helpers) rather than reporting FPR alone.
    eer_idx = np.nanargmin(np.absolute(fpr - fnr))
    eer = (fpr[eer_idx] + fnr[eer_idx]) / 2.0

    return eer


def calculate_per_user_eer(model, dataloader, device, num_classes):
    """Compute a one-vs-rest Equal Error Rate for every user class.

    For each user, "this user" is the positive class and everyone else is
    negative; the EER is the average of FPR and FNR at the ROC point where
    they are closest.

    Args:
        model: Classifier producing logits of shape (batch, num_classes).
        dataloader: Yields (inputs, one_hot_labels) batches.
        device: Device to run inference on.
        num_classes: Number of user classes.

    Returns:
        (avg_eer, user_eers): mean EER across users and a dict mapping
        user id -> EER.
    """
    model.eval()
    truth_batches = []
    prob_batches = []

    with torch.no_grad():
        for batch_inputs, batch_labels in tqdm(dataloader, desc="Calculating PyTorch per-user EER"):
            batch_inputs = batch_inputs.to(device)
            batch_labels = batch_labels.to(device)

            # One-hot labels -> integer class ids.
            truth_batches.append(torch.argmax(batch_labels, dim=1).cpu().numpy())

            # Softmax probabilities for every class.
            logits = model(batch_inputs)
            prob_batches.append(torch.softmax(logits, dim=1).cpu().numpy())

    truths = np.concatenate(truth_batches)
    scores = np.vstack(prob_batches)

    user_eers = {}
    for user_id in range(num_classes):
        # One-vs-rest: this user is the positive class.
        positives = (truths == user_id).astype(int)

        fpr, tpr, _ = roc_curve(positives, scores[:, user_id])
        fnr = 1 - tpr

        # EER = mean of FPR/FNR at their crossing point.
        crossing = np.nanargmin(np.absolute(fpr - fnr))
        user_eers[user_id] = (fpr[crossing] + fnr[crossing]) / 2.0

    avg_eer = sum(user_eers.values()) / len(user_eers)
    return avg_eer, user_eers


def calculate_per_user_eer_executorch(executorch_path, dataloader, num_classes):
    """Compute the average one-vs-rest Equal Error Rate for an ExecuTorch model.

    Batches whose size differs from the first batch are skipped because the
    exported program has a fixed input shape.

    Args:
        executorch_path: Path to the .pte file.
        dataloader: Yields (inputs, one_hot_labels) batches.
        num_classes: Number of user classes.

    Returns:
        Mean EER across all user classes.
    """
    # Load the exported program and its forward method.
    runtime = Runtime.get()
    program = runtime.load_program(executorch_path)
    forward = program.load_method("forward")

    # Reference batch size = size of the first batch.
    batch_size = None
    for batch_inputs, _ in dataloader:
        batch_size = batch_inputs.shape[0]
        break

    truth_batches = []
    prob_batches = []
    for batch_inputs, batch_labels in tqdm(dataloader, desc="Calculating ExecuTorch per-user EER"):
        # Only full batches match the exported input shape.
        if batch_inputs.shape[0] != batch_size:
            continue

        truth_batches.append(torch.argmax(batch_labels, dim=1).cpu().numpy())

        # One inference per full batch.
        logits = forward.execute([batch_inputs])[0]
        prob_batches.append(torch.softmax(logits, dim=1).cpu().numpy())

    truths = np.concatenate(truth_batches)
    scores = np.vstack(prob_batches)

    per_user = {}
    for user_id in range(num_classes):
        # One-vs-rest: this user is the positive class.
        positives = (truths == user_id).astype(int)

        fpr, tpr, _ = roc_curve(positives, scores[:, user_id])
        fnr = 1 - tpr

        # EER = mean of FPR/FNR at their crossing point.
        crossing = np.nanargmin(np.absolute(fpr - fnr))
        per_user[user_id] = (fpr[crossing] + fnr[crossing]) / 2.0

    return sum(per_user.values()) / len(per_user)


def compare_model_accuracy(pytorch_model, executorch_path, test_loader, num_classes):
    """Compare accuracy and EER metrics between PyTorch and ExecuTorch models.

    Runs both backends over the same complete batches of test_loader
    (batches whose size differs from the first batch are skipped, since the
    exported program has a fixed input shape), then computes top-1 accuracy,
    prediction agreement, and global/per-user EER for each backend.

    Args:
        pytorch_model: The eager reference model.
        executorch_path: Path to the exported .pte file.
        test_loader: Yields (inputs, one_hot_labels) batches.
        num_classes: Number of user classes (for per-user EER).

    Returns:
        dict with accuracy, agreement, and EER metrics for both backends.
    """
    print("Comparing model accuracy and EER metrics...")
    device = torch.device("cpu")
    batch_size = None

    # Put the eager model in eval mode (dropout off, batchnorm frozen).
    # The exported program was captured in eval mode, so the eager model
    # must match for a fair comparison; previously this relied on the
    # caller having called .eval() earlier.
    pytorch_model.eval()

    # Load ExecuTorch model
    runtime = Runtime.get()
    program = runtime.load_program(executorch_path)
    executorch_method = program.load_method("forward")

    # The export used a fixed batch size; take it from the first batch.
    for inputs, _ in test_loader:
        batch_size = inputs.shape[0]
        break

    # For tracking results
    pytorch_correct = 0
    pytorch_total = 0
    executorch_correct = 0
    executorch_total = 0
    pytorch_preds = []
    executorch_preds = []
    all_true_labels = []

    # Pass 1: eager PyTorch model.
    with torch.no_grad():
        for inputs, labels in tqdm(test_loader, desc="Evaluating PyTorch model"):
            # Skip incomplete batches
            if inputs.shape[0] != batch_size:
                continue

            true_labels = torch.argmax(labels, dim=1)
            all_true_labels.extend(true_labels.cpu().numpy())

            # PyTorch model evaluation
            outputs = pytorch_model(inputs)
            _, predicted = torch.max(outputs, 1)
            pytorch_preds.extend(predicted.cpu().numpy())
            pytorch_total += true_labels.size(0)
            pytorch_correct += (predicted == true_labels).sum().item()

    # Pass 2: ExecuTorch program over the same batches.
    for inputs, labels in tqdm(test_loader, desc="Evaluating ExecuTorch model"):
        # Skip incomplete batches - must use exact same shape as during export
        if inputs.shape[0] != batch_size:
            continue

        # ExecuTorch model evaluation
        output = executorch_method.execute([inputs])[0]
        _, predicted = torch.max(output, 1)
        true_labels = torch.argmax(labels, dim=1)
        executorch_preds.extend(predicted.cpu().numpy())
        executorch_total += true_labels.size(0)
        executorch_correct += (predicted == true_labels).sum().item()

    # Top-1 accuracy per backend.
    pytorch_accuracy = 100 * pytorch_correct / pytorch_total
    executorch_accuracy = 100 * executorch_correct / executorch_total

    # Fraction of samples where both backends predicted the same class.
    agreement = sum(1 for p1, p2 in zip(pytorch_preds, executorch_preds) if p1 == p2)
    model_agreement = 100 * agreement / len(pytorch_preds)

    # Positive means ExecuTorch is more accurate than eager PyTorch.
    accuracy_diff = executorch_accuracy - pytorch_accuracy

    print(f"PyTorch Accuracy: {pytorch_accuracy:.2f}%")
    print(f"ExecuTorch Accuracy: {executorch_accuracy:.2f}%")
    print(f"Accuracy Difference: {accuracy_diff:.2f}%")
    print(f"Model Agreement: {model_agreement:.2f}%")

    # Global EER (all class scores pooled) for both backends.
    print("Calculating PyTorch EER...")
    pytorch_global_eer, _ = calculate_eer(pytorch_model, test_loader, device)

    print("Calculating ExecuTorch EER...")
    executorch_global_eer = calculate_eer_executorch(executorch_path, test_loader)

    # Per-user (one-vs-rest) EER averaged across users.
    print("Calculating PyTorch per-user EER...")
    pytorch_avg_eer, _ = calculate_per_user_eer(pytorch_model, test_loader, device, num_classes)

    print("Calculating ExecuTorch per-user EER...")
    executorch_avg_eer = calculate_per_user_eer_executorch(executorch_path, test_loader, num_classes)

    print(f"PyTorch Global EER: {pytorch_global_eer:.4f}")
    print(f"ExecuTorch Global EER: {executorch_global_eer:.4f}")
    print(f"PyTorch Avg Per-User EER: {pytorch_avg_eer:.4f}")
    print(f"ExecuTorch Avg Per-User EER: {executorch_avg_eer:.4f}")

    return {
        "pytorch_accuracy": pytorch_accuracy,
        "executorch_accuracy": executorch_accuracy,
        "accuracy_diff": accuracy_diff,
        "model_agreement": model_agreement,
        "pytorch_global_eer": pytorch_global_eer,
        "executorch_global_eer": executorch_global_eer,
        "pytorch_avg_eer": pytorch_avg_eer,
        "executorch_avg_eer": executorch_avg_eer
    }


def measure_memory_usage(pytorch_model, executorch_path, input_data, batch_size=32):
    """Compare memory footprint of the PyTorch model and its ExecuTorch export.

    Reports static model size (parameter bytes vs .pte file size) plus the
    process-RSS delta observed around one inference per backend. Note the
    RSS delta is a coarse proxy: any other allocation in the process also
    counts toward it.

    Args:
        pytorch_model: Eager model whose parameter tensors are measured.
        executorch_path: Path to the .pte file on disk.
        input_data: Tensor; the first batch_size rows form the test input.
        batch_size: Number of rows to run through each model.

    Returns:
        dict with model sizes and runtime memory deltas, all in MB.
    """
    import psutil
    import os
    print("Measuring memory footprint...")
    proc = psutil.Process(os.getpid())
    mb = 1024 * 1024

    def rss_mb():
        # Current resident set size of this process, in MB.
        return proc.memory_info().rss / mb

    # Static sizes: in-memory parameter bytes vs serialized .pte file.
    pytorch_model_size = sum(p.nelement() * p.element_size() for p in pytorch_model.parameters()) / mb
    executorch_file_size = Path(executorch_path).stat().st_size / mb

    test_batch = input_data[:batch_size]

    # PyTorch: RSS delta around a single forward pass (GC first so stale
    # garbage doesn't pollute the baseline).
    gc.collect()
    baseline = rss_mb()
    with torch.no_grad():
        _ = pytorch_model(test_batch)
    pytorch_memory = rss_mb() - baseline

    # ExecuTorch: the delta also includes loading the program itself.
    gc.collect()
    baseline = rss_mb()
    runtime = Runtime.get()
    program = runtime.load_program(executorch_path)
    executorch_method = program.load_method("forward")
    _ = executorch_method.execute([test_batch])
    executorch_memory = rss_mb() - baseline

    print(f"PyTorch model size: {pytorch_model_size:.2f} MB")
    print(f"ExecuTorch model size: {executorch_file_size:.2f} MB")
    print(f"PyTorch runtime memory: {pytorch_memory:.2f} MB")
    print(f"ExecuTorch runtime memory: {executorch_memory:.2f} MB")

    return {
        "pytorch_model_size": pytorch_model_size,
        "executorch_model_size": executorch_file_size,
        "pytorch_runtime_memory": pytorch_memory,
        "executorch_runtime_memory": executorch_memory
    }

def log_to_csv(model_name, results):
    """Append one benchmark result row to the shared CSV log.

    Creates the log directory and the CSV header on first use. If the
    results dict carries an "error" key, every metric column is written as
    the literal string "Error".

    Args:
        model_name: Display name of the benchmarked model.
        results: Nested dict with "accuracy", "speed" and "memory" sections,
            or an "error" key when the run failed before metrics existed.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    csv_path = Path("../logs/executorch_benchmark.csv")
    # open(..., mode='a') does not create missing directories; without this
    # the first run on a fresh checkout raised FileNotFoundError.
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    file_exists = csv_path.exists()

    with open(csv_path, mode='a', newline='') as file:
        writer = csv.writer(file)

        # Write header if file doesn't exist
        if not file_exists:
            writer.writerow([
                "Timestamp", "Model", "PyTorch Accuracy (%)", "ExecuTorch Accuracy (%)",
                "Accuracy Diff (%)", "Model Agreement (%)",
                "PyTorch Global EER", "ExecuTorch Global EER",
                "PyTorch Avg User EER", "ExecuTorch Avg User EER",
                "PyTorch Inference (ms)", "ExecuTorch Inference (ms)", "Speedup",
                "PyTorch Model Size (MB)", "ExecuTorch Model Size (MB)",
                "PyTorch Runtime Memory (MB)", "ExecuTorch Runtime Memory (MB)"
            ])

        # One row per call: either 15 "Error" placeholders or the metrics.
        if "error" in results:
            row = [timestamp, model_name] + ["Error"] * 15
        else:
            row = [
                timestamp, model_name,
                f"{results['accuracy']['pytorch_accuracy']:.2f}",
                f"{results['accuracy']['executorch_accuracy']:.2f}",
                f"{abs(results['accuracy']['accuracy_diff']):.2f}",
                f"{results['accuracy']['model_agreement']:.2f}",
                f"{results['accuracy']['pytorch_global_eer']:.4f}",
                f"{results['accuracy']['executorch_global_eer']:.4f}",
                f"{results['accuracy']['pytorch_avg_eer']:.4f}",
                f"{results['accuracy']['executorch_avg_eer']:.4f}",
                f"{results['speed']['pytorch_avg_ms']:.2f}",
                f"{results['speed']['executorch_avg_ms']:.2f}",
                f"{results['speed']['speedup']:.2f}",
                f"{results['memory']['pytorch_model_size']:.2f}",
                f"{results['memory']['executorch_model_size']:.2f}",
                f"{results['memory']['pytorch_runtime_memory']:.2f}",
                f"{results['memory']['executorch_runtime_memory']:.2f}"
            ]
        writer.writerow(row)

    print(f"Benchmark results logged to {csv_path}")


def get_model_class(model_name):
    """Resolve a model name (case- and hyphen-insensitive) to its class.

    Args:
        model_name: e.g. "DeepCNN", "cnn-lstm", "CNN_LSTM_Attention".

    Returns:
        The matching model class from model2.

    Raises:
        ValueError: If the name does not match any known model.
    """
    registry = {
        "cnn": CNN,
        "mediumcnn": MediumCNN,
        "deepcnn": DeepCNN,
        "cnn_lstm": CNN_LSTM,
        "cnn_lstm_attention": CNN_LSTM_Attention
    }

    # Normalize: lowercase, hyphens become underscores.
    model_name = model_name.lower().replace("-", "_")

    if model_name in registry:
        return registry[model_name]

    raise ValueError(f"Unknown model: {model_name}. Available models: {list(registry.keys())}")


def main():
    """Run the full ExecuTorch benchmark pipeline for one hardcoded model.

    Steps: load data, instantiate the model and its trained weights,
    export to .pte, then benchmark speed, memory, and accuracy/EER,
    logging everything to the shared CSV.
    """
    # Use hardcoded values instead of args parser for simpler testing
    model_name = "DeepCNN"
    model_path = Path("./trained_models/DeepCNN_model.pth")
    runs = 50
    batch_size = 32

    # Print benchmark information
    print("\n" + "="*60)
    print(f"EXECUTORCH MODEL BENCHMARK")
    print(f"Model: {model_name}")
    print(f"Model path: {model_path}")
    print(f"Benchmark runs: {runs}")
    print(f"Batch size: {batch_size}")
    print("="*60 + "\n")

    # Set device to CPU for ExecuTorch compatibility
    device = torch.device("cpu")
    print(f"Using device: {device}")

    # Load data
    train_loader, val_loader, test_loader, num_classes = prepare_dataloaders()
    print(f"Data loaded with {num_classes} classes")

    # Get input shape from a batch; this full batch is reused as the
    # example input for export and for the speed/memory benchmarks.
    for inputs, _ in test_loader:
        input_shape = inputs.shape
        example_input = inputs  # Use a full batch
        break
    print(f"Input shape: {input_shape}")

    # Initialize model
    # NOTE(review): in_channels=input_shape[-1] assumes channels-last input
    # layout — confirm against the model definitions in model2.
    try:
        ModelClass = get_model_class(model_name)
        model = ModelClass(num_classes=num_classes, in_channels=input_shape[-1]).to(device)
        print(f"Model {model_name} initialized with {num_classes} classes")
    except ValueError as e:
        print(f"Error: {str(e)}")
        return

    # Load pre-trained weights
    if model_path.exists():
        try:
            model.load_state_dict(torch.load(model_path, map_location=device))
            print(f"✅ Loaded weights from {model_path}")
        except Exception as e:
            print(f"❌ Failed to load weights: {str(e)}")
            return
    else:
        print(f"❌ Model weights not found at {model_path}")
        return

    # Create results dictionary
    results = {}

    # Export to ExecuTorch; the .pte captures the example batch's shape,
    # so downstream evaluation must use batches of the same size.
    executorch_path = f"./trained_models/{model_name.lower()}_model.pte"
    export_success = export_model_to_executorch(model, example_input, executorch_path)

    if not export_success:
        print("Export failed, exiting.")
        # Still log the failure so the CSV records the attempt.
        results["error"] = "Export failed"
        log_to_csv(model_name, results)
        return

    # Benchmark speed
    print("\n--- Speed Benchmarking ---")
    speed_results = benchmark_speed(model, executorch_path, example_input, runs, batch_size)
    results["speed"] = speed_results

    # Measure memory
    print("\n--- Memory Usage ---")
    memory_results = measure_memory_usage(model, executorch_path, example_input, batch_size)
    results["memory"] = memory_results

    # Compare accuracy metrics
    print("\n--- Accuracy Comparison ---")
    accuracy_results = compare_model_accuracy(model, executorch_path, test_loader, num_classes)
    results["accuracy"] = accuracy_results

    # Log results
    log_to_csv(model_name, results)

    # Print final summary
    print("\nBenchmark Summary:")
    print("-" * 60)
    print(f"Model: {model_name}")
    print(f"PyTorch Accuracy: {results['accuracy']['pytorch_accuracy']:.2f}%")
    print(f"ExecuTorch Accuracy: {results['accuracy']['executorch_accuracy']:.2f}%")
    print(f"Accuracy Difference: {abs(results['accuracy']['accuracy_diff']):.2f}%")
    print(f"Model Agreement: {results['accuracy']['model_agreement']:.2f}%")
    print(f"PyTorch Global EER: {results['accuracy']['pytorch_global_eer']:.4f}")
    print(f"ExecuTorch Global EER: {results['accuracy']['executorch_global_eer']:.4f}")
    print(f"PyTorch Inference: {results['speed']['pytorch_avg_ms']:.2f} ms/sample")
    print(f"ExecuTorch Inference: {results['speed']['executorch_avg_ms']:.2f} ms/sample")
    print(f"Speedup Factor: {results['speed']['speedup']:.2f}x")
    print(f"PyTorch Model Size: {results['memory']['pytorch_model_size']:.2f} MB")
    print(f"ExecuTorch Model Size: {results['memory']['executorch_model_size']:.2f} MB")
    print(f"PyTorch Runtime Memory: {results['memory']['pytorch_runtime_memory']:.2f} MB")
    print(f"ExecuTorch Runtime Memory: {results['memory']['executorch_runtime_memory']:.2f} MB")
    print("-" * 60)

# Run the benchmark only when executed as a script, so the module can be
# imported (e.g. for its helper functions) without side effects.
if __name__ == "__main__":
    main()