import sys

from sklearn.metrics import roc_curve

from process_data2 import prepare_dataloaders  # updated import
from model2 import CNN, DeepCNN, MediumCNN, CNN_LSTM, CNN_LSTM_Attention, OptimizedCNN_LSTM
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
import torch
import numpy as np

#DATA_PATH = "../balanced_data"
RANDOM_SEED = 42
EPOCHS = 1
#EPOCHS = 10
LEARNING_RATE = 0.001
#LEARNING_RATE = 0.0001

def log_model_details(model, optimizer, epochs, learning_rate):
    """Print a human-readable summary of the model, optimizer, and training hyperparameters."""
    sections = (
        ("Model Architecture:", model),
        ("\nOptimizer Settings:", optimizer),
    )
    for header, value in sections:
        print(header)
        print(value)
    print(f"\nTraining Parameters:\nEpochs: {epochs}\nLearning Rate: {learning_rate}\n")


import csv
import os
import datetime
import inspect
from pathlib import Path


def log_to_csv(model, final_accuracy, eer, avg_per_user_eer, best_accuracy, epochs, learning_rate, dataset="", notes=""):
    """
    Append one row of model performance metrics to a CSV log file,
    creating the file (with a header) and its directory on first use.

    Args:
        model: Trained model object; its class name and str() repr are recorded.
        final_accuracy: Test-set accuracy in percent.
        eer: Global equal error rate.
        avg_per_user_eer: Average of the per-user equal error rates.
        best_accuracy: Best validation accuracy seen during training, in percent.
        epochs: Number of training epochs.
        learning_rate: Optimizer learning rate.
        dataset: Optional dataset label shown next to the model name.
        notes: Optional free-form text appended after the architecture string.
    """
    # Class name plus a single-line, comma-free architecture dump so the
    # repr fits in one CSV cell without breaking the column layout.
    model_name = model.__class__.__name__
    model_arch = str(model).replace('\n', ' ').replace(',', ';')

    # Get current date and time
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Prepare row data
    row = [
        timestamp,
        f"{model_name} ({dataset})",
        f"{final_accuracy:.2f}",
        f"{eer:.4f}",
        f"{avg_per_user_eer:.4f}",
        f"{best_accuracy:.2f}",
        epochs,
        learning_rate,
        f"{model_arch} {notes}"
    ]

    # Define file path
    csv_path = Path("../logs/model_performance_log.csv")
    # Bug fix: open(..., mode='a') does not create missing parent
    # directories, so ensure the log directory exists first.
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    file_exists = csv_path.exists()

    # Create/append to CSV
    with open(csv_path, mode='a', newline='') as file:
        writer = csv.writer(file)

        # Write header if file doesn't exist
        if not file_exists:
            writer.writerow([
                "Timestamp",
                "Model",
                "Test Accuracy (%)",
                "Global EER",
                "Avg Per-User EER",
                "Best Val Accuracy (%)",
                "Epochs",
                "Learning Rate",
                "Architecture"
            ])

        # Write the data row
        writer.writerow(row)

    print(f"Results logged to {csv_path}")


def calculate_eer(model, dataloader, device):
    """
    Compute the global equal error rate (EER) over all (sample, class) score pairs.

    Every sample contributes one softmax score per class; the score for the
    sample's true class is labelled 1 and every other class score is labelled 0.
    The EER is the operating point on the resulting ROC curve where the false
    accept rate (FAR) equals the false reject rate (FRR).

    Args:
        model: Classifier returning per-class logits.
        dataloader: Iterable of (inputs, one-hot labels) batches.
        device: Torch device to run inference on.

    Returns:
        Tuple (eer, eer_threshold).
    """
    model.eval()
    all_scores = []
    all_labels = []

    with torch.no_grad():
        for inputs, labels in tqdm(dataloader, desc="Calculating EER"):
            inputs = inputs.to(device)
            labels = labels.to(device)
            # Labels are one-hot; recover the integer class index per sample.
            true_classes = torch.argmax(labels, dim=1).cpu().numpy()

            # Get model outputs and convert to probabilities
            probabilities = torch.softmax(model(inputs), dim=1).cpu().numpy()

            # Vectorized replacement of the previous per-element Python loop:
            # flatten all class scores and mark the true-class positions with 1.
            one_hot = np.zeros_like(probabilities, dtype=int)
            one_hot[np.arange(len(true_classes)), true_classes] = 1
            all_scores.append(probabilities.ravel())
            all_labels.append(one_hot.ravel())

    all_scores = np.concatenate(all_scores)
    all_labels = np.concatenate(all_labels)

    # Calculate FPR and TPR at different thresholds
    fpr, tpr, thresholds = roc_curve(all_labels, all_scores)
    fnr = 1 - tpr

    # EER is where FAR = FRR.  The curve is discrete, so take the threshold
    # closest to the crossing and average the two rates there — consistent
    # with the per-user EER computation elsewhere in this module.
    eer_idx = np.nanargmin(np.absolute(fpr - fnr))
    eer = (fpr[eer_idx] + fnr[eer_idx]) / 2.0
    eer_threshold = thresholds[eer_idx]

    return eer, eer_threshold


def calculate_per_user_eer(model, dataloader, device, num_classes):
    """
    Compute a one-vs-rest equal error rate for each user and their average.

    For each user id, samples of that user are treated as positives and all
    other samples as negatives, using the user's softmax score as the
    verification score.

    Args:
        model: Classifier returning per-class logits.
        dataloader: Iterable of (inputs, one-hot labels) batches.
        device: Torch device to run inference on.
        num_classes: Number of user classes (columns of the softmax output).

    Returns:
        Tuple (avg_eer, user_eers) where user_eers maps user_id -> EER.
        Users with no positive (or no negative) samples in the data are
        skipped, since their ROC curve is undefined; avg_eer is NaN if no
        user EER could be computed.
    """
    model.eval()
    all_true_classes = []
    all_probabilities = []

    with torch.no_grad():
        for inputs, labels in tqdm(dataloader, desc="Calculating per-user EER"):
            inputs = inputs.to(device)
            labels = labels.to(device)

            # Get ground truth classes (labels are one-hot)
            true_classes = torch.argmax(labels, dim=1).cpu().numpy()
            all_true_classes.extend(true_classes)

            # Get model outputs and convert to probabilities
            outputs = model(inputs)
            probabilities = torch.softmax(outputs, dim=1).cpu().numpy()
            all_probabilities.append(probabilities)

    # Convert to numpy arrays
    all_true_classes = np.array(all_true_classes)
    all_probabilities = np.vstack(all_probabilities)

    # Calculate EER for each user
    user_eers = {}
    for user_id in range(num_classes):
        # Binary labels for this user (1) vs all others (0)
        binary_labels = (all_true_classes == user_id).astype(int)

        # Robustness: roc_curve is undefined when only one class is present
        # (it emits NaNs / warnings), so skip such users instead of producing
        # a meaningless EER.
        n_pos = int(binary_labels.sum())
        if n_pos == 0 or n_pos == len(binary_labels):
            continue

        # Scores for this user
        user_scores = all_probabilities[:, user_id]

        # Calculate ROC curve
        fpr, tpr, thresholds = roc_curve(binary_labels, user_scores)
        fnr = 1 - tpr

        # Find threshold where FPR=FNR and average the two rates there
        eer_idx = np.nanargmin(np.absolute(fpr - fnr))
        eer = (fpr[eer_idx] + fnr[eer_idx]) / 2.0
        user_eers[user_id] = eer

    # Average EER over the users for which one could be computed
    avg_eer = sum(user_eers.values()) / len(user_eers) if user_eers else float('nan')

    return avg_eer, user_eers

def _train_one_epoch(model, train_loader, criterion, optimizer, device, epoch, epochs):
    """Run one optimization pass over train_loader; return (avg_loss, accuracy_percent)."""
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    progress_bar = tqdm(train_loader, desc=f"Epoch {epoch + 1}/{epochs}")
    for inputs, labels in progress_bar:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        # Labels are one-hot; CrossEntropyLoss expects integer class indices.
        loss = criterion(outputs, torch.argmax(labels, dim=1))
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == torch.argmax(labels, dim=1)).sum().item()

        progress_bar.set_postfix({
            'loss': running_loss / (progress_bar.n + 1),
            'acc': 100 * correct / total
        })

    return running_loss / len(train_loader), 100 * correct / total


def _validate(model, val_loader, criterion, device):
    """Evaluate model on val_loader without gradients; return (avg_loss, accuracy_percent)."""
    model.eval()
    val_loss = 0
    val_correct = 0
    val_total = 0

    with torch.no_grad():
        for inputs, labels in tqdm(val_loader, desc="Validation"):
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, torch.argmax(labels, dim=1))
            val_loss += loss.item()

            _, predicted = torch.max(outputs.data, 1)
            val_total += labels.size(0)
            val_correct += (predicted == torch.argmax(labels, dim=1)).sum().item()

    return val_loss / len(val_loader), 100 * val_correct / val_total


def train_model(model, train_loader, val_loader, device, epochs=EPOCHS, save_model=False):
    """
    Train `model` with Adam + ReduceLROnPlateau, validating after every epoch.

    Args:
        model: Classifier to train (modified in place).
        train_loader: Training batches of (inputs, one-hot labels).
        val_loader: Validation batches of (inputs, one-hot labels).
        device: Torch device to train on.
        epochs: Number of epochs to run.
        save_model: If True, checkpoint the weights whenever validation
            accuracy improves (to ./best_user_identification_model.pth).

    Returns:
        Tuple (model, best_val_accuracy_percent).
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
    # Halve the learning rate when validation accuracy plateaus for 2 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=2)

    best_accuracy = 0.0

    log_model_details(model, optimizer, epochs, LEARNING_RATE)

    for epoch in range(epochs):
        train_loss, train_accuracy = _train_one_epoch(
            model, train_loader, criterion, optimizer, device, epoch, epochs)
        val_loss, val_accuracy = _validate(model, val_loader, criterion, device)

        scheduler.step(val_accuracy)

        print(f"Epoch {epoch + 1}/{epochs}")
        print(f"Train Loss: {train_loss:.4f}, Train Accuracy: {train_accuracy:.2f}%")
        print(f"Val Loss: {val_loss:.4f}, Val Accuracy: {val_accuracy:.2f}%")

        if val_accuracy > best_accuracy:
            best_accuracy = val_accuracy
            if save_model:
                torch.save(model.state_dict(), "./best_user_identification_model.pth")
                print(f"New best model saved with accuracy: {best_accuracy:.2f}%")

    return model, best_accuracy


def evaluate_model(model, test_loader, device, num_classes):
    """
    Report test-set accuracy, global EER, and average per-user EER for `model`.

    Prints the metrics (including the five users with the highest EER) and
    returns the tuple (accuracy_percent, global_eer, avg_per_user_eer).
    """
    model.eval()
    correct, total = 0, 0

    # Top-1 accuracy over the whole test set.
    with torch.no_grad():
        for inputs, labels in tqdm(test_loader, desc="Final Evaluation"):
            inputs, labels = inputs.to(device), labels.to(device)
            logits = model(inputs)
            _, predicted = torch.max(logits.data, 1)
            targets = torch.argmax(labels, dim=1)
            total += labels.size(0)
            correct += (predicted == targets).sum().item()

    accuracy = 100 * correct / total
    print(f"Final Test Accuracy: {accuracy:.2f}%")

    # Global EER pooled over all (sample, class) score pairs.
    eer, threshold = calculate_eer(model, test_loader, device)
    print(f"Global Equal Error Rate (EER): {eer:.4f} at threshold {threshold:.4f}")

    # One-vs-rest EER averaged across users.
    avg_eer, user_eers = calculate_per_user_eer(model, test_loader, device, num_classes)
    print(f"Average per-user EER: {avg_eer:.4f}")

    # Show the five users the model confuses most often.
    worst_users = sorted(user_eers.items(), key=lambda item: item[1], reverse=True)[:5]
    print("Users with highest EER (hardest to identify):")
    for user_id, eer_value in worst_users:
        print(f"  User {user_id}: EER = {eer_value:.4f}")

    return accuracy, eer, avg_eer

if __name__ == '__main__':
    # Prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    #device = torch.device("cuda" if torch.cuda.is_available() else "mps")
    print(f"Using device {device}")

    train_loader, val_loader, test_loader, num_classes = prepare_dataloaders()

    # Peek at one batch to determine the input feature dimension
    # (last axis is used as the model's in_channels).
    inputs, _ = next(iter(train_loader))
    input_shape = inputs.shape

    print(f"Input shape: {input_shape}")

    # Alternative architectures kept for experimentation:
    #model = CNN(num_classes=num_classes, in_channels=input_shape[-1]).to(device)
    #model = DeepCNN(num_classes=num_classes, in_channels=input_shape[-1]).to(device)
    #model = MediumCNN(num_classes=num_classes, in_channels=input_shape[-1]).to(device)
    #model = CNN_LSTM(num_classes=num_classes, in_channels=input_shape[-1]).to(device)
    #model = OptimizedCNN_LSTM(num_classes=num_classes, in_channels=input_shape[-1]).to(device)
    model = CNN_LSTM_Attention(num_classes=num_classes, in_channels=input_shape[-1]).to(device)
    print(f"Model initialized with {num_classes} classes")

    eval_model = True
    save_model = True

    model, best_accuracy = train_model(model, train_loader, val_loader, device, EPOCHS)

    if save_model:
        models_dir = "./trained_models"
        # Bug fix: torch.save does not create missing directories, so make
        # sure the output directory exists before saving.
        os.makedirs(models_dir, exist_ok=True)
        model_name = model.__class__.__name__
        model_path = os.path.join(models_dir, f"{model_name}_model.pth")
        torch.save(model.state_dict(), model_path)
        print(f"Model saved to {model_path}")

    if eval_model:
        final_accuracy, eer, avg_per_user_eer = evaluate_model(model, test_loader, device, num_classes)

        print(f"Training completed. Best accuracy: {best_accuracy:.2f}%")
        print(f"Final Equal Error Rate (EER): {eer:.4f}")
        print(f"Final Avg Per-User EER: {avg_per_user_eer:.4f}")

        # Log results to CSV
        log_to_csv(model, final_accuracy, eer, avg_per_user_eer, best_accuracy, EPOCHS, LEARNING_RATE,
                   dataset="EMOGNITION")