"""
Logging utilities for training and evaluation
"""
import os
import logging
import csv
import time
from datetime import datetime
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np


class TrainingLogger:
    """
    Logger for training process
    """

    def __init__(self, log_dir, run_id=None, config=None):
        """
        Initialize the training logger

        Args:
            log_dir (str): Directory to save logs
            run_id (str, optional): Unique identifier for this run
            config (dict, optional): Configuration parameters to log
        """
        self.log_dir = log_dir
        self.run_id = run_id or datetime.now().strftime("%Y%m%d_%H%M%S")

        # Create log directory
        os.makedirs(log_dir, exist_ok=True)

        # Setup file paths
        self.log_file = os.path.join(log_dir, f"training_{self.run_id}.log")
        self.csv_file = os.path.join(log_dir, f"metrics_{self.run_id}.csv")
        self.config_file = os.path.join(log_dir, f"config_{self.run_id}.json")

        # Setup logger
        self.logger = logging.getLogger(f"training_{self.run_id}")
        self.logger.setLevel(logging.INFO)

        # File handler
        file_handler = logging.FileHandler(self.log_file)
        file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(file_formatter)
        self.logger.addHandler(file_handler)

        # Console handler
        console_handler = logging.StreamHandler()
        console_formatter = logging.Formatter('%(asctime)s - %(message)s')
        console_handler.setFormatter(console_formatter)
        self.logger.addHandler(console_handler)

        # CSV writer
        self.csv_header = ['epoch', 'phase', 'loss', 'accuracy', 'macro_f1', 'kappa', 'learning_rate', 'time']
        self.csv_file_handle = open(self.csv_file, 'w', newline='')
        self.csv_writer = csv.writer(self.csv_file_handle)
        self.csv_writer.writerow(self.csv_header)

        # Save configuration
        if config:
            with open(self.config_file, 'w') as f:
                json.dump(config, f, indent=4)

        # Metrics history
        self.history = {
            'train_loss': [],
            'train_acc': [],
            'train_f1': [],
            'train_kappa': [],
            'val_loss': [],
            'val_acc': [],
            'val_f1': [],
            'val_kappa': [],
            'test_loss': [],
            'test_acc': [],
            'test_f1': [],
            'test_kappa': [],
            'lr': []
        }

        self.best_metrics = {
            'val_acc': 0.0,
            'val_f1': 0.0,
            'val_loss': float('inf'),
            'val_kappa': 0.0,
            'epoch': 0
        }

        self.start_time = time.time()

        # Log start info
        self.logger.info(f"Starting training run {self.run_id}")
        if config:
            self.logger.info(f"Configuration: {json.dumps(config, indent=2)}")

    def log_epoch(self, epoch, phase, metrics, learning_rate=None):
        """
        Log metrics for an epoch

        Args:
            epoch (int): Current epoch
            phase (str): Training phase ('train', 'val', 'test')
            metrics (dict): Dictionary of metrics
            learning_rate (float, optional): Current learning rate
        """
        # Extract metrics
        loss = metrics.get('loss', float('nan'))
        accuracy = metrics.get('accuracy', float('nan'))
        macro_f1 = metrics.get('macro_f1', float('nan'))
        kappa = metrics.get('kappa', float('nan'))

        # Calculate elapsed time
        elapsed = time.time() - self.start_time

        # Write to CSV
        self.csv_writer.writerow([
            epoch, phase, loss, accuracy, macro_f1, kappa, learning_rate, elapsed
        ])
        self.csv_file_handle.flush()

        # Update history
        if phase == 'train':
            self.history['train_loss'].append(loss)
            self.history['train_acc'].append(accuracy)
            self.history['train_f1'].append(macro_f1)
            self.history['train_kappa'].append(kappa)
            if learning_rate:
                self.history['lr'].append(learning_rate)
        elif phase == 'val':
            self.history['val_loss'].append(loss)
            self.history['val_acc'].append(accuracy)
            self.history['val_f1'].append(macro_f1)
            self.history['val_kappa'].append(kappa)

            # Check for best model
            if macro_f1 > self.best_metrics['val_f1']:
                self.best_metrics['val_f1'] = macro_f1
                self.best_metrics['val_acc'] = accuracy
                self.best_metrics['val_loss'] = loss
                self.best_metrics['val_kappa'] = kappa
                self.best_metrics['epoch'] = epoch
        elif phase == 'test':
            self.history['test_loss'].append(loss)
            self.history['test_acc'].append(accuracy)
            self.history['test_f1'].append(macro_f1)
            self.history['test_kappa'].append(kappa)

        # Log to console
        self.logger.info(
            f"Epoch {epoch} [{phase}] - "
            f"Loss: {loss:.4f}, "
            f"Accuracy: {accuracy:.4f}, "
            f"Macro F1: {macro_f1:.4f}, "
            f"Kappa: {kappa:.4f}"
            + (f", LR: {learning_rate:.6f}" if learning_rate else "")
        )

    def log_message(self, message, level='info'):
        """
        Log a message

        Args:
            message (str): Message to log
            level (str): Log level ('info', 'warning', 'error', 'debug')
        """
        if level == 'info':
            self.logger.info(message)
        elif level == 'warning':
            self.logger.warning(message)
        elif level == 'error':
            self.logger.error(message)
        elif level == 'debug':
            self.logger.debug(message)

    def log_best_metrics(self):
        """Log the best metrics achieved during training"""
        self.logger.info(
            f"Best model at epoch {self.best_metrics['epoch']} - "
            f"Validation Accuracy: {self.best_metrics['val_acc']:.4f}, "
            f"Validation Macro F1: {self.best_metrics['val_f1']:.4f}, "
            f"Validation Kappa: {self.best_metrics['val_kappa']:.4f}, "
            f"Validation Loss: {self.best_metrics['val_loss']:.4f}"
        )

    def plot_training_history(self, save_path=None):
        """
        Plot training history

        Args:
            save_path (str, optional): Path to save the figure
        """
        # Create figure
        plt.figure(figsize=(15, 10))

        # Plot accuracy
        plt.subplot(2, 2, 1)
        plt.plot(self.history['train_acc'], label='Train')
        plt.plot(self.history['val_acc'], label='Validation')
        plt.title('Model Accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend()

        # Plot loss
        plt.subplot(2, 2, 2)
        plt.plot(self.history['train_loss'], label='Train')
        plt.plot(self.history['val_loss'], label='Validation')
        plt.title('Model Loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend()

        # Plot Macro F1
        plt.subplot(2, 2, 3)
        plt.plot(self.history['train_f1'], label='Train')
        plt.plot(self.history['val_f1'], label='Validation')
        plt.title('Macro F1-Score')
        plt.ylabel('F1-Score')
        plt.xlabel('Epoch')
        plt.legend()

        # Plot Kappa
        plt.subplot(2, 2, 4)
        plt.plot(self.history['train_kappa'], label='Train')
        plt.plot(self.history['val_kappa'], label='Validation')
        plt.title('Cohen\'s Kappa')
        plt.ylabel('Kappa')
        plt.xlabel('Epoch')
        plt.legend()

        plt.tight_layout()

        # Save or show
        if save_path:
            plt.savefig(save_path)
            plt.close()
        else:
            plt.show()

    def save_history_to_csv(self, path=None):
        """
        Save training history to CSV

        Args:
            path (str, optional): Path to save the CSV file
        """
        if path is None:
            path = os.path.join(self.log_dir, f"history_{self.run_id}.csv")

        # Convert history to DataFrame
        history_df = pd.DataFrame({
            'epoch': np.arange(1, len(self.history['train_loss']) + 1),
            'train_loss': self.history['train_loss'],
            'val_loss': self.history['val_loss'],
            'train_acc': self.history['train_acc'],
            'val_acc': self.history['val_acc'],
            'train_f1': self.history['train_f1'],
            'val_f1': self.history['val_f1'],
            'train_kappa': self.history['train_kappa'],
            'val_kappa': self.history['val_kappa'],
        })

        # Add learning rate if available
        if self.history['lr']:
            history_df['learning_rate'] = self.history['lr']

        # Save to CSV
        history_df.to_csv(path, index=False)

    def close(self):
        """Close the logger"""
        self.csv_file_handle.close()

        # Remove handlers to avoid duplicate logging
        for handler in self.logger.handlers[:]:
            handler.close()
            self.logger.removeHandler(handler)


class ResultsLogger:
    """
    Logger for evaluation results.

    Creates ``results_dir/<run_id>/`` and stores per-fold reports,
    predictions, confusion-matrix plots, and cross-fold aggregate metrics.
    """

    def __init__(self, results_dir, run_id=None):
        """
        Initialize the results logger.

        Args:
            results_dir (str): Directory to save results.
            run_id (str, optional): Unique identifier for this run.
                Defaults to a ``YYYYmmdd_HHMMSS`` timestamp.
        """
        self.results_dir = results_dir
        self.run_id = run_id or datetime.now().strftime("%Y%m%d_%H%M%S")

        # Create results directory
        os.makedirs(results_dir, exist_ok=True)

        # All artifacts for this run live under results_dir/<run_id>/
        self.run_dir = os.path.join(results_dir, self.run_id)
        os.makedirs(self.run_dir, exist_ok=True)

    @staticmethod
    def _json_default(obj):
        """
        JSON fallback serializer: convert numpy arrays and numpy scalars
        (e.g. np.float64 metrics produced by sklearn/numpy) to native
        Python types. Anything else raises TypeError as json would.
        """
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.generic):
            return obj.item()
        raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")

    def save_fold_results(self, fold, report, y_true, y_pred, logits=None):
        """
        Save results for a fold.

        Args:
            fold (int): Fold number.
            report (dict): Evaluation report; may contain numpy arrays
                (e.g. 'confusion_matrix') and numpy scalar metrics.
            y_true (numpy.ndarray): Ground truth labels.
            y_pred (numpy.ndarray): Predicted labels.
            logits (numpy.ndarray, optional): Predicted logits.
        """
        # Create fold directory
        fold_dir = os.path.join(self.run_dir, f"fold_{fold}")
        os.makedirs(fold_dir, exist_ok=True)

        # Save report. _json_default converts the confusion matrices and any
        # numpy scalars to plain Python types, so json.dump cannot fail with
        # "Object of type float64 is not JSON serializable".
        with open(os.path.join(fold_dir, "report.json"), 'w') as f:
            json.dump(report, f, indent=4, default=self._json_default)

        # Save predictions
        predictions_df = pd.DataFrame({
            'y_true': y_true,
            'y_pred': y_pred
        })
        predictions_df.to_csv(os.path.join(fold_dir, "predictions.csv"), index=False)

        # Save logits if provided
        if logits is not None:
            np.save(os.path.join(fold_dir, "logits.npy"), logits)

    def save_confusion_matrix_plot(self, fold, cm, class_names):
        """
        Save a confusion matrix plot with raw counts and row-normalized
        values annotated in each cell.

        Args:
            fold (int): Fold number.
            cm (numpy.ndarray): Confusion matrix (rows = true labels).
            class_names (list): List of class names.
        """
        fold_dir = os.path.join(self.run_dir, f"fold_{fold}")
        os.makedirs(fold_dir, exist_ok=True)

        plt.figure(figsize=(10, 8))

        # Row-normalize the confusion matrix. Guard against zero row sums
        # (a class absent from y_true) which would otherwise produce NaN
        # cells and a divide-by-zero warning; such rows normalize to 0.
        row_sums = cm.sum(axis=1, keepdims=True).astype('float')
        cm_norm = np.divide(cm.astype('float'), row_sums,
                            out=np.zeros(cm.shape, dtype=float),
                            where=row_sums != 0)

        plt.imshow(cm_norm, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title('Confusion Matrix')
        plt.colorbar()

        tick_marks = np.arange(len(class_names))
        plt.xticks(tick_marks, class_names, rotation=45)
        plt.yticks(tick_marks, class_names)

        # Annotate each cell with "count\n(normalized)"; flip the text color
        # on dark cells for readability
        thresh = cm_norm.max() / 2.
        for i in range(cm_norm.shape[0]):
            for j in range(cm_norm.shape[1]):
                plt.text(j, i, f'{cm[i, j]}\n({cm_norm[i, j]:.2f})',
                         horizontalalignment="center",
                         color="white" if cm_norm[i, j] > thresh else "black")

        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')

        plt.savefig(os.path.join(fold_dir, "confusion_matrix.png"))
        plt.close()

    def save_aggregate_results(self, fold_reports):
        """
        Save aggregate (mean and std) results across all folds as JSON and
        as paper-ready CSV tables.

        Args:
            fold_reports (list): List of evaluation reports, one per fold.
                Each must have 'overall' metrics ('accuracy', 'macro_f1',
                'weighted_f1', 'kappa') and a 'per_class' dict with
                'precision', 'recall', 'f1' per class.

        Returns:
            dict: The aggregate report (native Python floats only).
        """
        def _stats(values):
            # Mean/std as native Python floats so the report stays
            # JSON-serializable (np.float64 would make json.dump raise).
            arr = np.asarray(values, dtype=float)
            return float(arr.mean()), float(arr.std())

        # Overall metrics averaged across folds
        avg_metrics = {}
        for metric in ('accuracy', 'macro_f1', 'weighted_f1', 'kappa'):
            mean, std = _stats([report['overall'][metric] for report in fold_reports])
            avg_metrics[metric] = mean
            avg_metrics[f'std_{metric}'] = std

        # Per-class metrics averaged across folds (class set taken from the
        # first fold; all folds are assumed to share the same classes)
        classes = list(fold_reports[0]['per_class'].keys())
        per_class = {}
        for cls in classes:
            per_class[cls] = {}
            for metric in ('precision', 'recall', 'f1'):
                mean, std = _stats(
                    [report['per_class'][cls][metric] for report in fold_reports])
                per_class[cls][metric] = mean
                per_class[cls][f'std_{metric}'] = std

        # Save aggregate results
        aggregate_report = {
            'overall': avg_metrics,
            'per_class': per_class,
            'num_folds': len(fold_reports)
        }

        with open(os.path.join(self.run_dir, "aggregate_report.json"), 'w') as f:
            json.dump(aggregate_report, f, indent=4, default=self._json_default)

        # Save as CSV for easy import to papers
        overall_df = pd.DataFrame({
            'Metric': ['Accuracy', 'Macro F1', 'Weighted F1', 'Kappa'],
            'Mean': [
                avg_metrics['accuracy'],
                avg_metrics['macro_f1'],
                avg_metrics['weighted_f1'],
                avg_metrics['kappa']
            ],
            'Std': [
                avg_metrics['std_accuracy'],
                avg_metrics['std_macro_f1'],
                avg_metrics['std_weighted_f1'],
                avg_metrics['std_kappa']
            ]
        })

        overall_df.to_csv(os.path.join(self.run_dir, "overall_metrics.csv"), index=False)

        # Per-class metrics table
        per_class_data = [
            {
                'Class': cls,
                'Precision': per_class[cls]['precision'],
                'Std Precision': per_class[cls]['std_precision'],
                'Recall': per_class[cls]['recall'],
                'Std Recall': per_class[cls]['std_recall'],
                'F1': per_class[cls]['f1'],
                'Std F1': per_class[cls]['std_f1']
            }
            for cls in classes
        ]

        per_class_df = pd.DataFrame(per_class_data)
        per_class_df.to_csv(os.path.join(self.run_dir, "per_class_metrics.csv"), index=False)

        # Return the aggregate report
        return aggregate_report