import logging
import random
import time

import numpy as np
import torch
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix
from tqdm import tqdm  # Import tqdm for TqdmHandler


# Function to set random seed for reproducibility
def set_random_seed(seed):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducibility.

    When CUDA is available, every visible GPU is seeded as well.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

# Custom logging handler for tqdm
class TqdmHandler(logging.Handler):
    """Logging handler that emits records via ``tqdm.write``.

    Routing log output through tqdm keeps log lines from corrupting
    any active progress bars.
    """

    def emit(self, record):
        """Format *record*, print it with tqdm.write, and flush."""
        try:
            formatted = self.format(record)
            tqdm.write(formatted)
            self.flush()
        except Exception:
            # Standard logging convention: never let logging itself crash.
            self.handleError(record)

# Function to calculate accuracy
def accuracy(output, target, topk=(1,)):
    """Compute the top-k accuracies of ``output`` against ``target``.

    Args:
        output: (N, C) tensor of per-class scores/logits.
        target: (N,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of one-element tensors, one per k, each holding the
        accuracy as a percentage in [0, 100].
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)

        # (N, k_max) indices of the highest-scoring classes, transposed
        # to (k_max, N) so row i holds every sample's i-th best guess.
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        # A sample counts for top-k if any of its first k guesses hit.
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n_samples)
            for k in topk
        ]

# AverageMeter class to keep track of metrics
class AverageMeter:
    """Running tracker of a scalar metric: last value, sum, count, mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count

# Timer class to measure time intervals
class Timer:
    """Measure elapsed wall-clock time for a section of code in seconds.

    Uses CUDA events when a GPU is available (accurate for async GPU
    work), and falls back to ``time.perf_counter`` otherwise. The
    fallback fixes a crash in the original implementation, which called
    ``torch.cuda.Event`` unconditionally and therefore failed on
    CPU-only machines.
    """

    def __init__(self):
        self.start_time = None
        self.end_time = None
        self._use_cuda = torch.cuda.is_available()

    def start(self):
        """Begin timing. Must be called before stop()."""
        if self._use_cuda:
            self.start_time = torch.cuda.Event(enable_timing=True)
            self.end_time = torch.cuda.Event(enable_timing=True)
            self.start_time.record()
        else:
            self.start_time = time.perf_counter()

    def stop(self):
        """Return seconds elapsed since the matching start() call.

        Raises:
            RuntimeError: if start() was never called.
        """
        if self.start_time is None:
            raise RuntimeError("Timer.stop() called before Timer.start()")
        if self._use_cuda:
            self.end_time.record()
            # CUDA events are asynchronous; wait until recorded.
            torch.cuda.synchronize()
            return self.start_time.elapsed_time(self.end_time) / 1000  # ms -> s
        return time.perf_counter() - self.start_time

# PerformanceMeter class to track performance metrics
class PerformanceMeter:
    """Track the current and best values of a performance metric.

    Args:
        higher_is_better: if True, "best" means the maximum seen so far;
            if False, the minimum (e.g. for losses).
    """

    def __init__(self, higher_is_better=True):
        self.higher_is_better = higher_is_better
        self.best_value = None
        self.current_value = None

    def update(self, value):
        """Record *value* and promote it to best when it improves."""
        self.current_value = value
        if self.best_value is None:
            self.best_value = value
            return
        if self.higher_is_better:
            improved = value > self.best_value
        else:
            improved = value < self.best_value
        if improved:
            self.best_value = value


def calculate_metrics(outputs, labels):
    """Return weighted precision, recall and F1 of argmax predictions.

    Args:
        outputs: (N, C) tensor of class scores/logits.
        labels: (N,) tensor of ground-truth class indices.

    Returns:
        (precision, recall, f1) tuple, each weighted by class support.
    """
    _, preds = torch.max(outputs, 1)
    y_true = labels.cpu()
    y_pred = preds.cpu()
    return (
        precision_score(y_true, y_pred, average='weighted'),
        recall_score(y_true, y_pred, average='weighted'),
        f1_score(y_true, y_pred, average='weighted'),
    )


def calculate_top_k_accuracy(outputs, labels, k=5):
    """Return the top-k accuracy of ``outputs`` against ``labels``.

    Args:
        outputs: (N, C) tensor of class scores/logits.
        labels: (N,) tensor of ground-truth class indices.
        k: number of top predictions to consider. Clamped to C so the
           call cannot fail when k exceeds the number of classes (the
           original raised in torch.topk for e.g. the default k=5 with
           fewer than 5 classes).

    Returns:
        float in [0, 1]: fraction of samples whose true label appears
        among their k highest-scoring classes.
    """
    # torch.topk raises if k > number of classes; clamp for robustness.
    k = min(k, outputs.size(1))
    top_k_preds = torch.topk(outputs, k, dim=1).indices
    correct = top_k_preds.eq(labels.view(-1, 1).expand_as(top_k_preds))
    return correct.any(dim=1).float().mean().item()
