import torch
from enum import StrEnum
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from preprocess.dataset import get_dataloader, DataType
from model.bert_classifier import BertClassifier
from runner.predict import predict_batch
from configuration import config


class Metric(StrEnum):
    """Evaluation metrics selectable when calling `evaluate`.

    Each member's value doubles as its display name (StrEnum), so the
    members can be used directly as dict keys and in printed output.
    """

    ACCURACY = 'accuracy'
    PRECISION = 'precision'
    RECALL = 'recall'
    F1 = 'f1'


def evaluate(model, dataloader, device, metrics):
    """Run `model` over `dataloader` and compute the requested metrics.

    Args:
        model: classifier to evaluate; switched to eval mode here.
        dataloader: yields dicts with 'input_ids', 'attention_mask'
            (tensors, [batch_size, seq_len]) and 'label' ([batch_size]).
        device: torch device the input tensors are moved to.
        metrics: iterable of `Metric` members to compute.

    Returns:
        Dict mapping each requested `Metric` to its float score.
        Precision/recall/F1 are macro-averaged with zero_division=0.
    """
    model.eval()
    all_labels = []
    all_predicts = []

    # Fix: model.eval() does not disable autograd — wrap the inference
    # loop in no_grad() so forward passes build no gradient graph
    # (saves memory/time; harmless if predict_batch already does this).
    with torch.no_grad():
        for batch in tqdm(dataloader, desc="evaluate"):
            input_ids = batch['input_ids'].to(device)  # [batch_size, seq_len]
            attention_mask = batch['attention_mask'].to(device)  # [batch_size, seq_len]
            labels = batch['label'].tolist()  # [batch_size]

            predicts = predict_batch(model, input_ids, attention_mask)  # [batch_size]

            all_labels.extend(labels)
            all_predicts.extend(predicts)

    # Dispatch table instead of an if/elif chain. Metrics not in the
    # table are silently skipped, matching the original behavior.
    scorers = {
        Metric.ACCURACY: lambda y, p: accuracy_score(y, p),
        Metric.PRECISION: lambda y, p: precision_score(y, p, average='macro', zero_division=0),
        Metric.RECALL: lambda y, p: recall_score(y, p, average='macro', zero_division=0),
        Metric.F1: lambda y, p: f1_score(y, p, average='macro', zero_division=0),
    }
    return {m: scorers[m](all_labels, all_predicts) for m in metrics if m in scorers}


def run_evaluate():
    """Load the trained classifier and print test-set metric scores.

    Loads weights from `config.MODELS_DIR / 'model.pt'`, evaluates on
    the test split, and prints one `metric: score` line per metric.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    dataloader = get_dataloader(type=DataType.TEST)

    model = BertClassifier(freeze_bert=True).to(device)
    # Fix: map_location lets a checkpoint saved on GPU load on a
    # CPU-only machine; without it torch.load raises on deserialization.
    model.load_state_dict(torch.load(config.MODELS_DIR / 'model.pt', map_location=device))

    metrics = [Metric.ACCURACY, Metric.PRECISION, Metric.RECALL, Metric.F1]

    results = evaluate(model, dataloader, device, metrics)
    for metric, score in results.items():
        # Fix: ':4f' meant width 4 with default 6-decimal precision;
        # ':.4f' gives the intended 4 decimal places.
        print(f'{metric}: {score:.4f}')
