from warnings import filterwarnings

from rich import print
from sklearn.metrics import (
    f1_score,
    precision_score,
    recall_score
)
from torch import Tensor, load, no_grad

from core import (
    MovieBiLSTM_CRF,
    get_dataloader
)
from utils import conf, data

# Suppress warnings raised from sklearn modules (presumably undefined-metric
# warnings when some label never appears in the predictions — verify).
filterwarnings('ignore', module='sklearn')

def valid() -> None:
    """Evaluate the saved BiLSTM-CRF model on the validation split.

    Loads the model weights from ``conf.model.path_torch``, runs one pass
    over the validation dataloader, and prints the average loss together
    with macro-averaged precision, recall and F1 over all non-PAD tokens.
    """
    print('[bold gold1]Validating model...[/]')
    # Only the validation dataloader is needed; the training one is discarded.
    _, valid_dl = get_dataloader()
    total_loss = 0.0
    n_samples = 0
    predicts, golds = [], []
    # Load model weights; map_location keeps this working even when the
    # checkpoint was saved on a different device than conf.device.
    model = MovieBiLSTM_CRF().to(conf.device)
    model.load_state_dict(load(conf.model.path_torch, map_location=conf.device))
    model.eval()
    # Inference only — disable autograd bookkeeping to save memory/compute.
    with no_grad():
        for input_ids, labels, mask in valid_dl:
            input_ids: Tensor = input_ids.to(conf.device)
            labels: Tensor = labels.to(conf.device)
            mask: Tensor = mask.to(conf.device)
            # Decoded label sequences for this batch.
            predict: Tensor = model(input_ids, mask)
            # CRF loss for this batch.
            loss: Tensor = model.log_likelihood(input_ids, labels, mask)
            total_loss += loss.item()
            n_samples += input_ids.size(0)
            # Strip PAD positions so the metrics cover real tokens only
            # (assumes PAD labels occur only as trailing padding — TODO confirm).
            for one_predict, one_true in zip(predict, labels.tolist()):
                pad_len = one_true.count(data.labels['PAD'])
                no_pad_len = len(one_true) - pad_len
                predicts.extend(one_predict[:no_pad_len])
                golds.extend(one_true[:no_pad_len])
    # Average over the actual number of samples seen — the last batch may be
    # smaller than conf.model.batch_size, so len(valid_dl) * batch_size would
    # over-count. max(..., 1) guards an empty dataloader.
    avg_loss = total_loss / max(n_samples, 1)
    # Macro-averaged token-level metrics.
    precision = precision_score(golds, predicts, average='macro')
    recall = recall_score(golds, predicts, average='macro')
    f1 = f1_score(golds, predicts, average='macro')
    print(
        f'[bold green]Loss: [bold white]{avg_loss:.3f}[/]\n'
        f'[bold green]Precision: [bold white]{precision:.3f}[/]\n'
        f'[bold green]Recall: [bold white]{recall:.3f}[/]\n'
        f'[bold green]F1: [bold white]{f1:.3f}[/]'
    )
