
from typing import Any
from collections import Counter

import torch

from utils import to_cuda, seq_mask_by_lens


class FinetuneFoward():
    """Callable wrapper running one fine-tuning forward pass (and loss).

    NOTE(review): the class name keeps the original "Foward" spelling so
    existing callers do not break.
    """

    def __init__(self, loss_fn, metrics_fn, fp16) -> None:
        self.loss_fn = loss_fn
        self.metrics_fn = metrics_fn  # stored for interface parity; unused here
        self.fp16 = fp16              # enables torch AMP autocast when True

    def compute_forward(self, batch, model, cuda: bool = False, evaluate: bool = False, class_balance: bool = False):
        """Run ``model`` on ``batch``.

        Returns logits only when ``evaluate`` is True, otherwise
        ``(logits, loss)``. ``class_balance`` is accepted for signature
        compatibility but not used by this class.
        """
        input_ids, attention_mask, labels = batch

        use_gpu = cuda and torch.cuda.is_available()  # type: ignore
        if use_gpu:
            input_ids, attention_mask, labels = to_cuda(data=(input_ids, attention_mask, labels))
            model = model.cuda()

        with torch.cuda.amp.autocast(enabled=self.fp16):
            if evaluate:
                # Inference only: no gradients, no loss computation.
                with torch.no_grad():
                    return model(input_ids, attention_mask)

            logits = model(input_ids, attention_mask)
            loss = self.loss_fn(input=logits, target=labels)
        return logits, loss

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        return self.compute_forward(*args, **kwds)


class UnbiasForward():
    """Forward pass that optionally debiases predictions with a template branch.

    The model is expected to return ``(logits, template_logits)``. At eval
    time logits can be corrected by subtracting ``alpha * template_logits``;
    at train time the loss can be augmented with ``beta * template_loss``.
    Both corrections are gated on ``compute_template``.
    """

    def __init__(self,
                loss_fn,
                metrics_fn,
                fp16,
                compute_template: bool = False,
                alpha: float = 0.0,
                beta: float = 0.0) -> None:
        self.loss_fn = loss_fn
        self.metrics_fn = metrics_fn  # stored for interface parity; unused here
        self.fp16 = fp16              # enables torch AMP autocast when True
        self.compute_template = compute_template
        self.alpha = alpha            # eval-time debiasing strength
        self.beta = beta              # train-time template-loss weight

    def compute_forward(self,
                        batch,
                        model,
                        cuda: bool = False,
                        evaluate: bool = False,
                        class_balance: bool = False):
        """Run ``model`` on ``batch``.

        Returns debiased logits when ``evaluate`` is True, otherwise
        ``(logits, loss)`` where the loss may include the weighted
        template loss.
        """
        input_ids, attention_mask, labels, template_len = batch

        if cuda and torch.cuda.is_available():  # type: ignore
            input_ids, attention_mask, labels = to_cuda(data=(input_ids, attention_mask, labels))
            # NOTE(review): template_len is deliberately left where it is —
            # presumably the model consumes it as CPU lengths; confirm.
            model = model.cuda()

        with torch.cuda.amp.autocast(enabled=self.fp16):
            if evaluate:
                with torch.no_grad():
                    logits, template_logits = model(input_ids, attention_mask, template_len)
                    if self.compute_template:
                        # Debias: subtract the scaled template-only predictions.
                        logits = logits - self.alpha * template_logits
                    return logits

            logits, template_logits = model(input_ids, attention_mask, template_len)
            loss = self.compute_loss(logits, labels, class_balance)
            if self.compute_template:
                template_loss = self.compute_loss(template_logits, labels, class_balance)
                loss = loss + self.beta * template_loss
        return logits, loss

    def compute_loss(self, logits, labels, class_balance):
        """Compute the loss, optionally reweighting the two classes.

        With ``class_balance`` each binary class {0, 1} gets an
        inverse-frequency weight ``1 - count/total``.

        Bug fix: the weight tensor is created on ``labels.device`` instead of
        unconditionally calling ``.cuda()``, which crashed CPU-only runs and
        mismatched devices when ``cuda=False``.
        """
        if not class_balance:
            return self.loss_fn(input=logits, target=labels)

        cnt = Counter(labels.cpu().tolist())  # type: ignore
        total = sum(cnt.values())  # hoisted: computed once, not per class
        weight = torch.tensor(
            [1 - cnt[0] / total, 1 - cnt[1] / total],
            device=labels.device,
        )
        return self.loss_fn(input=logits, target=labels, weight=weight)

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        return self.compute_forward(*args, **kwds)

