from transformers import T5Tokenizer, T5EncoderModel, T5PreTrainedModel
from transformers.models.t5.modeling_t5 import T5Stack
from transformers.utils import ModelOutput
import re
from torch import nn
import torch
import copy

import torch.nn.functional as F
from torch import nn

class FocalLossBinary(nn.Module):
    """Binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Operates on raw logits; ``targets`` must be a float tensor of 0./1.
    values with the same shape as ``inputs``.

    Args:
        alpha: class-balance weight for the positive class (negatives get
            ``1 - alpha``). A negative value disables alpha-weighting.
            NOTE: the default 1.0 zeroes the loss on negative examples —
            callers typically pass e.g. 0.25 or 0.5.
        gamma: focusing exponent; 0 reduces to plain BCE.
        redution: reduction mode, one of 'none' | 'mean' | 'sum'. The
            misspelled name is kept for backward compatibility.
        reduction: correctly spelled alias; overrides ``redution`` when given.
    """

    def __init__(self, alpha=1., gamma=2., redution='mean', reduction=None):
        super(FocalLossBinary, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        # Prefer the correctly spelled keyword when the caller supplies it.
        self.redution = redution if reduction is None else reduction

    def forward(self, inputs, targets):
        """Return the focal loss of ``inputs`` (logits) against ``targets``."""
        p = torch.sigmoid(inputs)
        # Per-element BCE; the configured reduction is applied at the end.
        ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")

        # p_t is the model's probability of the *true* class; small p_t
        # marks a hard example, which (1 - p_t)**gamma up-weights.
        p_t = p * targets + (1 - p) * (1 - targets)
        loss = ce_loss * ((1 - p_t) ** self.gamma)

        if self.alpha >= 0:
            # Local variable: the original stored this on `self`, mutating
            # module state on every forward pass for no benefit.
            alpha_t = self.alpha * targets + (1 - self.alpha) * (1 - targets)
            loss = alpha_t * loss

        # Apply the requested reduction.
        if self.redution == "none":
            return loss
        if self.redution == "mean":
            return loss.mean()
        if self.redution == "sum":
            return loss.sum()
        # The original f-string was missing the closing quote after the
        # interpolated value; message fixed.
        raise ValueError(
            f"Invalid value for arg 'redution': '{self.redution}'\n"
            f"Supported redution modes: 'none', 'mean', 'sum'"
        )


class T5MatchModel(T5PreTrainedModel):
    """Encoder-only T5 with a single-logit head for binary sequence matching.

    The model runs a plain (non-causal) T5 encoder stack, mean-pools the
    last hidden state over the sequence dimension, and projects it to one
    logit per example. When ``labels`` are given, a binary focal loss is
    computed on the flattened logits.
    """

    def __init__(self, config):
        super().__init__(config)
        self.model_dim = config.d_model

        # Token embedding shared with the encoder stack.
        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        encoder_config = copy.deepcopy(config)

        # Configure the stack as a pure, cache-free encoder.
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False

        self.encoder = T5Stack(encoder_config, self.shared)
        # Was hard-coded as nn.Linear(1024, 1), which only matches
        # d_model == 1024 checkpoints (e.g. t5-large); derive from config.
        self.cls = nn.Linear(config.d_model, 1)
        # The focal loss module is stateless and parameter-free; build it
        # once here instead of re-instantiating it on every forward pass.
        self.focal_loss = FocalLossBinary(alpha=0.5, gamma=2)

        self.post_init()
        self.model_parallel = False
        self.device_map = None

    def forward(self, input_ids, attention_mask, labels=None):
        """Encode, pool, classify.

        Args:
            input_ids: token ids, shape (batch, seq_len).
            attention_mask: 1/0 mask of the same shape.
            labels: optional binary targets, one per example; any numeric
                dtype is accepted (cast to float for the loss).

        Returns:
            ModelOutput with ``loss`` (or None) and ``logits`` of shape
            (batch, 1).
        """
        embedding_repr = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
        hidden_state = embedding_repr.last_hidden_state
        # NOTE(review): mean over dim=1 includes padding positions; a
        # mask-weighted mean may be preferable — confirm against the
        # training setup before changing (would alter checkpoint behavior).
        hidden_state = torch.mean(hidden_state, dim=1)
        out = self.cls(hidden_state)
        loss = None
        if labels is not None:
            # BCE-with-logits (inside the focal loss) requires float
            # targets; cast and flatten so long-typed labels also work.
            loss = self.focal_loss(out.view(-1), labels.float().view(-1))
        return ModelOutput(loss=loss, logits=out)
