from typing import Type

import torch.nn as nn

from model.LayoutLMv2Model import LayoutLMv2Model


class LAYOUYLMV2(nn.Module):
    """
    LayoutLMv2 pre-training model.

    Wraps a LayoutLMv2 backbone with two pre-training heads:
    covered-sentence prediction and masked language modeling.
    (Class name typo — presumably "LAYOUTLMV2" — kept for backward
    compatibility with existing callers.)
    """

    def __init__(self, layoutlmv2: Type[LayoutLMv2Model], config):
        """
        :param layoutlmv2: the LayoutLMv2Model *class* (or a factory
            callable); it is instantiated here with ``config``.  The
            original annotation suggested an instance, but the code
            calls it — the annotation now matches the actual usage.
        :param config: configuration object providing at least
            ``hidden_size``, ``vocab_size`` and ``TRAIN.COVERED_LABELS``.
        """
        super().__init__()
        self.config = config
        # Instantiate the backbone from the supplied class/factory.
        self.layoutlmv2 = layoutlmv2(self.config)
        self.covered_sentence = CoveredPrediction(
            self.config.hidden_size, self.config.TRAIN.COVERED_LABELS
        )
        self.mask_lm = MaskedLanguageModel(self.config.hidden_size, self.config.vocab_size)

    def forward(self, curpus_input, bbox, image):
        """
        Encode the inputs and apply both pre-training heads.

        :param curpus_input: token-id input (name typo — presumably
            "corpus_input" — kept for keyword-caller compatibility)
        :param bbox: token bounding boxes
        :param image: page image input
        :return: tuple ``(covered_log_probs, mlm_log_probs)``
        """
        x = self.layoutlmv2(curpus_input, bbox, image)
        return self.covered_sentence(x), self.mask_lm(x)


class CoveredPrediction(nn.Module):
    """
    Covered/uncovered classification head.

    Projects encoder hidden states to ``covered`` classes and returns
    log-probabilities (pairs with ``nn.NLLLoss``).
    """

    def __init__(self, hidden, covered):
        """
        :param hidden: LayoutLM model output size
        :param covered: number of target classes
        """
        super().__init__()
        self.hidden = hidden
        self.covered = covered
        self.linear = nn.Linear(hidden, covered)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        # Linear projection followed by log-softmax over the class axis.
        logits = self.linear(x)
        return self.softmax(logits)


class MaskedLanguageModel(nn.Module):
    """
    Masked-language-model head.

    Predicts the original token at each position of a masked sequence:
    an n-way classification where n = ``vocab_size``.  Returns
    log-probabilities (pairs with ``nn.NLLLoss``).
    """

    def __init__(self, hidden, vocab_size):
        """
        :param hidden: output size of LayoutLM model
        :param vocab_size: total vocab size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        # Project to the vocabulary, then normalize to log-probabilities.
        log_probs = self.softmax(self.linear(x))
        return log_probs
