

import torch
from torch import nn
from torch.nn import functional as F
from transformers import AutoModel, AutoModelWithLMHead, AutoModelForSequenceClassification


# model_name_or_path = "roberta-base"


class BertPromptTune(nn.Module):
    """Prompt-tuning classifier on top of a pretrained masked-LM backbone.

    The input contains a template with one (or more) mask positions;
    classification logits are obtained by reading the MLM distribution at
    the mask position(s) and aggregating the logits of the positive /
    negative verbalizer tokens into a 2-way class score.
    """

    def __init__(self,
                 backbone,
                 vocab_size,
                 mask_token_id,
                 positive_token_ids,
                 negative_token_ids,
                 mlm_head_size=768,
                 with_learnable_emb=True,
                 with_answer_weights=True,
                 with_position_weights=False,
                 num_learnable_token=2,
                 zero_shot=False,
                 fine_tune_all=True,
                 mlm_pretrained=False,
                 compute_template=False,
                 add_xavg=False):
        """
        Args:
            backbone: model name/path forwarded to AutoModelWithLMHead.
            vocab_size: vocabulary size of the LM-head output.
            mask_token_id: id of the mask token in the tokenizer vocab.
            positive_token_ids: verbalizer token ids for the positive class.
            negative_token_ids: verbalizer token ids for the negative class.
            mlm_head_size: hidden size of the freshly initialized LM head
                (ignored when ``mlm_pretrained`` is True).
            with_learnable_emb: insert learnable soft-prompt token embeddings.
            with_answer_weights: learn mixture weights over verbalizer tokens.
            with_position_weights: learn weights over multiple mask positions.
            num_learnable_token: number of soft-prompt tokens.
            zero_shot: disable all learnable components (pure zero-shot).
            fine_tune_all: when False, freeze the pretrained encoder.
            mlm_pretrained: reuse the pretrained LM head instead of a new one.
            compute_template: also return template-averaged logits.
            add_xavg: add sequence-averaged vocab logits to the mask logits.
        """
        super().__init__()

        # Pretrained encoder with an LM head.
        plm = AutoModelWithLMHead.from_pretrained(backbone)
        if not fine_tune_all:  # freeze the pretrained encoder
            # Bug fix: the original iterated over an undefined name
            # `roberta`, raising NameError whenever fine_tune_all was False.
            for param in plm.base_model.parameters():  # type: ignore
                param.requires_grad = False

        self.plm_cfg = plm.config

        self.word_embeddings = plm.base_model.embeddings.word_embeddings
        self.encoder = plm.base_model

        # (Removed a dead `self.lm_head = plm.lm_head` that was immediately
        # overwritten by this if/else.)
        if not mlm_pretrained:
            # Randomly initialized head; the pretrained one is discarded.
            self.lm_head = LMHead(in_features=self.plm_cfg.hidden_size,
                                  mlm_head_size=mlm_head_size,
                                  vocab_size=vocab_size)
        else:
            self.lm_head = plm.lm_head

        self.compute_template = compute_template
        self.add_xavg = add_xavg

        self.vocab_size = vocab_size
        self.mask_token_id = mask_token_id

        self.positive_token_ids = positive_token_ids
        self.negative_token_ids = negative_token_ids

        # In the zero-shot setting every learnable component is disabled and
        # the verbalizer logits are simply summed with uniform weight.
        if zero_shot:
            with_learnable_emb = False
            with_answer_weights = False

        # Uniform initialization either way; requires_grad decides whether
        # the answer weights are learned or stay fixed.
        self.positive_weights = nn.Parameter(
            torch.ones(len(positive_token_ids)),
            requires_grad=with_answer_weights)
        self.negative_weights = nn.Parameter(
            torch.ones(len(negative_token_ids)),
            requires_grad=with_answer_weights)

        # Weights over (at most two) mask positions.
        self.position_weights = nn.Parameter(
            torch.ones(2), requires_grad=with_position_weights)

        # Placeholder id that marks soft-prompt positions inside input_ids.
        self.learnable_tokens = -1
        self.num_learnable_token = num_learnable_token
        if with_learnable_emb:
            self.learnable_token_emb = nn.Embedding(
                num_embeddings=self.num_learnable_token, embedding_dim=300)
            self.learnable_token_ffn = nn.Linear(
                in_features=300, out_features=self.plm_cfg.hidden_size)
        else:
            self.learnable_token_emb = None

    def forward(self,
                input_ids,
                attention_mask,
                template_len):
        """Compute class logits (and optionally template logits).

        Args:
            input_ids: (batch, seq_len) token ids; soft-prompt positions are
                marked with the placeholder id -1, mask positions with
                ``mask_token_id``. NOTE: mutated in place when soft prompts
                are enabled.
            attention_mask: (batch, seq_len) attention mask.
            template_len: number of leading template tokens.

        Returns:
            Tuple ``(cls_logits, template_logits)`` where ``cls_logits`` has
            one 2-way row per mask position and ``template_logits`` is None
            unless ``compute_template`` is set.
        """
        batch_size, seq_len = input_ids.size()
        mask_ids = (input_ids == self.mask_token_id).nonzero(as_tuple=True)

        if self.learnable_token_emb is not None:
            add_ids = (input_ids == self.learnable_tokens).nonzero(
                as_tuple=True)
            # Make the placeholder ids valid for the embedding lookup below;
            # their embeddings are overwritten with the soft prompts anyway.
            input_ids[add_ids] = self.mask_token_id

            # (num_learnable_token, 300) soft-prompt embeddings.
            # Bug fix: allocate the index tensor on the input's device
            # instead of hard-coded .cuda() so the module also runs on CPU.
            replace_embeds = self.learnable_token_emb(torch.arange(
                self.num_learnable_token, device=input_ids.device))
            replace_embeds = replace_embeds.unsqueeze(0).repeat(
                batch_size, 1, 1)  # batch_size, num_learnable_token, 300

            # Project to the encoder hidden size in full precision to keep
            # this small layer out of AMP.
            with torch.cuda.amp.autocast(enabled=False):
                # batch_size, num_learnable_token, hidden_size
                replace_embeds = self.learnable_token_ffn(replace_embeds)
            # batch_size * num_learnable_token, hidden_size
            replace_embeds = replace_embeds.reshape(
                batch_size * self.num_learnable_token, -1)

            # Replace the placeholder token embeddings with the soft prompts.
            input_emb = self.word_embeddings(input_ids)  # type: ignore
            input_emb[add_ids] = replace_embeds
            # batch_size, seq_len, embed_dim
            input_emb = input_emb.view(batch_size, seq_len, -1)
        else:
            input_emb = self.word_embeddings(input_ids)

        hidden_states = self.encoder(
            inputs_embeds=input_emb,
            attention_mask=attention_mask).last_hidden_state

        # batch_size, seq_len, vocab_size
        mask_logits = self.lm_head(hidden_states)
        if self.add_xavg:
            # Average the vocab logits over the non-template tokens and add
            # them to the logits read at the mask positions.
            x_mask_logits = self.lm_head(hidden_states[:, template_len:, :])
            x_avg = F.avg_pool1d(x_mask_logits.transpose(1, 2),
                                 kernel_size=seq_len - template_len).squeeze(2)
            cls_logits = self.compute_cls_logits(
                mask_logits=mask_logits[mask_ids] + x_avg)
        else:
            cls_logits = self.compute_cls_logits(
                mask_logits=mask_logits[mask_ids])

        # template logits
        if self.compute_template:
            # NOTE(review): template_logits[mask_ids] assumes every mask
            # position lies within the first template_len tokens — confirm
            # with the data pipeline.
            template_logits = self.lm_head(hidden_states[:, :template_len, :])
            t_avg = F.avg_pool1d(template_logits.transpose(1, 2),
                                 kernel_size=template_len).squeeze(2)
            template_logits = self.compute_cls_logits(
                mask_logits=template_logits[mask_ids] + t_avg)

            return cls_logits, template_logits

        return cls_logits, None

    def compute_cls_logits(self, mask_logits):
        """Aggregate vocab logits at mask positions into 2-way class logits.

        Args:
            mask_logits: (num_masks, vocab_size) raw logits taken at the
                mask positions.

        Returns:
            (num_masks, 2) tensor of [positive_logit, negative_logit] rows.
        """
        batch_size, vocab_size = mask_logits.size()

        mask_logits = F.log_softmax(mask_logits, dim=1)
        # batch_size, mask_num, vocab_size (mask_num is 1 given the view
        # below; kept for a multi-mask generalization)
        mask_logits = mask_logits.view(batch_size, -1, vocab_size)
        _, mask_num, _ = mask_logits.size()

        # Weight each mask position, then sum positions out.
        mask_logits = (mask_logits.transpose(1, 2) *
                       self.position_weights[:mask_num]).transpose(1, 2)
        # (Removed a no-op .squeeze(1): after the sum, dim 1 is vocab_size.)
        mask_logits = mask_logits.sum(dim=1)  # batch_size, vocab_size

        # Mixture over verbalizer tokens with softmax-normalized weights.
        # batch_size, len(positive_token_ids)
        positive_logits = mask_logits[:, self.positive_token_ids] * \
            F.softmax(self.positive_weights, dim=0)
        # batch_size, len(negative_token_ids)
        negative_logits = mask_logits[:, self.negative_token_ids] * \
            F.softmax(self.negative_weights, dim=0)

        positive_logits = positive_logits.sum(1).unsqueeze(1)  # batch_size, 1
        negative_logits = negative_logits.sum(1).unsqueeze(1)  # batch_size, 1

        cls_logits = torch.cat([positive_logits, negative_logits], dim=1)

        return cls_logits


class LMHead(nn.Module):
    """RoBERTa-style masked-language-modeling head.

    Transforms encoder hidden states through dense -> GELU -> LayerNorm,
    then decodes them to vocabulary logits with a learned bias.
    """

    def __init__(self, vocab_size: int, in_features: int = 768, mlm_head_size: int = 768, layer_norm_eps: float = 1e-5):
        super().__init__()
        self.dense = nn.Linear(in_features, mlm_head_size)
        self.layer_norm = nn.LayerNorm(mlm_head_size, eps=layer_norm_eps)

        self.decoder = nn.Linear(mlm_head_size, vocab_size)
        # Keep an explicit handle on the decoder bias (mirrors the HF
        # RobertaLMHead pattern, where the bias can be resized/tied).
        self.bias = nn.Parameter(torch.zeros(vocab_size))
        self.decoder.bias = self.bias

    def forward(self, features, **kwargs):
        """Map hidden states (..., in_features) to logits (..., vocab_size)."""
        hidden = self.layer_norm(nn.functional.gelu(self.dense(features)))  # type: ignore
        return self.decoder(hidden)


class Classifier(nn.Module):
    """Head for sentence-level classification tasks.

    Reads the first token's hidden state (<s>, equivalent to [CLS]) and
    maps it to ``nums_label`` logits via dropout -> dense -> tanh ->
    dropout -> projection.
    """

    def __init__(self, in_features = 768, nums_label = 2, dropout = 0.33, hidden_size: int = 768):  # TODO: configuration
        super().__init__()
        self.dense = nn.Linear(in_features, hidden_size)
        self.dropout = nn.Dropout(dropout)
        self.out_proj = nn.Linear(hidden_size, nums_label)

    def forward(self, features, **kwargs):
        """features: (batch, seq_len, in_features) -> (batch, nums_label)."""
        cls_state = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        cls_state = torch.tanh(self.dense(self.dropout(cls_state)))
        return self.out_proj(self.dropout(cls_state))


class BertFineTune(nn.Module):
    def __init__(self, backbone, nums_label, hidden_size, dropout, fine_tune_all=True):
        super().__init__()
        # encoder
        plm = AutoModelForSequenceClassification.from_pretrained(backbone)

        self.backbone = plm.base_model
        self.classifier = plm.classifier

        if not fine_tune_all:  # freeze the pretrained encoder
            for param in self.backbone.parameters():  # type: ignore
                param.requires_grad = False
    
    def forward(self, input_ids, attention_mask):
        # bert
        roberta_outputs = self.backbone(
            input_ids, attention_mask)  # type: ignore
        sequence_output = roberta_outputs.last_hidden_state

        logits = self.classifier(sequence_output)

        return logits