import torch
import torch.nn as nn
from transformers.modeling_bert import BertConfig, BertModel


class FeedForward(nn.Module):
    """Two-layer position-wise MLP: Linear -> ReLU -> Dropout -> Linear.

    Note: despite its name, ``keep_prob`` is handed directly to
    ``nn.Dropout``, which interprets it as the *drop* probability.
    """

    def __init__(self, hidden_size, d_ff, tgt_size, keep_prob):
        super().__init__()
        self.w_1 = nn.Linear(hidden_size, d_ff)
        self.w_2 = nn.Linear(d_ff, tgt_size)
        self.dropout = nn.Dropout(keep_prob)

    def forward(self, hidden_state):
        # Expand to d_ff, apply non-linearity, regularize, project to tgt_size.
        projected = self.w_1(hidden_state)
        activated = torch.relu(projected)
        regularized = self.dropout(activated)
        return self.w_2(regularized)


class BertEmbedding(nn.Module):
    """Frozen-BERT encoder head: BERT hidden states -> Linear -> LayerNorm -> Dropout.

    BERT itself is frozen (used as a feature extractor); only the projection
    layers created here are trainable.

    Args:
        pretrained_model_name_or_path: passed to ``BertConfig.from_pretrained``.
        hidden_size: BERT hidden size (must match the loaded config).
        d_ff: inner width of the (currently unused) FeedForward head.
        emb_dim: output embedding dimension.
        keep_prob: dropout probability — despite the name, ``nn.Dropout``
            treats this as the *drop* probability.
        pad_idx: padding token id written into the BERT config.
        use_token: if True, a second BERT pass over ``tokens`` is concatenated
            with the sentence hidden states before projection.
    """

    def __init__(self, pretrained_model_name_or_path, hidden_size, d_ff, emb_dim, keep_prob, pad_idx, use_token):
        super(BertEmbedding, self).__init__()
        self.use_token = use_token
        config = BertConfig.from_pretrained(pretrained_model_name_or_path, output_hidden_states=True)
        config.pad_token_id = pad_idx
        self.bert = BertModel(config)
        # Freeze BERT: only the heads below receive gradients.
        for param in self.bert.parameters():
            param.requires_grad = False
        # NOTE(review): self.ffn is never called in forward() (the call was
        # commented out); it is kept so existing checkpoints/state dicts load.
        self.ffn = FeedForward(hidden_size, d_ff, emb_dim, keep_prob)
        # Input width doubles when token-level states are concatenated.
        self.fc = nn.Linear(hidden_size * (2 if self.use_token else 1), emb_dim)
        self.dropout = nn.Dropout(keep_prob)
        # BUGFIX: eps was -1e-6. A negative epsilon can drive the LayerNorm
        # denominator sqrt(var + eps) into NaN when variance is near zero.
        self.layerNorm = nn.LayerNorm(emb_dim, eps=1e-6)
        self.init_params()

    def init_params(self):
        """Re-initialize the trainable heads.

        Weights with >= 2 dims get Xavier-uniform; biases get zeros; remaining
        1-D parameters (e.g. the LayerNorm weight) get the original normal init.
        """
        modules = (self.fc, self.ffn, self.layerNorm)
        for module in modules:
            for name, w in module.named_parameters():
                # BUGFIX: biases are 1-D, so in the original code they always
                # took the dim < 2 branch and were normal-initialized; the
                # `constant_(w, 0)` branch under the else-arm was unreachable.
                # Zero-init biases explicitly, as the dead branch intended.
                if "bias" in name:
                    nn.init.constant_(w, 0)
                elif w.dim() >= 2:
                    nn.init.xavier_uniform_(w)
                else:
                    # 1-D non-bias params (e.g. LayerNorm weight) keep the
                    # original normal init. NOTE(review): leaving the LayerNorm
                    # weight at its default of 1 would be more standard — confirm.
                    nn.init.normal_(w)

    def forward(self, sentences, tokens, masks):
        """Encode id batches into embeddings.

        Args:
            sentences: token-id tensor fed to BERT as input_ids.
            tokens: second id tensor, only used when ``use_token`` is True.
            masks: attention mask for ``sentences``.

        Returns:
            Tensor of shape (batch, seq_len, emb_dim).
        """
        # BertModel returns (last_hidden_state, pooled_output, all_hidden_states, ...).
        output = self.bert(sentences, masks)
        hidden_state = output[0]  # (batch, seq_len, hidden_size)
        if self.use_token:
            # NOTE(review): the sentence-level mask is reused here — assumes
            # `tokens` shares seq_len/padding layout with `sentences`; confirm.
            token_output = self.bert(tokens, masks)
            token_hx = token_output[0]
            hidden_state = torch.cat([hidden_state, token_hx], dim=-1)
        out = self.fc(hidden_state)
        out = self.layerNorm(out)
        out = self.dropout(out)
        return out
