# -*- coding: utf-8 -*-

import torch
import torch.nn as nn

from lib.bert.model import BertConfig, BertModel


class FeedForward(nn.Module):
    """Two-layer position-wise feed-forward head: Linear -> ReLU -> Dropout -> Linear.

    Args:
        hidden_size: input feature dimension.
        d_ff: inner (expansion) dimension.
        tgt_size: output feature dimension.
        keep_prob: value handed directly to ``nn.Dropout``.
            NOTE(review): ``nn.Dropout(p)`` treats ``p`` as the *drop*
            probability, not the keep probability — the parameter name
            suggests the opposite. Confirm intended semantics with callers.
    """

    def __init__(self, hidden_size, d_ff, tgt_size, keep_prob):
        super(FeedForward, self).__init__()
        self.w_1 = nn.Linear(hidden_size, d_ff)
        self.w_2 = nn.Linear(d_ff, tgt_size)
        self.dropout = nn.Dropout(keep_prob)

    def forward(self, x):
        # Expand, activate, regularize, then project down to the target size.
        hidden = self.w_1(x)
        activated = torch.relu(hidden)
        regularized = self.dropout(activated)
        return self.w_2(regularized)


class Bert_FC(nn.Module):
    """BERT encoder followed by a small feed-forward classification head.

    Args:
        pretrained_model_name_or_path: checkpoint identifier/path passed to
            ``BertModel.from_pretrained``.
        hidden_size: BERT hidden dimension (input size of the head).
        tgt_size: number of output units of the head.
        keep_prob: dropout argument forwarded to ``nn.Dropout`` and
            ``FeedForward``. NOTE(review): ``nn.Dropout(p)`` interprets this
            as the *drop* probability despite the name — confirm callers'
            intent.
        use_cuda: move the module to ``cuda:0`` when True and CUDA is
            available; otherwise stay on CPU.
    """

    def __init__(self, pretrained_model_name_or_path, hidden_size, tgt_size, keep_prob, use_cuda):
        super(Bert_FC, self).__init__()
        self.bert = BertModel.from_pretrained(pretrained_model_name_or_path, output_hidden_states=True)
        # Explicitly unfreeze all BERT weights (fine-tune the full encoder).
        for param in self.bert.parameters():
            param.requires_grad = True
        self.ffn = FeedForward(hidden_size, 512, tgt_size, keep_prob)
        self.dropout = nn.Dropout(keep_prob)
        self.to(torch.device("cuda:0" if use_cuda and torch.cuda.is_available() else "cpu"))

    def forward(self, entities, masks):
        """Encode token ids with BERT and classify the pooled representation.

        Args:
            entities: token-id tensor fed to the BERT encoder.
            masks: attention mask matching ``entities``.

        Returns:
            Logits of shape ``(batch, tgt_size)``.
        """
        output = self.bert(entities, attention_mask=masks)
        # output[1] is assumed to be the pooled [CLS] representation —
        # TODO(review): confirm against lib.bert.model.BertModel's return order.
        out = self.dropout(output[1])
        out = self.ffn(out)
        return out

