import torch
import torch.nn as nn
from transformers import *
import json
import torch.functional as F
from torchcrf import CRF


class BERT_LSTM(nn.Module):
    """BERT encoder -> BiLSTM -> linear emission layer -> CRF sequence tagger.

    Per-token emission scores are produced by BERT + BiLSTM + a linear layer;
    the CRF computes a sequence-level negative log-likelihood for training and
    Viterbi-decodes the best tag path at inference.
    """

    def __init__(self, tag_size, hidden_dim, bert_dir='bert-base-chinese', load_pre=False, num_layers=1):
        """
        Args:
            tag_size: number of output tags (size of the CRF state space).
            hidden_dim: total BiLSTM output size; must be even because it is
                split as hidden_dim // 2 per direction.
            bert_dir: HuggingFace model name or path, used when load_pre=True.
            load_pre: if True, load pretrained BERT weights; otherwise build a
                randomly initialised BERT from the local config directory.
            num_layers: number of stacked LSTM layers.

        Raises:
            ValueError: if hidden_dim is odd (would silently break the
                hidden2tag input size otherwise).
        """
        super(BERT_LSTM, self).__init__()
        if hidden_dim % 2 != 0:
            raise ValueError('hidden_dim must be even for a bidirectional LSTM')
        self.tag_size = tag_size  # num of tags for final softmax layer

        if load_pre:
            self.bert_encoder = BertModel.from_pretrained(bert_dir)
        else:
            # Randomly initialised BERT built from the local config only
            # (no pretrained weights loaded).
            my_config = BertConfig.from_pretrained('./data/bert_base_chinese')
            self.bert_encoder = BertModel(my_config)

        # BERT hidden size; also the input dim of the LSTM.
        self.bert_out_dim = self.bert_encoder.config.hidden_size
        # BiLSTM: hidden_dim // 2 per direction so the two directions
        # concatenate back to hidden_dim.
        self.lstm = nn.LSTM(self.bert_out_dim,
                            hidden_dim // 2,
                            batch_first=True,
                            num_layers=num_layers,
                            bidirectional=True)
        # Map LSTM output to tag space.
        self.hidden2tag = nn.Linear(hidden_dim, self.tag_size)
        self.crf = CRF(self.tag_size, batch_first=True)

    def tag_outputs(self, input_ids, input_mask=None):
        """Return per-token emission scores, shape (batch, seq_len, tag_size)."""
        sequence_output = self.bert_encoder(input_ids, input_mask)[0]
        sequence_output, _ = self.lstm(sequence_output)
        emissions = self.hidden2tag(sequence_output)
        return emissions

    def forward(self, input_ids, tags, input_mask):
        """Return the CRF negative log-likelihood loss for the gold `tags`."""
        emissions = self.tag_outputs(input_ids, input_mask)
        # torchcrf returns the log-likelihood; negate it to get a loss.
        loss = -1 * self.crf(emissions, tags, mask=input_mask.byte())
        return loss

    def predict(self, input_ids, input_mask):
        """Viterbi-decode the best tag sequence for each batch element.

        Returns a list of tag-id lists (one per batch element, masked length).
        """
        emissions = self.tag_outputs(input_ids, input_mask)
        return self.crf.decode(emissions, input_mask.byte())

    # NOTE(review): the original code overrode eval()/train() to toggle only
    # self.bert_encoder, which left the LSTM (and any dropout inside it) in
    # the wrong mode and dropped nn.Module.train()'s `mode` argument. The
    # overrides are removed so the inherited nn.Module.train()/eval() switch
    # every submodule recursively, as callers expect.
