
import torch.nn as nn
from transformers import BertForMaskedLM,BertForPreTraining,BertModel
from crf import CRF
import torch

# def getNerLabels():
#     return ['B-ORG','O','B-PER','B-LOC','I-LOC','I-ORG','I-PER']

class modelForNer(nn.Module):
    """BERT + CRF sequence-labeling model for NER.

    Every BERT output position receives one of two learned marker embeddings
    (``parameter_isStart`` where ``tokens_starts == 1``, ``parameter_isEnd``
    elsewhere) before a linear layer produces per-tag emission scores that
    are scored by a CRF.
    """

    def __init__(self, bert_model, num_labels=9):
        """
        Args:
            bert_model: model name or path forwarded to
                ``BertModel.from_pretrained``.
            num_labels: size of the NER tag set. Defaults to 9, matching the
                previously hard-coded value, so existing callers are unaffected.
        """
        super().__init__()
        self.bert = BertModel.from_pretrained(bert_model)
        print("Init model:" + bert_model)
        config = self.bert.config
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.crf = CRF(num_tags=num_labels, batch_first=True)
        # Size the marker embeddings from the config instead of hard-coding
        # 768, so BERT variants with a different hidden size also work.
        self.parameter_isStart = nn.Parameter(torch.rand(1, config.hidden_size))
        self.parameter_isEnd = nn.Parameter(torch.rand(1, config.hidden_size))

    def forward(self, input_ids, attention_mask, token_type_ids, labels, tokens_starts):
        """Run BERT, add start/continuation markers, classify, score with CRF.

        Args:
            input_ids, attention_mask, token_type_ids: standard BERT inputs
                (assumed shape ``(batch, seq_len)`` — consistent with the
                indexing below).
            labels: gold tag ids ``(batch, seq_len)``, or ``None`` at
                inference time.
            tokens_starts: per-position flags; positions equal to 1 get the
                "start" embedding, all others the "end" embedding.

        Returns:
            Negative CRF log-likelihood (scalar loss) when ``labels`` is
            given, otherwise the emission logits
            ``(batch, seq_len, num_labels)``.
        """
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            return_dict=True,
        )
        sequence_output = outputs['last_hidden_state']
        # Vectorized replacement of the original per-position Python loop:
        # broadcast-select the start/end marker for every position at once.
        # (Also drops an unused zeros_like buffer the old code allocated.)
        is_start = (tokens_starts == 1).unsqueeze(-1)
        sequence_output = sequence_output + torch.where(
            is_start, self.parameter_isStart, self.parameter_isEnd
        )
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        if labels is not None:
            # pytorch-crf expects a bool/byte mask; HF tokenizers emit a
            # LongTensor attention_mask, so cast explicitly.
            loss = -1 * self.crf(
                emissions=logits, tags=labels, mask=attention_mask.bool()
            )
            return loss
        return logits
