# -*- coding:utf-8 -*-
# editor: zzh

import torch.nn as nn
import torch
from transformers import BertModel
from configs import *
import torch.functional as F
from torchcrf import CRF

from openprompt.plms import load_plm
from openprompt.prompts import ManualTemplate
from openprompt.prompts import ManualVerbalizer

class BertNerPrompt(nn.Module):
    """BERT NER model whose encoder is wrapped by an OpenPrompt
    ``PromptForClassification`` head, followed by a linear tag projection
    and a CRF layer for sequence decoding.

    NOTE(review): ``classes`` is empty while the verbalizer's
    ``label_words`` maps two sentiment labels — this looks like leftover
    template code from an OpenPrompt example; confirm the intended
    prompt classes before use.
    """

    # Prompt classes handed to the ManualVerbalizer; currently empty.
    classes = [

    ]
    def __init__(self, num_class):
        """Build the prompt-wrapped PLM, the tag head and the CRF.

        Args:
            num_class: number of NER tag labels (CRF state count).
        """
        super(BertNerPrompt, self).__init__()
        # self.bert = BertModel.from_pretrained(pretrain_path)
        # `pretrain_path` comes from `configs` via the star import at file top.
        plm, tokenizer, model_config, WrapperClass = load_plm("bert", pretrain_path)

        promptTemplate = ManualTemplate(
            text='{"placeholder":"text_a"} It was {"mask"}',
            tokenizer=tokenizer,
        )

        promptVerbalizer = ManualVerbalizer(
            classes=self.classes,
            label_words={
                "negative": ["bad"],
                "positive": ["good", "wonderful", "great"],
            },
            tokenizer=tokenizer,
        )
        # Local import keeps the heavyweight dependency lazy at module load.
        from openprompt import PromptForClassification
        self.bert = PromptForClassification(
            template=promptTemplate,
            plm=plm,
            verbalizer=promptVerbalizer,
        )

        # Fine-tune every parameter of the wrapped PLM.
        for p in self.bert.parameters():
            p.requires_grad = True
        self.l1 = nn.Linear(768, num_class)  # hidden size 768 -> tag logits
        self.dropout = nn.Dropout(0.5)
        self.crf = CRF(num_tags=num_class, batch_first=True)
        # FIX: nn.Softmax() without `dim` is deprecated and emits a warning;
        # dim=-1 (over the tag axis) made explicit. Attribute is currently
        # unused by forward/calc_loss, so this is behavior-safe.
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, inputs, mask_logit=None):
        """Compute per-token tag logits and the CRF-decoded best path.

        Args:
            inputs: sequence whose first two items are token ids and
                segment ids, each of shape (batch, length) — TODO confirm
                against the caller.
            mask_logit: optional 0/1 tensor over tag classes; classes
                marked 0 have their logits suppressed.

        Returns:
            Tuple ``(ner_output, crf_output)``: raw tag logits of shape
            (batch, length, num_class) and the CRF best-path tag lists.
        """
        token_embeddings = inputs[0]
        segment_embeddings = inputs[1]
        # NOTE(review): for a HF BertModel the second positional argument is
        # attention_mask, not token_type_ids — confirm inputs[1] semantics.
        bert_output = self.bert(token_embeddings, segment_embeddings) # batch_size, length, hidden_size
        cls_output = bert_output[0]
        # No-op unless length == 1; kept for compatibility with callers.
        cls_output = torch.squeeze(cls_output, dim=1)
        cls_output = self.dropout(cls_output)
        ner_output = self.l1(cls_output)

        # BUGFIX: `mask_logit != None` invokes element-wise tensor
        # comparison; identity (`is not None`) is the correct None test.
        if mask_logit is not None:
            mask_logit = torch.unsqueeze(mask_logit, dim=1)
            mask_logit = 1 - mask_logit
            # Push masked-out tag logits far below all valid logits.
            ner_output = ner_output - mask_logit * 1e3

        # Token id 0 (padding) marks positions the CRF must ignore.
        crf_output = self.crf.decode(ner_output, mask = (inputs[0] != 0))

        return ner_output, crf_output

    def calc_loss(self, inputs, labels):
        """Return (negative CRF log-likelihood, raw tag logits) for training."""
        ner_output, _ = self.forward(inputs)
        mask = (inputs[0] != 0)
        crfloss = self.crf(ner_output, labels, mask = mask)
        # torchcrf returns the log-likelihood; negate it for a loss.
        return -crfloss, ner_output


class BertNer(nn.Module):
    """Plain BERT + linear tag head + CRF model for named-entity recognition."""

    def __init__(self, num_class):
        """Load the pretrained encoder and build the tag head and CRF.

        Args:
            num_class: number of NER tag labels (CRF state count).
        """
        super(BertNer, self).__init__()
        # `pretrain_path` comes from `configs` via the star import at file top.
        self.bert = BertModel.from_pretrained(pretrain_path)
        # Fine-tune every encoder parameter.
        for p in self.bert.parameters():
            p.requires_grad = True
        self.l1 = nn.Linear(768, num_class)  # hidden size 768 -> tag logits
        self.dropout = nn.Dropout(0.5)
        self.crf = CRF(num_tags=num_class, batch_first=True)
        # FIX: nn.Softmax() without `dim` is deprecated and emits a warning;
        # dim=-1 (over the tag axis) made explicit. Attribute is currently
        # unused by forward/calc_loss, so this is behavior-safe.
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, inputs, mask_logit=None):
        """Compute per-token tag logits and the CRF-decoded best path.

        Args:
            inputs: sequence whose first two items are token ids and
                segment ids, each of shape (batch, length) — TODO confirm
                against the caller.
            mask_logit: optional 0/1 tensor over tag classes; classes
                marked 0 have their logits suppressed.

        Returns:
            Tuple ``(ner_output, crf_output)``: raw tag logits of shape
            (batch, length, num_class) and the CRF best-path tag lists.
        """
        token_embeddings = inputs[0]
        segment_embeddings = inputs[1]
        # NOTE(review): for a HF BertModel the second positional argument is
        # attention_mask, not token_type_ids — confirm inputs[1] semantics.
        bert_output = self.bert(token_embeddings, segment_embeddings) # batch_size, length, hidden_size
        cls_output = bert_output[0]
        # No-op unless length == 1; kept for compatibility with callers.
        cls_output = torch.squeeze(cls_output, dim=1)
        cls_output = self.dropout(cls_output)
        ner_output = self.l1(cls_output)

        # BUGFIX: `mask_logit != None` invokes element-wise tensor
        # comparison; identity (`is not None`) is the correct None test.
        if mask_logit is not None:
            mask_logit = torch.unsqueeze(mask_logit, dim=1)
            mask_logit = 1 - mask_logit
            # Push masked-out tag logits far below all valid logits.
            ner_output = ner_output - mask_logit * 1e3

        # Token id 0 (padding) marks positions the CRF must ignore.
        crf_output = self.crf.decode(ner_output, mask = (inputs[0] != 0))

        return ner_output, crf_output

    def calc_loss(self, inputs, labels):
        """Return (negative CRF log-likelihood, raw tag logits) for training."""
        ner_output, _ = self.forward(inputs)
        mask = (inputs[0] != 0)
        crfloss = self.crf(ner_output, labels, mask = mask)
        # torchcrf returns the log-likelihood; negate it for a loss.
        return -crfloss, ner_output


class BertNerLstm(nn.Module):
    """BERT encoder + bidirectional LSTM + linear tag head + CRF for NER.

    NOTE(review): ``self.dropout`` is constructed but never applied in
    ``forward`` (unlike the sibling BertNer class) — confirm whether
    dropout was intended between the LSTM and the linear head.
    """

    def __init__(self, num_class):
        """Load the encoder and build the BiLSTM, tag head and CRF.

        Args:
            num_class: number of NER tag labels (CRF state count).
        """
        super(BertNerLstm, self).__init__()
        # `pretrain_path` comes from `configs` via the star import at file top.
        self.bert = BertModel.from_pretrained(pretrain_path)
        # Fine-tune every encoder parameter.
        for p in self.bert.parameters():
            p.requires_grad = True
        # 384 hidden units per direction -> 768 bidirectional output,
        # matching the linear head's input size.
        self.lstm = nn.LSTM(input_size=768, hidden_size=384, num_layers=2,
                            bidirectional=True, batch_first=True)
        self.l1 = nn.Linear(768, num_class)  # BiLSTM output 768 -> tag logits
        self.dropout = nn.Dropout(0.5)
        self.crf = CRF(num_tags=num_class, batch_first=True)
        # FIX: nn.Softmax() without `dim` is deprecated and emits a warning;
        # dim=-1 (over the tag axis) made explicit. Attribute is currently
        # unused by forward/calc_loss, so this is behavior-safe.
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, inputs):
        """Compute per-token tag logits and the CRF-decoded best path.

        Args:
            inputs: sequence whose first two items are token ids and
                segment ids, each of shape (batch, length) — TODO confirm
                against the caller.

        Returns:
            Tuple ``(ner_output, crf_output)``: raw tag logits of shape
            (batch, length, num_class) and the CRF best-path tag lists.
        """
        token_embeddings = inputs[0]
        segment_embeddings = inputs[1]
        # NOTE(review): for a HF BertModel the second positional argument is
        # attention_mask, not token_type_ids — confirm inputs[1] semantics.
        bert_output = self.bert(token_embeddings, segment_embeddings) #batch_size, length, hidden_size
        cls_output = bert_output[0]
        # No-op unless length == 1; kept for compatibility with callers.
        cls_output = torch.squeeze(cls_output, dim=1)
        # Hidden/cell states are discarded; only the output sequence is used.
        cls_output, (h, c) = self.lstm(cls_output)
        ner_output = self.l1(cls_output)
        # Token id 0 (padding) marks positions the CRF must ignore.
        crf_output = self.crf.decode(ner_output, mask = (inputs[0] != 0))

        return ner_output, crf_output

    def calc_loss(self, inputs, labels):
        """Return (negative CRF log-likelihood, raw tag logits) for training."""
        ner_output, _ = self.forward(inputs)
        mask = (inputs[0] != 0)
        crfloss = self.crf(ner_output, labels, mask = mask)
        # torchcrf returns the log-likelihood; negate it for a loss.
        return -crfloss, ner_output


