# -*- coding:utf8 -*-
# @Time : 2023/2/23 10:35
# @Author : WanJie Wu

import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel, BertPreTrainedModel
from torchcrf import CRF
from torch import Tensor

import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    Scales the per-sample cross entropy by ``(1 - p_t) ** gamma`` so that
    well-classified examples contribute less and training focuses on hard ones.

    Args:
        gamma: focusing parameter; ``gamma=0`` reduces to plain cross entropy.
        alpha: optional class weighting. A scalar ``a`` is expanded to the
            binary pair ``[a, 1 - a]``; a list supplies one weight per class.
        size_average: if True return the mean loss over elements, else the sum.
    """

    def __init__(self, gamma=2, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        # BUGFIX: the original checked isinstance(alpha, (float, int, long)).
        # `long` does not exist in Python 3, so that tuple raised NameError on
        # every construction. int covers all integers in Python 3.
        if isinstance(alpha, (float, int)):
            self.alpha = torch.Tensor([alpha, 1 - alpha])
        if isinstance(alpha, list):
            self.alpha = torch.Tensor(alpha)
        self.size_average = size_average

    def forward(self, input, target):
        """Compute the focal loss.

        Args:
            input: raw logits of shape (N, C) or (N, C, d1, d2, ...).
            target: integer class indices matching the non-class dims of input.

        Returns:
            Scalar loss tensor (mean or sum, per ``size_average``).
        """
        if input.dim() > 2:
            # Flatten spatial dims: N,C,H,W => N,C,H*W => N,H*W,C => N*H*W,C
            # so every position is treated as an independent sample.
            input = input.view(input.size(0), input.size(1), -1)
            input = input.transpose(1, 2)
            input = input.contiguous().view(-1, input.size(2))
        target = target.view(-1, 1)

        # log p_t of the target class; dim is now explicit (the bare
        # F.log_softmax(input) form is deprecated and warns in PyTorch).
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        # p_t, detached so the modulating factor (1 - p_t)**gamma carries no
        # gradient (same intent as the original's .data.exp()).
        pt = logpt.detach().exp()

        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1))
            logpt = logpt * at

        loss = -1 * (1 - pt) ** self.gamma * logpt
        if self.size_average:
            return loss.mean()
        return loss.sum()


class BertCRF(BertPreTrainedModel):
    """BERT sequence labeller with a CRF output layer.

    Optionally inserts a BiLSTM between BERT's last hidden states and the
    emission classifier.

    Args:
        config: transformers Bert config (provides hidden_size / dropout prob).
        num_labels: number of CRF tags.
        add_bilstm: if True, run hidden states through a BiLSTM first.
        num_layers: number of stacked LSTM layers.
        lstm_hidden_size: per-direction LSTM hidden size.
    """

    def __init__(self, config, num_labels, add_bilstm=False, num_layers=1, lstm_hidden_size=256):
        super(BertCRF, self).__init__(config)
        self.num_labels = num_labels
        self.add_bilstm = add_bilstm
        self.num_layers = num_layers
        self.lstm_hidden_size = lstm_hidden_size

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # BiLSTM concatenates forward+backward states, doubling the feature dim.
        output_dim = 2 * self.lstm_hidden_size if add_bilstm else config.hidden_size
        self.classifier = nn.Linear(output_dim, self.num_labels)
        self.bi_lstm = nn.LSTM(
            input_size=config.hidden_size,  # feature size of BERT's output layer
            hidden_size=self.lstm_hidden_size,
            batch_first=True,
            num_layers=self.num_layers,
            bidirectional=True,
        )
        self.crf = CRF(num_tags=self.num_labels, batch_first=True)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, labels=None):
        """
        1. BERT last hidden state: batch_size * sequence_length * dim
        2. (optional BiLSTM, then) dropout
        3. classifier emissions (logits): batch_size * sequence_length * num_labels
        4. if labels are given, CRF negative log-likelihood over the emissions
        5. returns (loss, logits) — loss is None when no labels are provided
        """
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        last_hidden_state = outputs[0]

        if self.add_bilstm:
            last_hidden_state, _ = self.bi_lstm(last_hidden_state)

        sequence_output = self.dropout(last_hidden_state)
        logits = self.classifier(sequence_output)
        if labels is not None:
            # BUGFIX: pytorch-crf expects a ByteTensor/BoolTensor mask, but the
            # tokenizer's attention_mask is a LongTensor — convert before use
            # (guarding None, in which case CRF masks nothing).
            crf_mask = attention_mask.bool() if attention_mask is not None else None
            loss = -1 * self.crf(emissions=logits, tags=labels, mask=crf_mask)
            return loss, logits
        return None, logits


class BertSoftmax(BertPreTrainedModel):
    """BERT token classifier with a softmax (cross-entropy) head.

    Optionally inserts a BiLSTM between BERT's last hidden states and the
    classifier.

    Args:
        config: transformers Bert config (provides hidden_size / dropout prob).
        num_labels: number of output classes per token.
        loss_type: one of "ce", "focal", "lsr" (only "ce" is implemented).
        add_bilstm: if True, run hidden states through a BiLSTM first.
        num_layers: number of stacked LSTM layers.
        lstm_hidden_size: per-direction LSTM hidden size.
    """

    def __init__(self, config, num_labels, loss_type, add_bilstm=False, num_layers=1, lstm_hidden_size=256):
        super(BertSoftmax, self).__init__(config)
        self.loss_type = loss_type
        self.num_labels = num_labels
        self.add_bilstm = add_bilstm
        self.num_layers = num_layers
        self.lstm_hidden_size = lstm_hidden_size

        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.bi_lstm = nn.LSTM(
            input_size=config.hidden_size,  # feature size of BERT's output layer
            hidden_size=self.lstm_hidden_size,
            batch_first=True,
            num_layers=self.num_layers,
            bidirectional=True,
        )
        # BiLSTM concatenates forward+backward states, doubling the feature dim.
        output_dim = 2 * self.lstm_hidden_size if add_bilstm else config.hidden_size
        self.classifier = nn.Linear(output_dim, self.num_labels)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, labels=None):
        """Return (loss, logits); loss is None when no labels are provided."""
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        last_hidden_state = outputs[0]
        if self.add_bilstm:
            last_hidden_state, _ = self.bi_lstm(last_hidden_state)
        sequence_output = self.dropout(last_hidden_state)
        logits = self.classifier(sequence_output)
        if labels is not None:
            # BUGFIX: was `assert self.loss_type in [...]` — asserts are stripped
            # under `python -O`, so validate explicitly and fail loudly.
            if self.loss_type not in ("ce", "focal", "lsr"):
                raise ValueError(
                    "loss_type must be one of 'ce', 'focal', 'lsr', got {!r}".format(self.loss_type)
                )
            if self.loss_type == "ce":
                loss_fct = nn.CrossEntropyLoss()
            else:
                # "focal"/"lsr" not wired up yet (FocalLoss exists in this file;
                # TODO hook it in here).
                raise NotImplementedError("待实现")
            if attention_mask is not None:
                # Only score positions the attention mask marks as real tokens.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss, logits
        return None, logits
