"""
@Time: 2021/2/5 下午 8:23
@Author: jinzhuan
@File: bert_capsule.py
@Desc: 
"""
import torch
import torch.nn as nn
import torch.nn.functional as f
from transformers import BertModel
from cognlp.models.base.base_function import BaseFunction
from cognlp.modules.decoder.capsule import Capsule


class Bert4CapsuleFunction(BaseFunction):
    """Forward / loss / evaluation logic for a BERT encoder followed by a
    capsule decoder.

    A concrete subclass (see ``Bert4Capsule``) is expected to provide
    ``self.bert``, ``self.capsule`` and ``self.device``.
    """

    def __init__(self):
        super().__init__()

    def forward(
            self,
            batch=None,
    ):
        """Encode a batch with BERT, re-align token vectors via head indexes,
        and score relation capsules.

        Args:
            batch: tuple of (input_ids, attention_mask, segment_ids,
                head_indexes, relation_mentions, relation_mentions_mask).

        Returns:
            output: per-class capsule activation norms
                (presumably shape (num_mentions, num_classes) — depends on
                ``Capsule``; TODO confirm).
            golden_labels: LongTensor of gold label ids, one per valid
                relation mention in the batch.
        """
        input_ids, attention_mask, segment_ids, head_indexes, \
            relation_mentions, relation_mentions_mask = batch
        # [0] is the token-level hidden states (last_hidden_state).
        sequence_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)[0]
        batch_size, max_len, feat_dim = sequence_output.shape
        # Re-order each sequence so positions follow head_indexes
        # (aligns sub-word outputs back to word-initial tokens).
        for i in range(batch_size):
            sequence_output[i] = torch.index_select(sequence_output[i], 0, head_indexes[i])
        # Gold labels come from the leading run of valid mentions
        # (mask == 1 up to the first other value); field 4 of a mention
        # holds its relation label id. Single pass replaces the original
        # count-then-read double loop, with identical break semantics.
        relation_labels = []
        for i in range(batch_size):
            for j in range(max_len):
                if relation_mentions_mask[i][j].item() != 1:
                    break
                relation_labels.append(relation_mentions[i][j][4].item())
        golden_labels = torch.LongTensor(relation_labels).to(self.device)
        output = self.capsule(sequence_output)
        # Capsule "probability" is the L2 norm of each output capsule vector.
        output = output.norm(p=2, dim=-1)
        return output, golden_labels

    def predict(
            self,
            batch=None,
    ):
        """Alias of :meth:`forward`; returns (prediction, golden_labels)."""
        # The original also unpacked ``batch`` here without using it — removed.
        prediction, golden_labels = self.forward(batch)
        return prediction, golden_labels

    def loss(self, batch=None, loss_function=None):
        """Capsule margin loss (Sabour et al., 2017).

        ``loss_function`` is accepted for interface compatibility but unused;
        the margin loss is computed inline.
        """
        predict, target = self.predict(batch)
        reduction = 'mean'
        m_plus, m_minus, loss_lambda = 0.9, 0.1, 0.5

        # One-hot width must equal the class dimension of ``predict`` for the
        # element-wise products below; was hard-coded to 11, generalized here.
        target = to_one_hot(target, predict.size(-1))
        max_l = (torch.relu(m_plus - predict))**2
        max_r = (torch.relu(predict - m_minus))**2
        loss = target * max_l + loss_lambda * (1 - target) * max_r
        loss = torch.sum(loss, dim=-1)

        if reduction == 'sum':
            return loss.sum()
        else:
            # Default: average over mentions.
            return loss.mean()

    def evaluate(
            self,
            batch=None,
            metrics=None,
    ):
        """Predict label ids and feed (predictions, gold) to ``metrics``."""
        prediction, target = self.predict(batch)
        # log_softmax is monotonic per row, so argmax equals argmax of the
        # raw scores; kept for parity with the original implementation.
        prediction_labels = torch.argmax(f.log_softmax(prediction, dim=1), dim=1)
        metrics.evaluate(prediction_labels, target)


def to_one_hot(x: torch.Tensor, length: int) -> torch.Tensor:
    """Return a float32 one-hot matrix of shape ``(len(x), length)``.

    Args:
        x: 1-D integer tensor of class indices, each in ``[0, length)``.
        length: number of classes (width of the result).

    Returns:
        Tensor on ``x.device`` with a single 1.0 per row at column ``x[i]``.
    """
    # Vectorized advanced indexing, built directly on the input's device:
    # replaces the original per-row Python loop and the CPU -> device copy.
    batch = x.size(0)
    one_hot = torch.zeros(batch, length, device=x.device)
    one_hot[torch.arange(batch, device=x.device), x] = 1.0
    return one_hot


class Bert4Capsule(Bert4CapsuleFunction, nn.Module):
    """Relation-classification model: pretrained BERT encoder feeding a
    capsule-network decoder.

    Args:
        vocabulary: label vocabulary; its length defines ``label_size``.
        embedding_size: feature width used downstream (default 768 * 2).
        bert_model: Hugging Face model name or path for the encoder.
        device: torch device the function logic moves labels to.
    """

    def __init__(
            self,
            vocabulary,
            embedding_size=768 * 2,
            bert_model='hfl/chinese-roberta-wwm-ext',
            device=torch.device("cuda"),
    ):
        super().__init__()
        # Plain configuration attributes.
        self.device = device
        self.vocabulary = vocabulary
        self.label_size = len(vocabulary)
        self.embedding_size = embedding_size
        # Sub-modules: pretrained encoder and capsule decoder.
        self.bert = BertModel.from_pretrained(bert_model)
        self.capsule = Capsule()

