'''
Model.py — model wrapper (multimodal NER model definition).
'''

from model.decoders.CRF import CRF
from model.processores.CommonProcessores import *
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class MMNerModel(torch.nn.Module):
    """Multimodal NER model.

    Projects BERT token embeddings into a module-dimension space, runs them
    through a stack of masked self-attention blocks with residual
    add-and-norm, concatenates the attended features back onto the original
    embeddings, and scores tags with a linear layer decoded by a CRF.

    Expects ``args.train`` to provide ``tag2idx``, ``max_word_length``,
    ``bert_dim`` and ``module_dim``.
    """

    def __init__(self, args):
        # Modern zero-argument super() (Python 3 idiom).
        super().__init__()

        # CRF decoder over the tag set; inputs are batch-first.
        self.crf = CRF(len(args.train.tag2idx), batch_first=True)
        self.max_word_len = args.train.max_word_length
        self.bert_dim = args.train.bert_dim
        self.module_dim = args.train.module_dim
        # NOTE(review): max_word_len, resnet_dim and hidden_dim are not read
        # by any method in this file — presumably consumed by external code;
        # confirm before removing.
        self.resnet_dim = 2048
        self.hidden_dim = 256
        self.blocks = 6      # number of stacked self-attention blocks
        self.dropout = 0.4
        self.att_head = 8    # attention heads per block

        # Projection from BERT space to the module dimension.
        self.trans_t = nn.Linear(self.bert_dim, self.module_dim)
        # Emission scores for the CRF: concat(original, attended) -> tags.
        # (nn.Linear used consistently; original mixed nn.* and torch.nn.*.)
        self.hidden2tag = nn.Linear(self.bert_dim + self.module_dim, len(args.train.tag2idx))

        # Text self-attention stack: `blocks` independent attention layers,
        # each paired with a residual add-and-norm wrapper.
        self.att_text = clone(MultiHeadAttention(self.att_head, self.module_dim, self.dropout), self.blocks)
        self.res4att_text = clone(AddandNorm(self.module_dim, self.dropout), self.blocks)

    def cal(self, data):
        """Compute per-token tag emission scores.

        Args:
            data: mapping with keys ``"crf_attention_mask"`` (token mask)
                and ``"ntokens_embeddings"`` (BERT token features).

        Returns:
            Tensor of emission scores over the tag set, one row per token.
        """
        crf_attention_mask = data["crf_attention_mask"]
        token_feat_origin = data["ntokens_embeddings"]

        token_feat = self.trans_t(token_feat_origin)
        for i in range(self.blocks):
            # Masked text self-attention (query = key = value = token_feat);
            # unsqueeze(1) adds a broadcast dim for the attention heads,
            # then the add-and-norm applies the residual connection.
            token_feat = self.res4att_text[i](
                token_feat,
                self.att_text[i](token_feat, token_feat, token_feat,
                                 crf_attention_mask.unsqueeze(1)))
        # Concatenate original BERT features with the attended features along
        # the hidden dimension before projecting to tag space.
        token_feat = torch.cat((token_feat_origin, token_feat), dim=2)
        return self.hidden2tag(token_feat)

    def log_likelihood(self, data):
        """Return the negative CRF log-likelihood of ``data["label"]`` (training loss)."""
        return -self.crf(self.cal(data), data["label"], mask=data["crf_attention_mask"])

    def forward(self, data):
        """Decode and return the best tag sequence for each sample in the batch."""
        return self.crf.decode(self.cal(data), mask=data["crf_attention_mask"])
