'''
Model.py — model wrapper (BERT encoder + CRF decoder).
'''
import numpy as np
import torch

from model.decoders.CRF import CRF
from transformers import BertModel
from model.processores.CommonProcessores import *

# torch.set_printoptions(profile="full")
device = 'cuda' if torch.cuda.is_available() else 'cpu'  # single global device used by all helpers below
from torch.nn import functional as F

# (row, col) offsets of the four diagonal neighbors in the visual patch grid.
Fx = [[1, 1], [1, -1], [-1, 1], [-1, -1]]


def numpy_sofmax(logits):
    """Numerically stable softmax over the last axis of `logits`.

    Subtracting the per-row maximum before exponentiation prevents
    `np.exp` from overflowing to inf (and the result to nan) for large
    logit values; it does not change the mathematical result.
    """
    logits = np.asarray(logits)
    shifted = logits - np.max(logits, axis=-1, keepdims=True)
    e_x = np.exp(shifted)
    probs = e_x / np.sum(e_x, axis=-1, keepdims=True)
    return probs


def get_Phrase(max_length, token_index_list, phrase_info, Text_feat):
    """Gather per-phrase token features and build a phrase position mask.

    Args:
        max_length: length of the token axis (size of the returned mask).
        token_index_list: maps a word index to its starting BERT token index.
        phrase_info: triple of ([phrase], [phrase start word index],
            [phrase length]) — length presumably counted in tokens; TODO confirm.
        Text_feat: (max_length, dim) token feature tensor.

    Returns:
        A list with one feature slice per phrase, and a float32 tensor of
        shape (max_length,) holding 1.0 at phrase token positions, 0.0 elsewhere.
    """
    phrases = phrase_info[0]
    start_word_idx = phrase_info[1]
    phrase_len = phrase_info[2]

    # Slice out the token features covered by each phrase.
    Phrase_feature = []
    for i in range(len(phrases)):
        start = token_index_list[start_word_idx[i]]
        Phrase_feature.append(Text_feat[start:start + phrase_len[i], :])

    # Binary mask over token positions: 1 inside any phrase, else 0.      # 1e-5
    Phrase_marker = np.full(max_length, 0, dtype='float32')
    for i in range(len(phrases)):
        start = token_index_list[start_word_idx[i]]  # hoisted: invariant over j
        for j in range(phrase_len[i]):
            Phrase_marker[start + j] = 1                                  # 1. - 1e-5

    return Phrase_feature, torch.tensor(Phrase_marker).to(device)


# From a center visual-patch index, return the indices and features of its
# four diagonal (corner) neighbors that fall inside the patch grid.
def get_Neighbor(index, Visual_feature, grid=7):
    """Return (indices, features) of the in-bounds diagonal neighbors.

    Args:
        index: flat index of the center patch in a `grid` x `grid` layout.
        Visual_feature: indexable sequence of per-patch features.
        grid: side length of the square patch grid (default 7, i.e. 49 patches).

    Returns:
        Parallel lists of flat neighbor indices and their features, in the
        order down-right, down-left, up-right, up-left (skipping out-of-bounds).
    """
    row0, col0 = divmod(index, grid)
    neighbors_index = []
    neighbors_feat = []

    for d_row, d_col in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
        row = row0 + d_row
        col = col0 + d_col
        if 0 <= row < grid and 0 <= col < grid:
            flat = row * grid + col
            neighbors_index.append(flat)
            neighbors_feat.append(Visual_feature[flat])

    return neighbors_index, neighbors_feat


def get_Region(center_index, max_neigh_index, Visual_feat):
    """Stack the 2x2 patch region spanned by the center patch and its chosen
    diagonal neighbor, ordered top-left, top-right, bottom-left, bottom-right.

    Example with center 9: up-left neighbor < 2, up-right in (2, 9),
    down-left in (9, 16), down-right > 16. Row width of the patch grid is
    assumed to be 7 — TODO confirm against the caller.
    """
    c, m = center_index, max_neigh_index
    order = ()
    if m < c - 7:                # neighbor is up-left of center
        order = (m, m + 1, c - 1, c)
    elif c - 7 < m < c:          # up-right
        order = (m - 1, m, c, c + 1)
    elif c < m < c + 7:          # down-left
        order = (c - 1, c, m, m + 1)
    elif m > c + 7:              # down-right
        order = (c, c + 1, m - 1, m)
    return torch.stack([Visual_feat[i] for i in order])


class MMNerModel(torch.nn.Module):
    """BERT encoder + phrase-masked self-attention blocks, decoded with a CRF.

    NOTE(review): despite the multimodal name and the visual helpers above,
    `cal` only consumes text inputs here; `trans_v` is built but never used
    in this path.
    """

    def __init__(self, args):
        super(MMNerModel, self).__init__()

        self.bert = BertModel.from_pretrained('bert-base-uncased').to(device)      # ('./utils/bert-base-uncased')
        self.crf = CRF(len(args.train.tag2idx), batch_first=True)
        self.max_word_length = args.train.max_word_length   # token-axis length handed to get_Phrase
        self.bert_dim = args.train.bert_dim                 # BERT hidden size (shape comments below assume 768)
        self.module_dim = args.train.module_dim             # shared projection width for text/visual features
        self.resnet_dim = 2048                              # ResNet feature width consumed by trans_v
        self.hidden_dim = 512
        self.blocks = 6                                     # number of stacked attention blocks
        self.dropout = 0.4
        self.att_head = 8
        self.iter_number = 5
        self.batch_size = args.batch_size

        # One (attention, add&norm) pair per block; clone / MultiHeadAttention /
        # AddandNorm come from the CommonProcessores star import.
        self.att_text = clone(MultiHeadAttention(self.att_head, self.module_dim, self.dropout), self.blocks)    # x
        self.res4att_text = clone(AddandNorm(self.module_dim, self.dropout), self.blocks)                       # x

        self.trans_t = torch.nn.Linear(self.bert_dim, self.module_dim)                                          # x
        self.trans_v = torch.nn.Linear(self.resnet_dim, self.module_dim)

        self.hidden2tag = torch.nn.Linear(self.bert_dim + self.module_dim, len(args.train.tag2idx))

    # Decoding experiment built on Model_base.py using extracted noun phrases.
    def cal(self, data):
        """Return per-token emission scores (batch, max_len, n_tags) for the CRF."""
        # obtain data
        Bert_input = data["Bert_input"]                             # BERT inputs       {input_ids; attention_mask; token_type_ids}
        Bert_input_token_index = data["Bert_input_token_index"]     # len(list)==batch  [Bert token index]
        Phrase_extract_info = data["Phrase_extract_info"]           # len(list)==batch  [[phrase], [phrase start index], [phrase length]]

        # 1: [Module 1] calculate features
        Text_feature = self.bert(**Bert_input)[0]           # (batch,128,768)
        Text_feature_origin = Text_feature                  # (batch,128,768)
        # NOTE(review): squeeze(0) is a no-op when batch > 1 but would drop the
        # batch dimension when batch == 1 — confirm intended.
        Text_feature = self.trans_t(Text_feature).squeeze(0)                        # (batch,128,module_dim)

        # 1.1 Transformer Layer with Self-attention
        for i in range(self.blocks):
            Text_feature = self.res4att_text[i](Text_feature, self.att_text[i](Text_feature, Text_feature, Text_feature,
                                                                               Bert_input["attention_mask"].unsqueeze(1)))              # (batch,128,768)
        # Zero out every token position that does not belong to an extracted phrase.
        Text_feat_list = []
        for batch in range(Text_feature.size(0)):
            Text_feat = Text_feature[batch]
            Phrase_feat, Phrase_marker = get_Phrase(self.max_word_length, Bert_input_token_index[batch], Phrase_extract_info[batch], Text_feat)
            Text_feat = Phrase_marker.unsqueeze(-1) * Text_feat

            Text_feat_list.append(Text_feat)

        Text_feature = torch.stack(Text_feat_list)

        # 4: [Module 4] Decode Module
        Dual_channel_feat = torch.cat((Text_feature_origin, Text_feature), dim=2)                 # (batch, 128, bert_dim+module_dim)
        Dual_channel_feat = self.hidden2tag(Dual_channel_feat)                                      # (batch, 128, n_tags)

        return Dual_channel_feat

    def log_likelihood(self, data):
        """Negative CRF log-likelihood of the gold labels (training loss)."""
        return -self.crf(self.cal(data), data["label"], mask=data["crf_attention_mask"])

    def forward(self, data):
        """Viterbi-decode the best tag sequence under the CRF attention mask."""
        return self.crf.decode(self.cal(data), mask=data["crf_attention_mask"])
