'''
Model.py - multimodal NER model wrapper (BERT text encoder + visual features,
iterative boundary refinement, cross-modal fusion, CRF decoding).
'''
import numpy as np
import torch

from model.decoders.CRF import CRF
from transformers import BertModel
from model.processores.CommonProcessores import *

# torch.set_printoptions(profile="full")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
from torch.nn import functional as F

# Row/column offsets of the four diagonal neighbours of a cell on a 2-D grid:
# (down-right, down-left, up-right, up-left).
Fx = [[1, 1], [1, -1], [-1, 1], [-1, -1]]


def numpy_sofmax(logits):
    """Numerically stable softmax over the last axis of `logits`.

    The per-row maximum is subtracted before exponentiating so large logits
    do not overflow `np.exp`; the subtraction cancels out in the ratio, so
    the probabilities are unchanged.
    """
    shifted = logits - np.max(logits, axis=-1, keepdims=True)
    e_x = np.exp(shifted)
    return e_x / np.sum(e_x, axis=-1, keepdims=True)


def get_Phrase(max_length, token_index_list, phrase_info, Text_feat):
    """Slice out per-phrase token features and build a phrase marker vector.

    Args:
        max_length: padded sequence length of the marker vector.
        token_index_list: maps a word position to its token index in
            `Text_feat` (presumably word -> first BERT sub-token; confirm
            against the data pipeline).
        phrase_info: triple [phrases, phrase start word index, phrase length
            in tokens].
        Text_feat: (seq_len, dim) token feature matrix; sliceable.

    Returns:
        Phrase_feature: list of (phrase_len, dim) feature slices, one per phrase.
        Phrase_marker: float32 (max_length,) vector, ~1 on phrase tokens and
            ~0 (1e-5) elsewhere; epsilons keep values strictly inside (0, 1).
    """
    phrases, start_word_idx, phrase_len = phrase_info

    Phrase_feature = []
    Phrase_marker = np.full(max_length, 1e-5, dtype='float32')

    # Single pass: the original ran two loops over the same phrases and
    # recomputed the (loop-invariant) start index once per token.
    for i in range(len(phrases)):
        start = token_index_list[start_word_idx[i]]
        Phrase_feature.append(Text_feat[start:start + phrase_len[i], :])
        Phrase_marker[start:start + phrase_len[i]] = 1. - 1e-5

    return Phrase_feature, Phrase_marker


# Given the index of a central visual block, return the indices and features
# of its diagonal (corner) neighbours on the visual grid.
def get_Neighbor(index, Visual_feature, grid_size=7):
    """Collect the in-bounds diagonal neighbours of block `index`.

    Args:
        index: flat index of the central block on a grid_size x grid_size grid.
        Visual_feature: per-block features, indexable by flat block index.
        grid_size: side length of the square grid (default 7, matching the
            7x7 ResNet feature map used elsewhere in this file).

    Returns:
        (neighbors_index, neighbors_feat): parallel lists of the flat indices
        and features of the existing diagonal neighbours, in the order
        down-right, down-left, up-right, up-left.
    """
    # Kept local (instead of the module-level Fx) so the function is
    # self-contained; same offsets, same order.
    diagonal_offsets = ((1, 1), (1, -1), (-1, 1), (-1, -1))

    row, col = divmod(index, grid_size)
    neighbors_index = []
    neighbors_feat = []

    for d_row, d_col in diagonal_offsets:
        r, c = row + d_row, col + d_col
        if 0 <= r < grid_size and 0 <= c < grid_size:
            flat = r * grid_size + c
            neighbors_index.append(flat)
            neighbors_feat.append(Visual_feature[flat])

    return neighbors_index, neighbors_feat


def get_Region(center_index, max_neigh_index, Visual_feat):
    """Stack the 2x2 patch of visual blocks spanned by the center block and
    its chosen diagonal neighbour (flat indices on the 7x7 grid).

    The sign and magnitude of (neighbour - center) tell which quadrant the
    neighbour lies in; the four blocks are returned top-left, top-right,
    bottom-left, bottom-right. Example with center 9: up is 2, down is 16.
    """
    delta = max_neigh_index - center_index

    if delta < -7:                                  # up-left    (x < 2)
        picked = [max_neigh_index, max_neigh_index + 1,
                  center_index - 1, center_index]
    elif -7 < delta < 0:                            # up-right   (2 < x < 9)
        picked = [max_neigh_index - 1, max_neigh_index,
                  center_index, center_index + 1]
    elif 0 < delta < 7:                             # down-left  (9 < x < 16)
        picked = [center_index - 1, center_index,
                  max_neigh_index, max_neigh_index + 1]
    elif delta > 7:                                 # down-right (x > 16)
        picked = [center_index, center_index + 1,
                  max_neigh_index - 1, max_neigh_index]
    else:
        picked = []                                 # cannot happen for a diagonal neighbour

    return torch.stack([Visual_feat[i] for i in picked])


class MMNerModel(torch.nn.Module):
    """Multimodal NER model.

    Pipeline: BERT text encoding + projected ResNet 7x7 visual features,
    per-modality self-attention stacks, an iterative phrase/visual-region
    refinement module, cross-modal attention fusion, and a CRF decoder on
    the concatenated dual-channel features.
    """

    def __init__(self, args):
        """Build encoders, projections, attention stacks and the CRF.

        `args` is expected to expose `train.tag2idx`, `train.max_word_length`,
        `train.bert_dim`, `train.module_dim` and `batch_size`.
        """
        super(MMNerModel, self).__init__()

        self.bert = BertModel.from_pretrained('bert-base-uncased').to(device)      # ('./utils/bert-base-uncased')
        self.crf = CRF(len(args.train.tag2idx), batch_first=True)
        self.max_word_length = args.train.max_word_length
        self.bert_dim = args.train.bert_dim          # BERT hidden size (768 per shape comments below)
        self.module_dim = args.train.module_dim      # shared projection space (1024 per shape comments below)
        self.resnet_dim = 2048
        self.hidden_dim = 512
        self.blocks = 6                              # depth of each self-attention stack
        self.dropout = 0.4
        self.att_head = 8
        self.iter_number = 5                         # refinement iterations in Module 2
        self.batch_size = args.batch_size

        # Per-modality self-attention transformer stacks (`blocks` layers each).
        self.att_text = clone(MultiHeadAttention(self.att_head, self.module_dim, self.dropout), self.blocks)
        self.res4att_text = clone(AddandNorm(self.module_dim, self.dropout), self.blocks)

        self.att_image = clone(MultiHeadAttention(self.att_head, self.module_dim, self.dropout), self.blocks)
        self.res4att_image = clone(AddandNorm(self.module_dim, self.dropout), self.blocks)

        # Projections of both modalities into the shared module_dim space.
        self.trans_t = torch.nn.Linear(self.bert_dim, self.module_dim)
        self.trans_v = torch.nn.Linear(self.resnet_dim, self.module_dim)

        # Attention used in the refinement module (region -> phrase and
        # phrase -> visual block).
        self.att = Attention(self.module_dim)
        self.att_phrase2block = Attention(self.module_dim, self.dropout)

        # Cross-modal interaction blocks (Module 3).
        self.att_vis2text_mha = MultiHeadAttention(self.att_head, self.module_dim, self.dropout)
        self.att_vis2text_addnorm1 = AddandNorm(self.module_dim, self.dropout)
        self.att_vis2text_ffn = PositionwiseFeedForward(self.module_dim, self.module_dim, self.dropout)
        self.att_vis2text_addnorm2 = AddandNorm(self.module_dim, self.dropout)

        self.att_text2vis_mha = MultiHeadAttention(self.att_head, self.module_dim, self.dropout)
        self.att_text2vis_addnorm1 = AddandNorm(self.module_dim, self.dropout)
        self.att_text2vis_ffn = PositionwiseFeedForward(self.module_dim, self.module_dim, self.dropout)
        self.att_text2vis_addnorm2 = AddandNorm(self.module_dim, self.dropout)

        self.att_text2text_mha = MultiHeadAttention(self.att_head, self.module_dim, self.dropout)
        self.att_text2text_addnorm1 = AddandNorm(self.module_dim, self.dropout)
        self.att_text2text_ffn = PositionwiseFeedForward(self.module_dim, self.module_dim, self.dropout)
        self.att_text2text_addnorm2 = AddandNorm(self.module_dim, self.dropout)

        self.hidden2tag = torch.nn.Linear(self.bert_dim + self.module_dim, len(args.train.tag2idx))

    def cal(self, data):
        """Run the full multimodal pipeline; return per-token tag logits.

        Returns a (batch, max_word_length, n_tags) tensor of CRF emission
        scores.
        """
        # Unpack the batch.
        Bert_input = data["Bert_input"]                             # BERT input {input_ids; attention_mask; token_type_ids}
        Bert_input_token_index = data["Bert_input_token_index"]     # len(list)==batch  [BERT token index]
        Token_input = data["Token_input"]                           # kept for parity with the data pipeline (unused here)
        Phrase_extract_info = data["Phrase_extract_info"]           # len(list)==batch  [[phrase], [phrase start index], [phrase length]]
        Phrase_GT = data["Phrase_GT"]

        # [Module 1] Feature extraction.
        Text_feature_origin = self.bert(**Bert_input)[0]    # (batch,128,768)
        Visual_feature = data["Visual_77_feature"]          # (batch,49,2048) -- 7x7 ResNet grid; original comment said 128, trans_v implies 2048-dim blocks

        # 1.1 Project both modalities into the shared space.
        Text_feature = self.trans_t(Text_feature_origin)    # (batch,128,1024)
        Visual_feature = self.trans_v(Visual_feature)       # (batch,49,1024)

        # 1.2 Transformer layers with self-attention, per modality.
        for i in range(self.blocks):
            Text_feature = self.res4att_text[i](Text_feature, self.att_text[i](Text_feature, Text_feature, Text_feature,
                                                                               Bert_input["attention_mask"].unsqueeze(1)))
            Visual_feature = self.res4att_image[i](Visual_feature, self.att_image[i](Visual_feature, Visual_feature, Visual_feature))

        # [Module 2] Iterative refinement boundary module (per sample).
        Text_feat_list = []
        Visual_feat_list = []
        for batch in range(Text_feature.size(0)):
            Text_feat = Text_feature[batch]                 # (128,1024)
            Visual_feat = Visual_feature[batch]             # (49,1024)
            Phrase_feat, Phrase_marker = get_Phrase(self.max_word_length, Bert_input_token_index[batch], Phrase_extract_info[batch], Text_feat)
            Visual_region_Memory = []

            for _ in range(self.iter_number):

                # 2.1 Attention: noun phrases as Q, visual blocks as K,V;
                #     accumulate attention mass per visual block.
                Prob = torch.zeros(49).to(device)
                for phrase_feat in Phrase_feat:             # (phrase_token_number,1024)
                    out, prob = self.att_phrase2block(phrase_feat, Visual_feat, Visual_feat)     # prob: (phrase_token_number,49)
                    Prob += prob.sum(axis=0)                                        # (49)
                _, center_indexs = torch.topk(Prob, 1, dim=-1)                      # currently keeps a single center block

                # 2.2 Select a 2x2 visual region around each center -> memory.
                for j in range(len(center_indexs)):
                    center_index = center_indexs[j].item()
                    center_feat = Visual_feat[center_index]                         # (1024)
                    neighbors_index, neighbors_feat = get_Neighbor(center_index, Visual_feat)

                    # BUGFIX: the original softmaxed each scalar score on its
                    # own, which always yields 1.0, so the "best" neighbour was
                    # in fact always the first one. Softmax is monotonic, so
                    # the intended neighbour is the argmax of the raw scores.
                    if neighbors_feat:
                        scores = torch.stack([torch.matmul(center_feat, neigh_feat) for neigh_feat in neighbors_feat])
                        max_neigh_index = neighbors_index[int(torch.argmax(scores).item())]
                    else:
                        max_neigh_index = 0     # unreachable on a 7x7 grid; kept as a safeguard

                    # NOTE(review): detach() severs gradients into the visual
                    # encoder along the memory path -- confirm this is intended.
                    region_feat = get_Region(center_index, max_neigh_index, Visual_feat).clone().detach().requires_grad_(True)        # (4,1024)
                    Visual_region_Memory.append(region_feat.sum(dim=0))                                   # list of (1024)

                # 2.3 Use [Visual_region_Memory] to update [Phrase_marker].
                # Open questions from the original author:
                # (1) what signal drives the boundary update, and should
                #     Phrase_marker be decreased by a fixed step or
                #     proportionally per iteration?
                # (2) when a phrase does not really exist but its marker is
                #     non-zero, how to zero it out -- or should unimportant
                #     phrases be filtered during extraction, given the final
                #     result fuses the text and visual channels anyway?
                # The accumulated Prob below is currently unused: the marker
                # update itself is not implemented yet.
                for k in range(len(Phrase_feat)):
                    phrase_feat = Phrase_feat[k]
                    Prob = torch.zeros(len(phrase_feat)).to(device)
                    for region_feat in Visual_region_Memory:
                        out, prob = self.att(region_feat, phrase_feat, phrase_feat)     # (phrase_token_number)
                        Prob += prob

            # Re-assemble the batch (Text_feat / Visual_feat may have changed).
            Text_feat_list.append(Text_feat)
            Visual_feat_list.append(Visual_feat)

        Text_feat = torch.stack(Text_feat_list)
        Visual_feat = torch.stack(Visual_feat_list)

        # [Module 3] Multimodal interaction module.
        Text2Vis_feature = self.att_text2vis_mha(Text_feat, Visual_feat, Visual_feat, Bert_input["attention_mask"].unsqueeze(-1))   # (batch,128,1024)
        Text2Vis_feature = self.att_text2vis_addnorm1(Text_feat, Text2Vis_feature)
        Text2Vis_feature = self.att_text2vis_addnorm2(Text2Vis_feature, self.att_text2vis_ffn(Text2Vis_feature))

        Vis2Text_feature = self.att_vis2text_mha(Visual_feat, Text_feat, Text_feat, Bert_input["attention_mask"].unsqueeze(1))      # (batch,49,1024)
        Vis2Text_feature = self.att_vis2text_addnorm1(Visual_feat, Vis2Text_feature)
        Vis2Text_feature = self.att_vis2text_addnorm2(Vis2Text_feature, self.att_vis2text_ffn(Vis2Text_feature))

        # BUGFIX: this branch previously reused the att_vis2text_* modules,
        # leaving the dedicated att_text2text_* modules built in __init__
        # completely unused -- an apparent copy/paste slip.
        Text2Text_feature = self.att_text2text_mha(Text_feat, Vis2Text_feature, Vis2Text_feature, Bert_input["attention_mask"].unsqueeze(-1))  # (batch,128,1024)
        Text2Text_feature = self.att_text2text_addnorm1(Text_feat, Text2Text_feature)
        Text2Text_feature = self.att_text2text_addnorm2(Text2Text_feature, self.att_text2text_ffn(Text2Text_feature))

        Fusion_feature = Text2Vis_feature + Text2Text_feature                                       # (batch,128,1024)

        # [Module 4] Decode module: concatenate the raw BERT channel with the
        # fused multimodal channel, then project to tag space.
        Dual_channel_feat = torch.cat((Text_feature_origin, Fusion_feature), dim=2)                 # (batch,128,1024+768)
        Dual_channel_feat = self.hidden2tag(Dual_channel_feat)                                      # (batch,128,n_tags)

        return Dual_channel_feat

    def log_likelihood(self, data):
        """Negative CRF log-likelihood of the gold labels (training loss)."""
        return -self.crf(self.cal(data), data["label"], mask=data["crf_attention_mask"])

    def forward(self, data):
        """Viterbi-decode the most likely tag sequence for each sample."""
        return self.crf.decode(self.cal(data), mask=data["crf_attention_mask"])
