import numpy as np
import torch
import copy
import torch.nn as nn
import scipy.spatial
import torch.nn.functional as F
from models.GCN import GCN
from optimization import WarmupLinearSchedule, AdamW
from transformers import BertTokenizer, BertModel, RobertaModel, RobertaTokenizer



def masked_softmax(vector: torch.Tensor,
                   mask: torch.Tensor,
                   dim: int = -1,
                   memory_efficient: bool = False,
                   mask_fill_value: float = -1e32) -> torch.Tensor:
    """Softmax over only the unmasked entries of ``vector``.

    Passing ``None`` for ``mask`` yields a plain softmax. ``mask`` must be
    broadcastable to ``vector``'s shape; if it has fewer dimensions we
    unsqueeze on dimension 1 until the ranks match (do any other unsqueezing
    yourself before calling).

    With ``memory_efficient=True`` masked positions are filled with a very
    large negative number before the softmax, so their probabilities are
    approximately (not exactly) zero; this uses less memory. With
    ``memory_efficient=False`` a fully-masked input produces all zeros,
    which can yield ``NaN`` under a downstream categorical cross-entropy;
    the memory-efficient variant instead treats every element as equal.
    """
    if mask is None:
        return torch.nn.functional.softmax(vector, dim=dim)

    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)

    if memory_efficient:
        filled = vector.masked_fill((1 - mask).to(dtype=torch.bool), mask_fill_value)
        return torch.nn.functional.softmax(filled, dim=dim)

    # Zero the masked elements first to limit numerical error from large
    # out-of-mask values, then renormalize over the surviving entries.
    probs = torch.nn.functional.softmax(vector * mask, dim=dim) * mask
    return probs / (probs.sum(dim=dim, keepdim=True) + 1e-13)



def _truncate_q_pair(tokens_q1, tokens_q2, max_total_seq_length):
    len_spare = len(tokens_q1) + len(tokens_q2) - max_total_seq_length
    is_truncated = len_spare > 0
    # print("len_spare：%d" %len_spare)

    while len_spare > 0:
        is_truncated = True
        if len(tokens_q1) >= len(tokens_q2):
            tokens_q1.pop()
        else:
            tokens_q2.pop()
        len_spare -= 1

    return is_truncated

def _find_corr_features(output_features, list_original, list_trancated, merge_type, encoder_type):
    curr_pos = 0
    token_target = ""
    idx_list = []
    feature_tensor = []

    if encoder_type == 'bert':
        flag = True

        for idx, token in enumerate(list_trancated):
            if token == list_original[curr_pos].lower() \
                    or (len(token) == len(list_original[curr_pos].lower()) and flag == True) \
                    or token == "[UNK]":
                curr_pos += 1
                idx_list.append([idx])
                token_target = ""
                continue
            elif token.startswith('##'):
                token_target += token.lstrip('#')
                idx_list[-1].append(idx)
            else:
                token_target += token
                if flag:
                    idx_list.append([idx])
                    flag = False
                else:
                    idx_list[-1].append([idx])

            if token_target == list_original[curr_pos].lower() or len(token_target) == len(
                    list_original[curr_pos].lower()):
                curr_pos += 1
                token_target = ""
                flag = True


    assert merge_type == 'mean' or merge_type == 'first'
    for sub_idx_list in idx_list:
        if merge_type == 'mean':
            sub_feature = torch.mean(output_features[:, sub_idx_list[:], :], dim=1, keepdim=False)
        else:
            sub_feature = output_features[:, sub_idx_list[0], :]

        sub_feature = sub_feature.unsqueeze(dim=1)
        if len(feature_tensor) > 0:
            feature_tensor = torch.cat((feature_tensor, sub_feature), 1)
        else:
            feature_tensor = sub_feature

    return feature_tensor


def _restore_features(output_features, target_list1, target_list2, tokens, merge_type, encoder_type):
    """Split a BERT sentence-pair output into per-text token features.

    ``tokens`` is the packed sequence ``[CLS] text1 [SEP] text2 [SEP]``;
    the slice before the first [SEP] (excluding [CLS]) belongs to text1,
    the slice between the two [SEP]s to text2. Each slice is regrouped to
    one feature per original word via ``_find_corr_features``.

    Returns ``(text1_features, text2_features, cls_feature)``.
    """
    first_sep = tokens.index("[SEP]")

    features1 = _find_corr_features(
        output_features[:, 1:first_sep, :], target_list1, tokens[1:first_sep],
        merge_type, encoder_type)
    features2 = _find_corr_features(
        output_features[:, first_sep + 1:-1, :], target_list2, tokens[first_sep + 1:-1],
        merge_type, encoder_type)

    return features1, features2, output_features[:, 0, :]



def gen_bert_feature(bert_tokenizer, bert_model, device, text1, text2, target_list1, target_list2, max_seq_length,
                     merge_type, encoder_type):
    """Encode a sentence pair with BERT and return word-aligned features.

    Returns ``(cls_feature, tokens1_feature, tokens2_feature)``: the [CLS]
    vector plus per-word features for each text, realigned to the original
    word lists ``target_list1`` / ``target_list2`` via ``_restore_features``.
    """
    assert len(text1) > 0 and len(text2) > 0

    # WordPiece-tokenize both texts; truncation happens in place so that the
    # pair fits once [CLS] and the two [SEP] markers are added (hence -3).
    tokens1 = bert_tokenizer.tokenize(text1)
    tokens2 = bert_tokenizer.tokenize(text2)
    _truncate_q_pair(tokens1, tokens2, max_seq_length - 3)

    # Standard BERT sentence-pair packing: segment 0 covers "[CLS] text1
    # [SEP]", segment 1 covers "text2 [SEP]".
    tokens = ["[CLS]"] + tokens1 + ["[SEP]"] + tokens2 + ["[SEP]"]
    segment_ids = [0] * (len(tokens1) + 2) + [1] * (len(tokens2) + 1)

    input_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
    tokens_tensor = torch.tensor([input_ids]).to(device)
    segments_tensors = torch.tensor([segment_ids]).to(device)

    # With the `transformers` package (unlike pytorch_pretrained_bert) the
    # model returns a tuple whose first element is the last hidden state,
    # shaped (batch, seq_len, hidden); the pooler output etc. follow.
    model_outputs = bert_model(input_ids=tokens_tensor, token_type_ids=segments_tensors)
    last_hidden_state = model_outputs[0]

    tokens1_feature, tokens2_feature, cls_feature = _restore_features(
        last_hidden_state, target_list1, target_list2, tokens, merge_type, encoder_type)

    # Batch size is 1 here, so drop the batch dim of the per-word features.
    return cls_feature, tokens1_feature[0], tokens2_feature[0]


class Attn(nn.Module):
    """Co-attention pooling over two feature sequences.

    Builds an affinity matrix between the sequences, derives per-position
    attention weights for each side, and returns one attention-weighted
    summary vector per sequence.
    """

    def __init__(self, out_size, attn_size):
        super(Attn, self).__init__()

        # Affinity weight. BUG FIX: the original never initialized W —
        # torch.Tensor(...) allocates uninitialized memory, so W held
        # garbage (possibly NaN/inf). Initialize it like the other weights.
        self.W = nn.Parameter(torch.Tensor(out_size, out_size))
        nn.init.xavier_uniform_(self.W, gain=nn.init.calculate_gain('tanh'))

        self.Wv = nn.Parameter(torch.Tensor(out_size, attn_size))
        self.Wq = nn.Parameter(torch.Tensor(out_size, attn_size))
        nn.init.xavier_uniform_(self.Wv, gain=nn.init.calculate_gain('tanh'))
        nn.init.xavier_uniform_(self.Wq, gain=nn.init.calculate_gain('tanh'))

        self.w_hv = nn.Parameter(torch.Tensor(attn_size, 1))
        self.w_hq = nn.Parameter(torch.Tensor(attn_size, 1))
        nn.init.xavier_uniform_(self.w_hv, gain=nn.init.calculate_gain('linear'))
        nn.init.xavier_uniform_(self.w_hq, gain=nn.init.calculate_gain('linear'))

    def forward(self, seq_features1, seq_features2, mask1, mask2):
        """Pool both sequences with co-attention.

        seq_features1/2: (batch, seq_len1/2, out_size); mask1/2:
        (batch, seq_len1/2) with 1 for real positions, 0 for padding.
        Returns two (batch, out_size) pooled vectors.
        """
        # Affinity matrix C: (batch, seq_len1, seq_len2).
        # torch.tanh replaces the deprecated F.tanh.
        C = torch.tanh(torch.matmul(torch.matmul(seq_features1, self.W), torch.transpose(seq_features2, 1, 2)))

        Hv = torch.tanh(torch.matmul(seq_features1, self.Wv) + torch.matmul(C, torch.matmul(seq_features2, self.Wq)))
        Hq = torch.tanh(torch.matmul(seq_features2, self.Wq) + torch.matmul(torch.transpose(C, 1, 2),
                                                                            torch.matmul(seq_features1, self.Wv)))

        # BUG FIX: squeeze only the trailing singleton dim; a bare
        # .squeeze() would also collapse a batch dimension of size 1.
        attn_v = masked_softmax(torch.matmul(Hv, self.w_hv).squeeze(-1), mask1, 1)
        attn_q = masked_softmax(torch.matmul(Hq, self.w_hq).squeeze(-1), mask2, 1)

        v_hat = torch.sum(torch.unsqueeze(attn_v, 2) * seq_features1, 1)
        q_hat = torch.sum(torch.unsqueeze(attn_q, 2) * seq_features2, 1)

        return v_hat, q_hat





class MatchNetwork(nn.Module):
    """Sentence-pair matching model.

    Pipeline: BERT per-word features -> optional GCN over dependency and
    entity adjacency graphs -> co-attention pooling (Attn) -> cosine
    similarity between the two pooled vectors.

    NOTE(review): the model as written assumes ``encoder_type == 'bert'``
    and ``use_gnn == True`` — ``forward`` uses the BERT embeddings and
    ``self.attn`` (created only alongside the GCN) unconditionally.
    """

    def __init__(self, hyps, t_total):
        super(MatchNetwork, self).__init__()

        self.device = hyps["device"]
        # Dropout2d drops whole channels; forward() transposes to
        # (batch, embed, seq) so entire embedding dimensions are dropped.
        self.dropout_inter = nn.Dropout2d(p=hyps["dropout_inter"])

        self.depen_network = None
        self.embed_size = hyps["embed_size"]  # e.g. 768 for bert-base

        self.bert_model = None
        self.rnn = None
        self.encoder_type = hyps["encoder_type"]
        if hyps["encoder_type"] == "bert":
            print("Initializing BERT model...")
            bert_model_name = 'bert-base-uncased'
            self.max_seq_length = 512  # BERT's hard input-length limit

            # merge_type should be 'mean' or 'first'
            self.merge_type = "first"
            self.bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name)
            self.bert_model = BertModel.from_pretrained(bert_model_name)
            self.bert_model.to(hyps["device"])
            self.bert_model.zero_grad()

        self.GCNModel = None
        if hyps["use_gnn"] == True:
            self.GCNModel = GCN(hyps["gnn"], hyps["mutual_link"], self.device)
            self.mutual_link = hyps["mutual_link"]  # e.g. 'co_attn'

        if self.GCNModel:
            self.attn = Attn(hyps["gnn"]["out_features"], hyps["attn_size"])

        if self.bert_model:
            # BERT gets its own optimizer with a transformer-style warmup
            # schedule and a small fine-tuning learning rate.
            self.optimizer_bert = AdamW(self.bert_model.parameters(),
                                        lr=3e-5,
                                        weight_decay=hyps["weight_decay"],
                                        eps=hyps["adam_epsilon"])
            self.scheduler_bert = WarmupLinearSchedule(self.optimizer_bert,
                                                       warmup_steps=hyps["warmup_steps"],
                                                       t_total=t_total)

        other_para = []
        if self.GCNModel:
            other_para.append({'params': self.GCNModel.parameters()})
            # BUG FIX: self.attn exists only when the GCN does, so its
            # parameters are added inside the same guard (previously this
            # raised AttributeError when use_gnn was False). Note that with
            # use_gnn False other_para is empty and Adam would reject it —
            # consistent with forward() requiring the GCN path anyway.
            other_para.append({'params': self.attn.parameters()})

        self.optimizer_other = torch.optim.Adam(other_para,
                                                lr=hyps["lr"],
                                                weight_decay=hyps["weight_decay"],
                                                eps=hyps["adam_epsilon"])

        self.scheduler_other = torch.optim.lr_scheduler.MultiStepLR(self.optimizer_other, milestones=[2], gamma=0.1)

        self.to_device(hyps["device"])

        # BUG FIX: both sub-models are optional; guard before zero_grad
        # (the original called these unconditionally and crashed on None).
        if self.GCNModel:
            self.GCNModel.zero_grad()
        if self.bert_model:
            self.bert_model.zero_grad()

    def to_device(self, device):
        """Move all present sub-modules to ``device``."""
        if self.GCNModel:
            self.GCNModel.to(device)
            # attn is created alongside the GCN, so it is guarded the same
            # way (the original moved it unconditionally).
            self.attn.to(device)
        if self.bert_model:
            self.bert_model.to(device)

    def to_train(self, device):
        """Put all present sub-modules into training mode.

        ``device`` is accepted for interface compatibility but unused.
        """
        if self.GCNModel:
            self.GCNModel.train()
            self.attn.train()
        if self.bert_model:
            self.bert_model.train()

    def forward(self, data_batch):
        """Score a batch of sentence pairs.

        data_batch: ``(adj_list, sent1, sent2, label, adj_entity)`` where
        sent1/sent2 are lists of word lists, and adj_list/adj_entity hold,
        per pair, the two texts' dependency / entity adjacency tensors of
        shape (edge_types, n, n).

        Returns ``(similarity_score, label)`` with cosine scores in [-1, 1].
        """
        adj_list, sent1, sent2, label, adj_entity = data_batch

        # Per-example tensors are padded to the batch maximum and stacked.
        embed1_batch = torch.Tensor([]).to(self.device)
        embed2_batch = torch.Tensor([]).to(self.device)
        mask1_batch = torch.Tensor([]).to(self.device)
        mask2_batch = torch.Tensor([]).to(self.device)
        adj_batch = torch.Tensor([]).to(self.device)
        adj_ent_batch = torch.Tensor([]).to(self.device)

        # With a GCN, one extra "root" node slot is prepended per sequence.
        root_node = 1 if self.GCNModel else 0

        # Batch-maximum lengths (root slot included) used for padding.
        seq1_len = max([(len(seq1) + root_node) for seq1 in sent1])
        seq2_len = max([(len(seq2) + root_node) for seq2 in sent2])

        for i in range(len(adj_list)):
            if self.bert_model:
                cls, embed1, embed2 = gen_bert_feature(self.bert_tokenizer, self.bert_model, self.device,
                                                       " ".join(sent1[i]), " ".join(sent2[i]),
                                                       sent1[i], sent2[i],
                                                       self.max_seq_length, self.merge_type, self.encoder_type)

            # NOTE(review): embed1/embed2 are only bound when bert_model is
            # set; another encoder type would need its own branch above.
            assert len(sent1[i]) == len(embed1) and len(sent2[i]) == len(embed2)

            seq1_len_curr = len(embed1) + root_node
            seq2_len_curr = len(embed2) + root_node

            # Prepend the zero root-node embedding, then zero-pad to the
            # batch maximum length.
            embed1_curr = torch.cat((torch.zeros(root_node, self.embed_size).to(self.device), embed1))
            embed1_curr = torch.cat((embed1_curr, torch.zeros(seq1_len - seq1_len_curr, self.embed_size).to(self.device)), 0)
            embed1_batch = torch.cat((embed1_batch, torch.unsqueeze(embed1_curr, 0)), 0)

            embed2_curr = torch.cat((torch.zeros(root_node, self.embed_size).to(self.device), embed2))
            embed2_curr = torch.cat((embed2_curr, torch.zeros(seq2_len - seq2_len_curr, self.embed_size).to(self.device)), 0)
            embed2_batch = torch.cat((embed2_batch, torch.unsqueeze(embed2_curr, 0)), 0)

            # Masks: 1 marks real (root + token) positions, 0 marks padding.
            mask1_curr = [1] * seq1_len_curr + [0] * (seq1_len - seq1_len_curr)
            mask1_curr = torch.tensor([mask1_curr], dtype=torch.float).to(self.device)
            mask1_batch = torch.cat((mask1_batch, mask1_curr), 0)

            mask2_curr = [1] * seq2_len_curr + [0] * (seq2_len - seq2_len_curr)
            mask2_curr = torch.tensor([mask2_curr], dtype=torch.float).to(self.device)
            mask2_batch = torch.cat((mask2_batch, mask2_curr), 0)

            if self.GCNModel:
                # Zero-pad each (edge_types, n, n) adjacency up to
                # (edge_types, seq_len, seq1_len + seq2_len), then stack the
                # two texts' adjacencies along dim 1 so the GCN sees one
                # joint graph per pair.
                adj1_curr = copy.deepcopy(adj_list[i][0]).to(self.device)
                adj1_curr = torch.cat((adj1_curr, torch.zeros(adj1_curr.size(0), seq1_len - seq1_len_curr, seq1_len_curr).to(self.device)), 1)
                adj1_curr = torch.cat((adj1_curr, torch.zeros(adj1_curr.size(0), seq1_len, seq1_len + seq2_len - seq1_len_curr).to(self.device)), 2)

                adj2_curr = copy.deepcopy(adj_list[i][1]).to(self.device)
                adj2_curr = torch.cat((adj2_curr, torch.zeros(adj2_curr.size(0), seq2_len - seq2_len_curr, seq2_len_curr).to(self.device)), 1)
                adj2_curr = torch.cat((adj2_curr, torch.zeros(adj2_curr.size(0), seq2_len, seq1_len + seq2_len - seq2_len_curr).to(self.device)), 2)

                adj_curr = torch.cat((adj1_curr, adj2_curr), 1)
                adj_batch = torch.cat((adj_batch, torch.unsqueeze(adj_curr, 0)), 0)

                # Entity adjacencies get the identical padding treatment.
                adj1_enti = copy.deepcopy(adj_entity[i][0]).to(self.device)
                adj1_enti = torch.cat((adj1_enti, torch.zeros(adj1_enti.size(0), seq1_len - seq1_len_curr, seq1_len_curr).to(self.device)), 1)
                adj1_enti = torch.cat((adj1_enti, torch.zeros(adj1_enti.size(0), seq1_len, seq1_len + seq2_len - seq1_len_curr).to(self.device)), 2)

                adj2_enti = copy.deepcopy(adj_entity[i][1]).to(self.device)
                adj2_enti = torch.cat((adj2_enti, torch.zeros(adj2_enti.size(0), seq2_len - seq2_len_curr, seq2_len_curr).to(self.device)), 1)
                adj2_enti = torch.cat((adj2_enti, torch.zeros(adj2_enti.size(0), seq2_len, seq1_len + seq2_len - seq2_len_curr).to(self.device)), 2)

                adj_enti = torch.cat((adj1_enti, adj2_enti), 1)
                adj_ent_batch = torch.cat((adj_ent_batch, torch.unsqueeze(adj_enti, 0)), 0)

        label = label.to(self.device)

        # Channel dropout over the concatenated pair, then split back into
        # the two sequences.
        embed_combined = self.dropout_inter(torch.cat((embed1_batch, embed2_batch), 1).transpose(1, 2)).transpose(1, 2)
        embed1_batch, embed2_batch = embed_combined[:, :seq1_len, :], embed_combined[:, seq1_len:, :]

        if self.GCNModel:
            embed1_batch, embed2_batch = self.GCNModel(embed1_batch, mask1_batch, embed2_batch, mask2_batch, adj_batch, adj_ent_batch)

        # Co-attention pooling to one vector per text, then cosine score.
        data1, data2 = self.attn(embed1_batch, embed2_batch, mask1_batch, mask2_batch)
        cos = nn.CosineSimilarity(dim=1, eps=1e-6)
        similarity_score = cos(data1, data2)
        return similarity_score, label



