#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   models.py    
@Contact :   raogx.vip@hotmail.com
@License :   (C)Copyright 2020

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2021-06-12 23:05   WandongShi      1.0         None
'''
 
import torch
from torch import nn as nn
from transformers import BertConfig
from transformers import BertModel
from transformers import BertPreTrainedModel
from models.dual_attention import Attention_Type, Attention_Node
from models.evidence_GCN import evidence_GCN
from utils import *
import time

from models import sampling
from models import util

def get_token(h: torch.tensor, x: torch.tensor, token: int):
    """ Return the contextualized embeddings at every position whose input id
    equals ``token`` (e.g. the [CLS] token).

    ``h`` holds per-position hidden states and ``x`` the matching input ids;
    both are flattened over their leading dimensions, then rows whose id
    matches ``token`` are selected. Result shape: (num_matches, emb_size).
    """
    hidden_dim = h.shape[-1]
    ids_flat = x.contiguous().view(-1)
    states_flat = h.view(-1, hidden_dim)
    # a boolean mask over the flattened positions picks out the matching rows
    return states_flat[ids_flat == token, :]

class OpinionMiner(BertPreTrainedModel):
    """ Span-based model to jointly extract entities and relations.

    On top of a BERT encoder this model runs (1) a span classifier over
    entity candidates and (2) a relation classifier over pairs of predicted
    spans. An "evidence graph" of aspect/opinion phrases — each node encoded
    with BERT, refined by a GCN, and grouped by sentiment polarity — is
    attended over, and the resulting evidence vector is concatenated onto
    each relation-candidate representation before the final classification.
    """

    VERSION = '1.1'

    def __init__(self, config: BertConfig, cls_token: int, relation_types: int, entity_types: int,
                 size_embedding: int, prop_drop: float, freeze_transformer: bool, tokenizer,
                 max_pairs: int, evidence_rep):
        """
        Args:
            config: BERT configuration for the encoder.
            cls_token: vocabulary id of the [CLS] token; its hidden state is
                used as the sentence-context representation for entities.
            relation_types: number of relation labels.
            entity_types: number of entity labels.
            size_embedding: dimension of the span-size embedding.
            prop_drop: dropout probability for entity/relation representations.
            freeze_transformer: if True, BERT parameters are frozen.
            tokenizer: tokenizer used to encode evidence-node words.
            max_pairs: maximum number of relation candidates processed per
                chunk (bounds memory in _classify_relations).
            evidence_rep: external evidence store exposing get_adj(),
                get_aspect(), get_opinion() and get_senti() (project type,
                defined elsewhere).
        """
        super(OpinionMiner, self).__init__(config)

        # BERT model
        # NOTE(review): GPU index 1 is hard-coded here (and again as "cuda:1"
        # in generate_node); this breaks on single-GPU machines — consider
        # making the device configurable.
        self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
        self.bert = BertModel(config).to(self.device)
        self._tokenizer = tokenizer

        # layers
        # scores one (entity-pair, evidence-pair) combination -> one scalar
        self.score_evidence = nn.Linear(config.hidden_size * 4, 1)
        # input: rel context (h) + pooled entity pair (2h) + evidence (2h)
        # + two size embeddings
        self.rel_classifier = nn.Linear(config.hidden_size * 5 + size_embedding * 2, relation_types)
        # input: [CLS] context (h) + max-pooled span (h) + size embedding
        self.entity_classifier = nn.Linear(config.hidden_size * 2 + size_embedding, entity_types)
        self.size_embeddings = nn.Embedding(100, size_embedding)  # supports span sizes 0..99
        self.dropout = nn.Dropout(prop_drop)

        self._cls_token = cls_token
        self._relation_types = relation_types
        self._entity_types = entity_types
        self._max_pairs = max_pairs

        # weight initialization
        self.init_weights()

        # GCN refining the evidence-node embeddings (evidence_GCN is a
        # project module; the meaning of its 2nd argument is defined there)
        self.node_gcn = evidence_GCN(config.hidden_size, 5).to(self.device)
        # precomputed per-polarity evidence tensors; see generate_node
        self.evidence_nodes = self.generate_node(evidence_rep)

        # attention setting
        # Attention_Type attends entity pairs over per-polarity evidence sums;
        # Attention_Node attends aspect/opinion queries over evidence nodes
        # (both project modules, defined elsewhere).
        self.Attention_Type = Attention_Type(q_embd_size=config.hidden_size * 2, k_embd_size=config.hidden_size * 2,
                                             q_k_hidden_size=config.hidden_size * 2, num_heads=1, score_func="dot")
        self.Attention_Node = Attention_Node(q_embd_size=config.hidden_size, k_embd_size=config.hidden_size,
                                             q_k_hidden_size=config.hidden_size * 2, num_heads=1, score_func="dot")

        if freeze_transformer:
            print("Freeze transformer weights")

            # freeze all transformer weights
            for param in self.bert.parameters():
                param.requires_grad = False

    def generate_node(self, evidence_rep):
        """ Build evidence-node representations grouped by sentiment polarity.

        Each graph node (a word from the evidence store) is encoded with BERT
        as "[CLS] node [SEP]" and the hidden state at position 1 (the node
        token) is taken as its embedding. The node matrix is then refined by
        the evidence GCN, and the refined vectors are grouped per polarity in
        the fixed order [POS, NEG, NEU].

        Returns:
            [aspect_arr, opinion_arr, get_pairs]: three lists, each holding
            one tensor per polarity — aspect embeddings (n, 768), opinion
            embeddings (n, 768), and concatenated pair embeddings (n, 1536).

        NOTE(review): hidden size 768 and device "cuda:1" are hard-coded
        below — this only works for bert-base-sized configs on GPU 1.
        """
        mapidx, ret = evidence_rep.get_adj()  # node->index map and adjacency structure
        node_values = mapidx.keys()
        inputs = torch.zeros(len(node_values), 768, device=self.device)
        for idx, node in enumerate(node_values):
            # encode "[CLS] node [SEP]"; convert_tokens_to_ids on the raw
            # string assumes each node maps to a single vocabulary entry
            doc_encoding = [self._tokenizer.convert_tokens_to_ids('[CLS]')]
            doc_encoding += [self._tokenizer.convert_tokens_to_ids(node)]
            doc_encoding += [self._tokenizer.convert_tokens_to_ids('[SEP]')]
            ts_encodings = torch.tensor(doc_encoding, dtype=torch.long, device=self.device)
            context_size = len(ts_encodings)
            ctx_masks = torch.ones(context_size, dtype=torch.bool, device=self.device)
            ctx_masks = ctx_masks.float()
            ctx_masks = ctx_masks.unsqueeze(0)
            ts_encodings = ts_encodings.unsqueeze(0)
            hidden_rep = self.bert(input_ids=ts_encodings, attention_mask=ctx_masks)[0]
            # position 1 is the node token's contextualized embedding
            inputs[idx] = hidden_rep[:, 1, :].unsqueeze(1)
        inputs = torch.unsqueeze(inputs, dim=0)  # add batch dim for the GCN
        gcn_output = self.node_gcn(ret, inputs)
        gcn_output = torch.squeeze(gcn_output, dim=0)

        # partition evidence (aspect, opinion) pairs by sentiment polarity;
        # anything not POS/NEG is treated as neutral
        aspect_evi = evidence_rep.get_aspect()
        opinion_evi = evidence_rep.get_opinion()
        senti_evi = evidence_rep.get_senti()
        pos_evi = []
        neg_evi = []
        neu_evi = []
        evidence_lines = []
        for senti_idx, line_senti in enumerate(senti_evi):
            if (line_senti == 'POS'):
                pos_evi.append((aspect_evi[senti_idx], opinion_evi[senti_idx]))
            elif (line_senti == 'NEG'):
                neg_evi.append((aspect_evi[senti_idx], opinion_evi[senti_idx]))
            else:
                neu_evi.append((aspect_evi[senti_idx], opinion_evi[senti_idx]))
        evidence_lines.append(pos_evi)
        evidence_lines.append(neg_evi)
        evidence_lines.append(neu_evi)
        aspect_arr = []
        opinion_arr = []
        get_pairs = []
        for senti in evidence_lines:
            # per-polarity tensors of aspect, opinion and concatenated reps
            senti_at_emb = torch.zeros(len(senti), 768, device="cuda:1")
            senti_ot_emb = torch.zeros(len(senti), 768, device="cuda:1")
            senti_pair_emb = torch.zeros(len(senti), 768 * 2, device="cuda:1")
            for t_idx, line in enumerate(senti):
                at_word = line[0]
                ot_word = line[1]
                at_idx = mapidx[at_word]
                ot_idx = mapidx[ot_word]
                at_phrase_rep = gcn_output[at_idx]
                ot_phrase_rep = gcn_output[ot_idx]
                at_phrase_rep = torch.unsqueeze(at_phrase_rep, dim=0)
                ot_phrase_rep = torch.unsqueeze(ot_phrase_rep, dim=0)
                senti_at_emb[t_idx] = at_phrase_rep
                senti_ot_emb[t_idx] = ot_phrase_rep
                senti_pair_emb[t_idx] = torch.cat([at_phrase_rep, ot_phrase_rep], dim=1)
            aspect_arr.append(senti_at_emb)
            opinion_arr.append(senti_ot_emb)
            get_pairs.append(senti_pair_emb)
        evidence_nodes = [aspect_arr, opinion_arr, get_pairs]
        return evidence_nodes

    def _forward_train(self, encodings: torch.Tensor, context_masks: torch.Tensor, entity_masks: torch.Tensor,
                       entity_sizes: torch.Tensor, relations: torch.Tensor, rel_masks: torch.Tensor):
        """ Training forward pass.

        Relation candidates come from the ground truth (``relations``).
        Returns (entity_logits, relation_logits), both unnormalized — the
        loss function is expected to apply softmax/sigmoid.
        """
        # get contextualized token embeddings from last transformer layer
        context_masks = context_masks.float()
        h = self.bert(input_ids=encodings, attention_mask=context_masks)[0]

        batch_size = encodings.shape[0]

        # classify entities
        size_embeddings = self.size_embeddings(entity_sizes)  # embed entity candidate sizes
        entity_clf, entity_spans_pool = self._classify_entities(encodings, h, entity_masks, size_embeddings)

        # classify relations
        # h repeated once per relation in a chunk (at most _max_pairs, at least 1)
        h_large = h.unsqueeze(1).repeat(1, max(min(relations.shape[1], self._max_pairs), 1), 1, 1)
        rel_clf = torch.zeros([batch_size, relations.shape[1], self._relation_types]).to(
            self.rel_classifier.weight.device)

        # obtain relation logits
        # chunk processing to reduce memory usage
        for i in range(0, relations.shape[1], self._max_pairs):
            # classify relation candidates
            chunk_rel_logits = self._classify_relations(entity_spans_pool, size_embeddings,
                                                        relations, rel_masks, h_large, i)
            rel_clf[:, i:i + self._max_pairs, :] = chunk_rel_logits

        return entity_clf, rel_clf

    def _forward_inference(self, encodings: torch.Tensor, context_masks: torch.Tensor, entity_masks: torch.Tensor,
                           entity_sizes: torch.Tensor, entity_spans: torch.Tensor, entity_sample_masks: torch.Tensor):
        """ Inference forward pass.

        Unlike training, relation candidates are constructed from the spans
        the entity classifier labeled as non-"none" (_filter_spans).
        Returns (entity_probs, relation_probs, relations): entity scores are
        softmax-normalized, relation scores sigmoid-normalized and masked.
        """
        # get contextualized token embeddings from last transformer layer
        context_masks = context_masks.float()
        h = self.bert(input_ids=encodings, attention_mask=context_masks)[0]

        batch_size = encodings.shape[0]
        ctx_size = context_masks.shape[-1]

        # classify entities
        size_embeddings = self.size_embeddings(entity_sizes)  # embed entity candidate sizes
        entity_clf, entity_spans_pool = self._classify_entities(encodings, h, entity_masks, size_embeddings)

        # ignore entity candidates that do not constitute an actual entity for relations (based on classifier)
        relations, rel_masks, rel_sample_masks = self._filter_spans(entity_clf, entity_spans,
                                                                    entity_sample_masks, ctx_size)

        rel_sample_masks = rel_sample_masks.float().unsqueeze(-1)
        h_large = h.unsqueeze(1).repeat(1, max(min(relations.shape[1], self._max_pairs), 1), 1, 1)
        rel_clf = torch.zeros([batch_size, relations.shape[1], self._relation_types]).to(
            self.rel_classifier.weight.device)

        # obtain relation logits
        # chunk processing to reduce memory usage
        for i in range(0, relations.shape[1], self._max_pairs):
            # classify relation candidates
            chunk_rel_logits = self._classify_relations(entity_spans_pool, size_embeddings,
                                                        relations, rel_masks, h_large, i)
            # apply sigmoid
            chunk_rel_clf = torch.sigmoid(chunk_rel_logits)
            rel_clf[:, i:i + self._max_pairs, :] = chunk_rel_clf

        rel_clf = rel_clf * rel_sample_masks  # mask padded relation candidates

        # apply softmax
        entity_clf = torch.softmax(entity_clf, dim=2)

        return entity_clf, rel_clf, relations

    def _classify_entities(self, encodings, h, entity_masks, size_embeddings):
        """ Score every entity-candidate span.

        Returns (entity_logits, entity_spans_pool) where entity_spans_pool
        holds the max-pooled token embeddings of each span (reused later for
        relation classification).
        """
        # max pool entity candidate spans
        # positions outside the span get -1e30 so they never win the max
        m = (entity_masks.unsqueeze(-1) == 0).float() * (-1e30)
        entity_spans_pool = m + h.unsqueeze(1).repeat(1, entity_masks.shape[1], 1, 1)
        entity_spans_pool = entity_spans_pool.max(dim=2)[0]

        # get cls token as candidate context representation
        entity_ctx = get_token(h, encodings, self._cls_token)

        # create candidate representations including context, max pooled span and size embedding
        entity_repr = torch.cat([entity_ctx.unsqueeze(1).repeat(1, entity_spans_pool.shape[1], 1),
                                 entity_spans_pool, size_embeddings], dim=2)
        entity_repr = self.dropout(entity_repr)

        # classify entity candidates
        entity_clf = self.entity_classifier(entity_repr)

        return entity_clf, entity_spans_pool

    def _classify_relations(self, entity_spans, size_embeddings, relations, rel_masks, h, chunk_start):
        """ Score one chunk of relation candidates (at most _max_pairs).

        For each candidate pair, an evidence vector is built by (1) scoring
        all evidence pairs per polarity and keeping the top-k (min_len),
        (2) attending aspect/opinion queries over evidence nodes, and
        (3) weighting the three polarity outputs by Attention_Type scores.
        The final representation is [rel context, pooled entity pair,
        size embeddings, evidence vector] -> rel_classifier logits.
        """

        batch_size = relations.shape[0]

        # create chunks if necessary
        if relations.shape[1] > self._max_pairs:
            relations = relations[:, chunk_start:chunk_start + self._max_pairs]
            rel_masks = rel_masks[:, chunk_start:chunk_start + self._max_pairs]
            h = h[:, :relations.shape[1], :]

        # get pairs of entity candidate representations
        entity_pairs = util.batch_index(entity_spans, relations)
        # split the pair into its aspect and opinion span representations
        entity_aspect, entity_opinion = entity_pairs.chunk(2, dim=2)
        query_aspect = torch.squeeze(entity_aspect, dim=2)
        query_opinion = torch.squeeze(entity_opinion, dim=2)
        entity_pairs = entity_pairs.view(batch_size, entity_pairs.shape[1], -1)


        # precomputed evidence tensors (see generate_node), order [POS, NEG, NEU]
        aspect_arr = self.evidence_nodes[0]
        opinion_arr = self.evidence_nodes[1]
        get_pairs = self.evidence_nodes[2]

        evi_out_list = []
        evidence_emb = []
        # keep at most 6 evidence pairs, bounded by the smallest polarity group
        min_len = min(6, min([sen.shape[0] for sen in get_pairs]))
        # loop over the three polarity groups (POS, NEG, NEU)
        for idx, aspect_rep in enumerate(aspect_arr):
            opinion_rep = opinion_arr[idx]
            pairs_rep = get_pairs[idx]
            # broadcast evidence pairs to (batch, n_pairs, n_evidence, 1536)
            senti_input = torch.unsqueeze(pairs_rep, dim=0).expand(entity_pairs.shape[1], pairs_rep.shape[0],
                                                                   pairs_rep.shape[1]).contiguous()
            senti_input = torch.unsqueeze(senti_input, dim=0).expand(batch_size, entity_pairs.shape[1],
                                                                     pairs_rep.shape[0],
                                                                     pairs_rep.shape[1]).contiguous()
            entity_input = torch.unsqueeze(entity_pairs, dim=2).expand(batch_size, entity_pairs.shape[1],
                                                                       pairs_rep.shape[0],
                                                                       pairs_rep.shape[1]).contiguous()
            mask_output = self.score_evidence(torch.cat((senti_input, entity_input), dim=-1)).squeeze(-1)
            # mask_output is 3-D, and so is topk_idx: [batch_size, max_pairs, min_len]
            _, topk_idx = torch.topk(mask_output, min_len, dim=-1, largest=True, sorted=False)
            # _, mask = mask_output.topk(maxk, dim=-1, largest=True, sorted=False)
            # boolean mask keeping only the top-k evidence pairs per candidate
            # NOTE(review): this triple Python loop could be vectorized with scatter
            mask = torch.zeros([batch_size, entity_pairs.shape[1], pairs_rep.shape[0]], dtype=torch.bool,
                               device=self.device)
            for batch_idx in range(batch_size):
                for pair_idx in range(entity_pairs.shape[1]):
                    for mask_idx in topk_idx[batch_idx][pair_idx]:
                        mask[batch_idx][pair_idx][mask_idx] = 1

            senti_at_emb = aspect_rep.expand(batch_size, len(aspect_rep), 768).contiguous()
            senti_ot_emb = opinion_rep.expand(batch_size, len(opinion_rep), 768).contiguous()

            # think about how to incorporate the mask (original author note)
            pairs_tmp = torch.unsqueeze(pairs_rep, dim=0).expand(entity_pairs.shape[1], pairs_rep.shape[0],
                                                                 pairs_rep.shape[1]).contiguous()
            pairs_tmp = torch.unsqueeze(pairs_tmp, dim=0).expand(batch_size, entity_pairs.shape[1], pairs_rep.shape[0],
                                                                 pairs_rep.shape[1]).contiguous()
            # zero out non-top-k evidence pairs, then sum over the evidence axis
            mask_tmp = torch.repeat_interleave(mask.unsqueeze(dim=-1), repeats=768 * 2, dim=-1)
            pairs_tmp = pairs_tmp.masked_fill(~mask_tmp, 0.0)
            senti_pairs_emb = torch.sum(pairs_tmp, dim=2)
            senti_pairs_emb = torch.unsqueeze(senti_pairs_emb, dim=-2)
            evidence_emb.append(senti_pairs_emb)
            evi_senti = self.Attention_Node(query_aspect, query_opinion, senti_at_emb, senti_ot_emb)
            evi_senti = evi_senti.unsqueeze(2)
            evi_out_list.append(evi_senti)
        # stack the three polarity outputs and weight them by attention scores
        evidence_emb = torch.cat([evi for evi in evidence_emb], dim=2)
        score = self.Attention_Type(entity_pairs, evidence_emb)
        evi_out = torch.cat([evi for evi in evi_out_list], dim=2)
        alpha_o = torch.repeat_interleave(score.unsqueeze(dim=-1), repeats=768 * 2, dim=-1)
        output_o = evi_out * alpha_o
        evidence_o = torch.sum(output_o, dim=2)

        # get corresponding size embeddings
        size_pair_embeddings = util.batch_index(size_embeddings, relations)
        size_pair_embeddings = size_pair_embeddings.view(batch_size, size_pair_embeddings.shape[1], -1)

        # relation context (context between entity candidate pair)
        # mask non entity candidate tokens
        m = ((rel_masks == 0).float() * (-1e30)).unsqueeze(-1)
        rel_ctx = m + h
        # max pooling
        rel_ctx = rel_ctx.max(dim=2)[0]
        # set the context vector of neighboring or adjacent entity candidates to zero
        rel_ctx[rel_masks.to(torch.uint8).any(-1) == 0] = 0

        # create relation candidate representations including context, max pooled entity candidate pairs
        # and corresponding size embeddings
        rel_repr = torch.cat([rel_ctx, entity_pairs, size_pair_embeddings], dim=2)
        rel_repr = self.dropout(rel_repr)

        # append the evidence vector
        final_input = torch.cat([rel_repr, evidence_o], dim=2)
        chunk_rel_logits = self.rel_classifier(final_input)

        return chunk_rel_logits


    def _filter_spans(self, entity_clf, entity_spans, entity_sample_masks, ctx_size):
        """ Build relation candidates from spans classified as entities.

        For each sample, every ordered pair of distinct non-"none" spans that
        passes overleap_check (from utils; presumably an overlap test —
        defined elsewhere) becomes a candidate. Returns padded, device-placed
        (relations, rel_masks, rel_sample_masks); samples with no candidate
        get a single dummy (0, 0) pair with a zero sample mask.
        """
        batch_size = entity_clf.shape[0]
        entity_logits_max = entity_clf.argmax(dim=-1) * entity_sample_masks.long()  # get entity type (including none)
        batch_relations = []
        batch_rel_masks = []
        batch_rel_sample_masks = []

        for i in range(batch_size):
            rels = []
            rel_masks = []
            sample_masks = []

            # get spans classified as entities
            non_zero_indices = (entity_logits_max[i] != 0).nonzero().view(-1)
            non_zero_spans = entity_spans[i][non_zero_indices].tolist()
            non_zero_indices = non_zero_indices.tolist()

            # create relations and
            for i1, s1 in zip(non_zero_indices, non_zero_spans):
                for i2, s2 in zip(non_zero_indices, non_zero_spans):
                    if i1 != i2 and overleap_check(s1, s2):
                    # if i1 != i2:
                        rels.append((i1, i2))
                        rel_masks.append(sampling.create_rel_mask(s1, s2, ctx_size))
                        sample_masks.append(1)

            if not rels:
                # case: no more than two spans classified as entities
                batch_relations.append(torch.tensor([[0, 0]], dtype=torch.long))
                batch_rel_masks.append(torch.tensor([[0] * ctx_size], dtype=torch.bool))
                batch_rel_sample_masks.append(torch.tensor([0], dtype=torch.bool))
            else:
                # case: more than two spans classified as entities
                batch_relations.append(torch.tensor(rels, dtype=torch.long))
                batch_rel_masks.append(torch.stack(rel_masks))
                batch_rel_sample_masks.append(torch.tensor(sample_masks, dtype=torch.bool))

        # stack (pad to the longest candidate list, then move to model device)
        device = self.rel_classifier.weight.device
        batch_relations = util.padded_stack(batch_relations).to(device)
        batch_rel_masks = util.padded_stack(batch_rel_masks).to(device)
        batch_rel_sample_masks = util.padded_stack(batch_rel_sample_masks).to(device)

        return batch_relations, batch_rel_masks, batch_rel_sample_masks

    def forward(self, *args, inference=False, **kwargs):
        """ Dispatch to the training or inference forward pass.

        Args:
            inference: if False (default), run _forward_train; otherwise
                run _forward_inference. Remaining args are passed through.
        """
        if not inference:
            return self._forward_train(*args, **kwargs)
        else:
            return self._forward_inference(*args, **kwargs)

# Model access

# Registry mapping public model names to their implementing classes.
_MODELS = {
    'opinion_miner': OpinionMiner,
}

def get_model(name):
    """Return the model class registered under ``name`` (KeyError if absent)."""
    model_cls = _MODELS[name]
    return model_cls