#! -*- encoding:utf-8 -*-
"""
@File    :   Baselines.py
@Author  :   Zachary Li
@Contact :   li_zaaachary@163.com
@Dscpt   :   
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertPreTrainedModel, BertModel


class BertBaseline(BertPreTrainedModel):
    """Binary sequence classifier: BERT pooled [CLS] output -> dropout -> linear head."""

    def __init__(self, config, *args, **kwargs):
        super(BertBaseline, self).__init__(config)

        self.bert = BertModel(config)

        # Classification head applied to the pooled [CLS] representation.
        self.scorer = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(config.hidden_size, 2),
        )

        self.init_weights()

    def forward(self, input_ids, attention_mask, token_type_ids, labels):
        """Training-time forward pass.

        Args:
            input_ids: [B, L] token ids.
            attention_mask: [B, L] attention mask.
            token_type_ids: [B, L] segment ids.
            labels: [B] gold class indices.

        Returns:
            (loss, right_num, predicts): cross-entropy loss, count of correct
            predictions in the batch, and the predicted class indices.
        """
        logits = self._forward(input_ids, attention_mask, token_type_ids)
        loss = F.cross_entropy(logits, labels)

        # Metrics only — keep them out of the autograd graph.
        with torch.no_grad():
            probs = F.softmax(logits, dim=1)
            predicts = probs.argmax(dim=1)
            right_num = (predicts == labels).sum()

        return loss, right_num, predicts

    def _forward(self, input_ids, attention_mask, token_type_ids):
        """Encode the batch and score it; returns unnormalised logits [B, 2]."""
        encoded = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        # pooler_output is the transformed [CLS] vector.
        return self.scorer(encoded.pooler_output)

    def predict(self, input_ids, attention_mask, token_type_ids):
        """Inference helper: returns predicted class indices [B]."""
        probs = F.softmax(self._forward(input_ids, attention_mask, token_type_ids), dim=1)
        return probs.argmax(dim=1)


class ZMM_Model(BertPreTrainedModel):
    """Dual-encoder classifier.

    One BERT (`bert`) encodes the full concatenated input; a second BERT
    (`s_bert`) encodes two long segments separately.  The two segment
    encodings are fused via cosine-similarity cross-attention, max-pooled,
    and combined with the pooled full-input vector for binary classification.
    """

    def __init__(self, config):
        super(ZMM_Model, self).__init__(config)
        self.bert = BertModel(config)       # encoder for the concatenated input
        self.s_bert = BertModel(config)     # separate encoder for the long segments
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # dense fuses 4 concatenated hidden-size vectors back to hidden_size.
        self.dense = nn.Linear(config.hidden_size * 4, config.hidden_size)
        self.classifier = nn.Linear(config.hidden_size, 2)
        # Keep the first s_length+1 token states (incl. [CLS]) of each segment.
        self.s_length = 40
        # Consistent with BertBaseline: initialise the freshly added head layers.
        self.init_weights()

    def forward(self, input_ids, attention_mask, token_type_ids,
                long_input_ids, long_attention_mask, long_token_type_ids, labels):
        """Training-time forward pass.

        Args:
            input_ids / attention_mask / token_type_ids: [B, L] full input.
            long_input_ids / long_attention_mask / long_token_type_ids:
                [B, 2, L'] — two long segments per example.
            labels: [B] gold class indices.

        Returns:
            (loss, right_num, predicts): cross-entropy loss, count of correct
            predictions in the batch, and the predicted class indices.
        """
        logits = self._forward(input_ids, attention_mask, token_type_ids,
                               long_input_ids, long_attention_mask, long_token_type_ids)
        loss = F.cross_entropy(logits, labels)

        # Metrics only — keep them out of the autograd graph.
        with torch.no_grad():
            probs = F.softmax(logits, dim=1)
            predicts = torch.argmax(probs, dim=1)
            right_num = torch.sum(predicts == labels)

        return loss, right_num, predicts

    def _forward(self, all_input_ids, all_attention_mask, all_token_type_ids,
                 long_input_ids, long_attention_mask, long_token_type_ids):
        """Compute unnormalised logits [B, 2] for a batch."""
        output = self.bert(all_input_ids, all_attention_mask, all_token_type_ids)
        all_pooled_output = self.dropout(output.pooler_output)

        # Encode each long segment independently with the segment encoder.
        s1_output = self._encode_segment(long_input_ids, long_attention_mask,
                                         long_token_type_ids, 0)
        s2_output = self._encode_segment(long_input_ids, long_attention_mask,
                                         long_token_type_ids, 1)

        # Cosine cross-attention between the two segments: [B, L1, L2].
        att_w = self._cosine_attention(s1_output, s2_output)
        att_mean_s1 = self._div_safe(
            (s1_output.unsqueeze(1) * att_w.unsqueeze(3)).sum(dim=2),
            att_w.sum(dim=2, keepdim=True))
        att_mean_s2 = self._div_safe(
            (s2_output.unsqueeze(1) * att_w.unsqueeze(3)).sum(dim=1),
            att_w.sum(dim=1, keepdim=True).permute(0, 2, 1))

        # Max-pool over the sequence dimension -> [B, hidden_size].
        # (Was nn.MaxPool1d rebuilt every call + a hard-coded .view(-1, 768),
        # which broke for any hidden_size != 768.)
        s1_vec = att_mean_s1.max(dim=1).values
        s2_vec = att_mean_s2.max(dim=1).values

        # Fuse: pooled full input + sum/diff/abs-diff of the segment vectors.
        pooled_output = torch.cat(
            (all_pooled_output, s1_vec + s2_vec, s1_vec - s2_vec,
             torch.abs(s1_vec - s2_vec)), dim=1)
        output = self.dropout(self.dense(pooled_output))

        return self.classifier(output)

    def _encode_segment(self, input_ids, attention_mask, token_type_ids, idx):
        """Encode segment `idx` (0 or 1) of [B, 2, L'] inputs with `s_bert`;
        return the first s_length+1 token states: [B, s_length+1, hidden]."""
        out = self.s_bert(input_ids[:, idx, :],
                          attention_mask[:, idx, :],
                          token_type_ids[:, idx, :])
        return out.last_hidden_state[:, :self.s_length + 1, :]

    @staticmethod
    def _cosine_attention(v1, v2):
        """Pairwise cosine similarity.

        Args:
            v1: [B, L1, H]
            v2: [B, L2, H]
        Returns:
            [B, L1, L2] similarity matrix.
        """
        v1_norm = v1.norm(p=2, dim=2, keepdim=True)                  # [B, L1, 1]
        v2_norm = v2.norm(p=2, dim=2, keepdim=True).permute(0, 2, 1)  # [B, 1, L2]
        dot = torch.bmm(v1, v2.permute(0, 2, 1))                     # [B, L1, L2]
        return ZMM_Model._div_safe(dot, v1_norm * v2_norm)

    @staticmethod
    def _div_safe(n, d, eps=1e-8):
        """Elementwise n / d with near-zero denominators clamped to eps."""
        d = d * (d > eps).float() + eps * (d <= eps).float()
        return n / d

    def predict(self, all_input_ids, all_attention_mask, all_token_type_ids,
                long_input_ids, long_attention_mask, long_token_type_ids):
        """Inference helper: returns predicted class indices [B]."""
        logits = self._forward(all_input_ids, all_attention_mask, all_token_type_ids,
                               long_input_ids, long_attention_mask, long_token_type_ids)
        probs = F.softmax(logits, dim=1)
        return torch.argmax(probs, dim=1)