#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   dual_attention.py
@Contact :   raogx.vip@hotmail.com
@License :   (C)Copyright 2020

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2021-01-07 21:45   WandongShi      1.0         None
'''
import math
import numpy as np
import torch
from torch import nn

class Attention_Type(nn.Module):
    """Multi-head attention that scores each query against per-sentiment key slots.

    Queries and keys are projected into a shared hidden space; a score
    function ('dot', 'scaled_dot', or a user-supplied callable) produces
    attention logits, which are optionally masked, softmax-normalised and
    passed through dropout.  The forward pass returns the normalised
    scores taken at sentiment slot index 1.
    """

    def __init__(self,
                 q_embd_size,
                 k_embd_size,
                 drop_rate=0.,
                 q_k_hidden_size=None,
                 num_heads=1,  # for multi-head attention
                 score_func='scaled_dot',
                 bias=True):
        super(Attention_Type, self).__init__()
        # Bug fix: q_k_hidden_size was never stored, so the default
        # 'scaled_dot' branch crashed with AttributeError on
        # self.q_k_hidden_size.
        self.q_k_hidden_size = q_k_hidden_size
        self.head_dim = q_k_hidden_size // num_heads
        self.q_w = nn.Linear(q_embd_size, q_k_hidden_size, bias=bias)
        self.k_w = nn.Linear(k_embd_size, q_k_hidden_size, bias=bias)
        # Bilinear weight used by the 'dot' score function.
        self.wa = nn.Parameter(torch.Tensor(q_k_hidden_size, q_k_hidden_size).fill_(0.00001))
        self.num_heads = num_heads
        self.score_func = score_func
        self.drop_rate = drop_rate

    def forward(self, q_embd, k_embd, mask=None):
        """
        :param q_embd: [batch_size, max_pairs, q_embd_size]; a 2-D input is
            first lifted to 3-D by inserting a length-1 sequence dimension
        :param k_embd: per-sentiment key embeddings.
            NOTE(review): the view logic below only works for a 4-D input
            [batch_size, max_seq, k_senti, k_embd_size], while the original
            docstring claimed 3-D — confirm against the caller.
        :param mask: optional mask; falsy positions are filled with -inf
            before the softmax
        :return: normalised scores at sentiment slot 1,
            [num_heads * batch_size, max_pairs, k_senti]
        """
        if len(q_embd.shape) == 2:
            # insert a length-1 sequence dimension
            q_embd = torch.unsqueeze(q_embd, 1)
        if len(k_embd.shape) == 2:
            k_embd = torch.unsqueeze(k_embd, 1)
        batch_size = q_embd.shape[0]
        q_len = q_embd.shape[1]
        k_len = k_embd.shape[1]
        k_senti = k_embd.shape[2]

        # Broadcast each query across the k_senti sentiment slots.
        q_embd = torch.unsqueeze(q_embd, dim=2).expand(q_embd.shape[0], q_embd.shape[1], k_senti, q_embd.shape[-1]).contiguous()
        # Project, split into heads, and fold the head dim into the batch dim.
        q = self.q_w(q_embd).view(batch_size, q_len, k_senti, self.num_heads, self.head_dim)
        q = q.permute(3, 0, 1, 2, 4).contiguous().view(-1, q_len, k_senti, self.head_dim)
        k = self.k_w(k_embd).view(batch_size, k_len, k_senti, self.num_heads, self.head_dim)
        k = k.permute(3, 0, 1, 2, 4).contiguous().view(-1, k_len, k_senti, self.head_dim)

        # get score
        if isinstance(self.score_func, str):
            if self.score_func == "dot":
                # bilinear score: q . wa . k^T, contracted over the head dim
                score = torch.matmul(torch.matmul(q, self.wa), k.permute(0, 1, 3, 2))

            elif self.score_func == "scaled_dot":
                # Bug fix: q and k are 4-D here, so torch.bmm (3-D only)
                # raised; use matmul over the last two dims like the 'dot'
                # branch does.
                temp = torch.matmul(q, k.permute(0, 1, 3, 2))
                score = torch.div(temp, math.sqrt(self.q_k_hidden_size))

            else:
                raise RuntimeError('invalid score function')
        elif callable(self.score_func):
            try:
                score = self.score_func(q, k)
            except Exception as e:
                # Bug fix: the original printed the exception and fell
                # through, leaving `score` undefined and failing later with
                # an unrelated NameError.
                raise RuntimeError('score function failed') from e
        else:
            raise RuntimeError('invalid score function')

        if mask is not None:
            mask = mask.bool().unsqueeze(1)
            score = score.masked_fill(~mask, -np.inf)
        # normalise the scores over the last (evidence) dimension
        score = nn.functional.softmax(score, dim=-1)
        score = nn.functional.dropout(score, p=self.drop_rate, training=self.training)
        # Keep sentiment slot 1 only; squeeze(dim=2) is a no-op unless the
        # remaining dim happens to be size 1 — presumably kept for a
        # degenerate case. [batch_size, max_pairs, len(senti)]
        score = score[:, :, 1, :].squeeze(dim=2)
        return score

class Attention_Node(nn.Module):
    """Dual-channel attention over aspect (at) and opinion (ot) embeddings.

    Each channel is projected and scored separately; the two score maps are
    blended with weight `lamda` (aspect) vs `1 - lamda` (opinion), then
    softmax-normalised and used to pool the concatenated key embeddings.
    """

    def __init__(self,
                 q_embd_size,
                 k_embd_size,
                 q_k_hidden_size=None,
                 num_heads=1,  # for multi-head attention
                 score_func='scaled_dot',
                 drop_rate=0.,
                 bias=True):
        super(Attention_Node, self).__init__()
        # Bug fix: q_k_hidden_size was never stored, so the default
        # 'scaled_dot' branch crashed with AttributeError on
        # self.q_k_hidden_size.
        self.q_k_hidden_size = q_k_hidden_size
        self.head_dim = q_k_hidden_size // num_heads
        self.q_w = nn.Linear(q_embd_size, q_k_hidden_size, bias=bias)
        self.k_w = nn.Linear(k_embd_size, q_k_hidden_size, bias=bias)
        # Bilinear weights for the 'dot' score function, one per channel.
        self.wn_at = nn.Parameter(torch.Tensor(q_k_hidden_size, q_k_hidden_size).fill_(0.00001))
        self.wn_ot = nn.Parameter(torch.Tensor(q_k_hidden_size, q_k_hidden_size).fill_(0.00001))
        self.num_heads = num_heads
        self.score_func = score_func
        self.drop_rate = drop_rate

    def forward(self, q_at_embd, q_ot_embd, k_at_embd, k_ot_embd, lamda=0.5, mask=None):
        """
        :param q_at_embd: aspect queries [batch_size, q_len, q_embd_size]
        :param q_ot_embd: opinion queries, same shape as q_at_embd
        :param k_at_embd: aspect keys [batch_size, k_len, k_embd_size]
        :param k_ot_embd: opinion keys, same shape as k_at_embd
        :param lamda: blend weight for the aspect channel; the opinion
            channel gets 1 - lamda
        :param mask: optional mask; falsy positions are filled with -inf
            before the softmax
        :return: attended values [batch_size, q_len, 2 * k_embd_size]
        (2-D inputs are lifted to 3-D with a length-1 sequence dimension)
        """
        if len(q_at_embd.shape) == 2:
            q_at_embd = torch.unsqueeze(q_at_embd, 1)
        if len(k_at_embd.shape) == 2:
            k_at_embd = torch.unsqueeze(k_at_embd, 1)
        if len(q_ot_embd.shape) == 2:
            q_ot_embd = torch.unsqueeze(q_ot_embd, 1)
        if len(k_ot_embd.shape) == 2:
            k_ot_embd = torch.unsqueeze(k_ot_embd, 1)
        batch_size = q_at_embd.shape[0]
        q_len = q_at_embd.shape[1]
        k_len = k_at_embd.shape[1]

        # Project each channel, split into heads, and fold the head dim
        # into the batch dim (permute needs contiguous() before view()).
        q_at = self.q_w(q_at_embd).view(batch_size, q_len, self.num_heads, self.head_dim)
        q_ot = self.q_w(q_ot_embd).view(batch_size, q_len, self.num_heads, self.head_dim)
        q_at = q_at.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self.head_dim)
        q_ot = q_ot.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self.head_dim)
        k_at = self.k_w(k_at_embd).view(batch_size, k_len, self.num_heads, self.head_dim)
        k_ot = self.k_w(k_ot_embd).view(batch_size, k_len, self.num_heads, self.head_dim)
        k_at = k_at.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self.head_dim)
        k_ot = k_ot.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self.head_dim)

        # get score: blend the aspect and opinion channels by lamda
        if isinstance(self.score_func, str):
            if self.score_func == "dot":
                score_at = torch.mul(torch.matmul(torch.matmul(q_at, self.wn_at), k_at.permute(0, 2, 1)), lamda)
                score_ot = torch.mul(torch.matmul(torch.matmul(q_ot, self.wn_ot), k_ot.permute(0, 2, 1)), 1-lamda)
                score = torch.add(score_at, score_ot)
            elif self.score_func == "scaled_dot":
                score_at = torch.bmm(q_at, k_at.permute(0, 2, 1))
                score_ot = torch.bmm(q_ot, k_ot.permute(0, 2, 1))
                score_at = torch.mul(torch.div(score_at, math.sqrt(self.q_k_hidden_size)), lamda)
                score_ot = torch.mul(torch.div(score_ot, math.sqrt(self.q_k_hidden_size)), 1-lamda)
                # Bug fix: `troch.add` typo made the default score function
                # raise a NameError.
                score = torch.add(score_at, score_ot)
            else:
                raise RuntimeError('invalid score function')
        elif callable(self.score_func):
            try:
                # Bug fix: the original referenced undefined names q/k here;
                # apply the callable per channel with the same lamda
                # blending as the built-in score functions.
                score = torch.add(
                    torch.mul(self.score_func(q_at, k_at), lamda),
                    torch.mul(self.score_func(q_ot, k_ot), 1 - lamda))
            except Exception as e:
                # Bug fix: the original printed the exception and fell
                # through, leaving `score` undefined and failing later with
                # an unrelated NameError.
                raise RuntimeError('score function failed') from e
        else:
            raise RuntimeError('invalid score function')
        if mask is not None:
            if len(mask.shape) == 2:
                mask = mask.bool().unsqueeze(1)
            score = score.masked_fill(~mask, -np.inf)
        # normalise the scores over the key positions
        # score: [batch_size, max_pairs, k_len]
        score = nn.functional.softmax(score, dim=-1)
        score = nn.functional.dropout(score, p=self.drop_rate, training=self.training)

        # get output: pool the concatenated aspect/opinion key embeddings
        # v: [batch_size, k_len, 2 * k_embd_size]
        # NOTE(review): score carries a merged heads*batch leading dim while
        # v keeps batch only, so this bmm only lines up when num_heads == 1
        # — confirm intended usage.
        v = torch.cat([k_at_embd, k_ot_embd], dim=2)
        output = torch.bmm(score, v)
        return output