import math
import pickle
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F

from torch.nn.init import xavier_uniform_


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention over unbatched inputs.

    Inputs are 2-D ``(seq_len, emb_dim)`` tensors (no batch dimension);
    the heads act as the batch axis of ``torch.bmm``. Query/key/value use
    separate bias-free projection matrices, as does the output projection.

    Args:
        emb_dim: model dimension; must be divisible by ``nhead``.
        nhead: number of attention heads.
        dropout: dropout probability applied to the attention scores.
        attn_mask: if True, re-zero masked positions after the softmax
            (guards rows that are entirely masked, whose softmax would
            otherwise distribute weight over masked entries).
    """

    def __init__(self, emb_dim, nhead, dropout=0.1, attn_mask=False):
        super(MultiHeadAttention, self).__init__()
        # Without this check a non-divisible emb_dim silently truncates
        # features in the view() below instead of failing loudly.
        assert emb_dim % nhead == 0, "emb_dim must be divisible by nhead"
        self.attn_mask = attn_mask
        self.nhead = nhead
        self.head_dim = emb_dim // nhead
        self.q_proj_weight = nn.Parameter(torch.empty(emb_dim, emb_dim), requires_grad=True)
        self.k_proj_weight = nn.Parameter(torch.empty(emb_dim, emb_dim), requires_grad=True)
        self.v_proj_weight = nn.Parameter(torch.empty(emb_dim, emb_dim), requires_grad=True)

        self.o_proj = nn.Linear(emb_dim, emb_dim, bias=False)
        self.dropout = dropout
        self._reset_parameter()

    def _reset_parameter(self):
        # Xavier init for every projection matrix.
        xavier_uniform_(self.q_proj_weight)
        xavier_uniform_(self.k_proj_weight)
        xavier_uniform_(self.v_proj_weight)
        xavier_uniform_(self.o_proj.weight)

    def forward(self, q, k, v, mask, require_weight=False):
        """Compute attention.

        Args:
            q: (q_len, emb_dim) queries.
            k, v: (kv_len, emb_dim) keys and values; must share a length.
                (Generalized from the original, which asserted
                q_len == kv_len even though the math supports cross-attention.)
            mask: bool tensor broadcastable to (nhead, q_len, kv_len) where
                True marks positions to mask out, or None.
            require_weight: if True, also return the attention scores.

        Returns:
            (output, attn_score) where output is (q_len, emb_dim) and
            attn_score is (nhead, q_len, kv_len) or None.
        """
        q_len = q.size(0)
        kv_len = k.size(0)
        assert kv_len == v.size(0), "length of key does not equal length of value"

        scaling = float(self.head_dim) ** -0.5

        query = F.linear(q, self.q_proj_weight)
        key = F.linear(k, self.k_proj_weight)
        value = F.linear(v, self.v_proj_weight)

        # (nhead, len, head_dim)
        query = query.contiguous().view(q_len, self.nhead, self.head_dim).transpose(0, 1)
        key = key.contiguous().view(kv_len, self.nhead, self.head_dim).transpose(0, 1)
        value = value.contiguous().view(kv_len, self.nhead, self.head_dim).transpose(0, 1)

        # q*k^T, scaled: (nhead, q_len, kv_len)
        attn_weight = torch.bmm(query, key.transpose(1, 2))
        attn_weight = attn_weight * scaling

        if mask is not None:
            # Large negative instead of -inf keeps fully-masked rows finite.
            attn_weight = torch.masked_fill(attn_weight, mask, -1e30)
        attn_score = F.softmax(attn_weight, dim=-1)
        if self.attn_mask:
            # Hard-zero the masked entries after the softmax.
            attmask = mask.eq(False).to(torch.float)
            attn_score = attn_score * attmask
        attn_score = F.dropout(attn_score, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_score, value)
        # (nhead, q_len, head_dim) -> (q_len, nhead, head_dim) -> (q_len, emb_dim)
        attn_output = attn_output.transpose(0, 1).contiguous().view(q_len, -1)
        output = F.linear(attn_output, self.o_proj.weight)
        if require_weight:
            return output, attn_score
        return output, None

def _get_activation_fn(activation):
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return F.gelu
    else:
        raise RuntimeError("activation should be relu/gelu, not %s." % activation)

class TransformerLayerAbs(nn.Module):
    """Post-norm Transformer encoder layer: masked self-attention followed by
    a position-wise feed-forward network, each wrapped in residual + LayerNorm.

    Args:
        d_model: model dimension.
        nhead: number of attention heads.
        dim_feedforward: hidden size of the feed-forward sublayer.
        dropout: dropout probability (shared by attention and FFN).
        activation: 'relu' or 'gelu'.
        attn_mask: forwarded to MultiHeadAttention (post-softmax re-masking).
    """

    def __init__(self, d_model, nhead, dim_feedforward, dropout, activation, attn_mask=False):
        super(TransformerLayerAbs, self).__init__()
        self.attention = MultiHeadAttention(d_model, nhead, dropout, attn_mask)

        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)

    def forward(self, src, src_mask, require_weight=False):
        """Apply the layer to src of shape (seq_len, d_model).

        Returns the transformed tensor, or (tensor, attention_weights)
        when require_weight is True.
        """
        # The attention returns (output, None) when require_weight is False,
        # so a single call covers both paths (the original branched here).
        src2, weight = self.attention(src, src, src, src_mask, require_weight)
        ss = self.norm1(src + self.dropout1(src2))
        # Removed the dead `hasattr(self, "activation")` fallback to relu:
        # __init__ always sets self.activation and no __setstate__ exists.
        ss2 = self.linear2(self.dropout(self.activation(self.linear1(ss))))
        ss = self.norm2(ss + self.dropout2(ss2))
        if require_weight:
            return ss, weight
        return ss



class FusionAttention(nn.Module):
    """Fuse several per-position feature views into one vector per position
    with a learned scalar attention over the views.

    The weight ``wf`` is left uninitialized here; callers (e.g. a parent
    module's reset routine) are expected to initialize it.
    """

    def __init__(self, input_dim):
        super(FusionAttention, self).__init__()
        # (1, input_dim, 1), broadcast over positions via expand() in forward.
        self.wf = nn.Parameter(torch.empty((1, input_dim, 1)), requires_grad=True)

    def forward(self, feat):
        """feat: (n_views, seq_len, sent_dim) -> fused (seq_len, sent_dim)."""
        n_pos = feat.size(1)
        feat_dim = feat.size(2)
        # (n_views, seq_len, sent_dim) -> (seq_len, n_views, sent_dim)
        views = feat.transpose(0, 1)
        # Score each view per position: (seq_len, n_views, 1)
        scores = torch.bmm(views, self.wf.expand(n_pos, feat_dim, 1))
        weights = F.softmax(scores, dim=1)
        # Weighted sum over views: (seq_len, 1, sent_dim)
        fused = torch.bmm(weights.transpose(1, 2), views)
        return fused.squeeze(1)

class AbsolutePositionEncoding(nn.Module):
    """Sinusoidal absolute position encoding (Vaswani et al., 2017).

    Precomputes a (max_len, input_dim) table with sin on even columns and
    cos on odd columns; forward() adds the first seq_len rows to the input.

    Args:
        input_dim: feature dimension (even or odd).
        max_len: maximum supported sequence length.
    """

    def __init__(self, input_dim, max_len=1600):
        super(AbsolutePositionEncoding, self).__init__()
        self.max_len = max_len
        pe = torch.zeros(max_len, input_dim)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0., input_dim, 2) * -(math.log(10000.) / input_dim))
        pe[:, 0::2] = torch.sin(position * div_term)
        # For odd input_dim there is one fewer cos column than sin column;
        # slice div_term so shapes line up (the original crashed on odd dims).
        pe[:, 1::2] = torch.cos(position * div_term[: input_dim // 2])
        # Buffer: moves with the module's device/dtype, not a parameter.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to x of shape (seq_len, input_dim)."""
        seq_len = x.size(0)
        # Clear error instead of an opaque broadcast failure downstream.
        assert seq_len <= self.max_len, "sequence length exceeds max_len"
        pe_clip = self.pe[:seq_len]
        pemb = x + pe_clip
        return pemb

def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])

def build_mixed_mask_local(utt_mask, spk_mask=None, window=10, bidirectional=False):
    """Build boolean attention masks where True means "masked out".

    Args:
        utt_mask: (bsz, slen) float padding mask (nonzero for real tokens).
        spk_mask: optional (bsz, slen) speaker ids.
        window: half-width of the local attention band around the diagonal.
        bidirectional: if False, also mask all future positions (causal).

    Returns:
        (umask, smask_self, smask_other); the two speaker masks are None
        when spk_mask is None.
    """
    # Pairwise validity via outer product: (bsz, slen, slen).
    pair = torch.matmul(utt_mask.unsqueeze(2), utt_mask.unsqueeze(1))
    # Keep only a band of `window` positions around the diagonal.
    pair = pair.tril(window) - pair.tril(-window - 1)
    if not bidirectional:
        pair = pair.tril(0)  # causal: drop attention to the future
    umask = pair.eq(0)
    if spk_mask is None:
        return umask, None, None

    bsz = spk_mask.size(0)
    slen = spk_mask.size(1)
    rows = spk_mask.unsqueeze(2).expand(bsz, slen, slen)
    cols = spk_mask.unsqueeze(1).expand(bsz, slen, slen)
    same = torch.eq(rows, cols)          # True where speakers match
    other = same.eq(False)               # True where speakers differ
    # Restrict each speaker pattern to positions the utterance mask allows,
    # then invert so True once again means "masked out".
    smask_self = torch.masked_fill(same, umask, False).eq(False)
    smask_other = torch.masked_fill(other, umask, False).eq(False)
    return umask, smask_self, smask_other

class TripleTransformer(nn.Module):
    """Up to three parallel stacks of Transformer layers, each attending under
    a different mask (utterance window / same speaker / other speaker), with
    the per-block outputs fused by a FusionAttention when num_block > 1.

    ``layer`` is a template module (e.g. TransformerLayerAbs) deep-copied
    ``num_layer`` times per block.
    """

    def __init__(self, layer, nhead, num_layer, emb_dim, max_len, bidirectional, num_block, norm=None):
        super(TripleTransformer, self).__init__()
        self.nhead = nhead
        self.bidirectional = bidirectional
        self.num_layer = num_layer
        # NOTE(review): `norm` is stored but never applied in forward() —
        # confirm whether a final normalization was intended.
        self.norm = norm
        self.num_block = num_block
        self.pe = AbsolutePositionEncoding(emb_dim, max_len)
        # One clone stack per block; fusion only exists when there is
        # more than one block to combine.
        if self.num_block == 1:
            self.layers1 = _get_clones(layer, num_layer)
        elif self.num_block == 2:
            self.layers1 = _get_clones(layer, num_layer)
            self.layers2 = _get_clones(layer, num_layer)
            self.fusion = FusionAttention(emb_dim)
        elif self.num_block == 3:
            self.layers1 = _get_clones(layer, num_layer)
            self.layers2 = _get_clones(layer, num_layer)
            self.layers3 = _get_clones(layer, num_layer)
            self.fusion = FusionAttention(emb_dim)
        else:
            assert 1 <= num_block <= 3, 'ooc'
        self._reset_parameter()

    def _reset_parameter(self):
        # Xavier-init every matrix-shaped parameter; dim-1 parameters
        # (e.g. LayerNorm weights/biases) are skipped and keep defaults.
        for p in self.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)

    def forward(self, src, utt_mask, spk_mask, window=100, mode='so'):
        """Run the selected block(s) and return fused features.

        Args:
            src: (batch, seq_len, emb_dim) input features.
            utt_mask: (batch, seq_len) padding mask (nonzero = real token).
            spk_mask: (batch, seq_len) speaker ids.
            window: local attention band half-width.
            mode: which mask drives each block — 'u'/'s'/'o' for one block,
                'so'/'us'/'uo' for two; ignored when num_block == 3
                (all three masks are used).

        Returns:
            (batch, seq_len, emb_dim) tensor.
        """
        batch_size, seq_len, emb_dim = src.size()
        # NOTE(review): the batch is flattened into one long sequence, so
        # positional encodings and attention windows run across example
        # boundaries — presumably callers use batch_size == 1; confirm
        # before relying on larger batches.
        src= src.view(-1, emb_dim)
        src_len = src.size(0)
        utt_mask =utt_mask.view(-1)
        spk_mask =spk_mask.view(-1)
        # ##### make masks
        # (1, src_len, tgt_len)
        # uttm, samm, othm = build_mixed_mask_prior(utt_mask.unsqueeze(0), spk_mask.unsqueeze(0), True)
        uttm, samm, othm = build_mixed_mask_local(utt_mask.unsqueeze(0), spk_mask.unsqueeze(0),
                                                  window, self.bidirectional)
        # Broadcast the (1, L, L) masks over all attention heads.
        uttm = uttm.expand(self.nhead, src_len, src_len)
        samm = samm.expand(self.nhead, src_len, src_len)
        othm = othm.expand(self.nhead, src_len, src_len)

        src = self.pe(src)
        if self.num_block == 1:
            # Single block: mode picks exactly one mask.
            output = src
            for i in range(self.num_layer):
                if mode == 'u':
                    output = self.layers1[i](output, uttm)
                elif mode == 's':
                    output = self.layers1[i](output, samm)
                elif mode == 'o':
                    output = self.layers1[i](output, othm)
                else:
                    raise NotImplementedError
        elif self.num_block == 2:
            # Two blocks run in parallel on mode-selected mask pairs,
            # then their outputs are fused.
            output1 = src
            output2 = src
            for i in range(self.num_layer):
                if mode == 'so':
                    output1 = self.layers1[i](output1, samm)
                    output2 = self.layers2[i](output2, othm)
                elif mode == 'us':
                    output1 = self.layers1[i](output1, uttm)
                    output2 = self.layers2[i](output2, samm)
                elif mode == 'uo':
                    output1 = self.layers1[i](output1, uttm)
                    output2 = self.layers2[i](output2, othm)
                else:
                    raise NotImplementedError
            # (2, seq_len, sent_dim)
            output = torch.stack([output1, output2], dim=0)
            output = self.fusion(output)
        elif self.num_block == 3:
            # Three blocks: one per mask, fused at the end; mode is ignored.
            output1 = src
            output2 = src
            output3 = src
            for i in range(self.num_layer):
                output1 = self.layers1[i](output1, uttm)
                output2 = self.layers2[i](output2, samm)
                output3 = self.layers3[i](output3, othm)
            # (3, seq_len, sent_dim)
            output = torch.stack([output1, output2, output3], dim=0)
            output = self.fusion(output)
        else:
            output = None
            assert 1 <= self.num_block <= 3, 'ooc'

        # return output
        return output.view(batch_size, seq_len, emb_dim)


