import json

import mxnet.gluon.nn as nn
import mxnet.ndarray as nd

from config import Config
from models.attention_cell import DotProductAttentionCell, MultiHeadAttentionCell


# import logging


class HighwayBase(nn.HybridBlock):
    """One coupled-gate highway layer: y = H(x) * T(x) + x * (1 - T(x)).

    T is a sigmoid-gated convolution and H a convolution with the supplied
    activation; both preserve the channel count so the residual add is valid.
    """

    def __init__(self, num_ch, k, activation=None, conv_dim=2,
                 dropout=0.0):
        super(HighwayBase, self).__init__()
        with self.name_scope():
            self.dropout = nn.Dropout(dropout)
            # Pick the conv class once; both gates share kernel size and
            # "same" padding (valid for odd k).
            if conv_dim == 1:
                conv_cls = nn.Conv1D
            elif conv_dim == 2:
                conv_cls = nn.Conv2D
            else:
                raise Exception("Wrong dimension for highway conv")
            self.T_block = conv_cls(num_ch, k, activation='sigmoid', padding=k // 2)
            self.H_block = conv_cls(num_ch, k, activation=activation, padding=k // 2)

    def hybrid_forward(self, F, x):
        gate = self.T_block(x)
        transformed = self.dropout(self.H_block(x))
        # Coupled gates: carry weight is 1 - transform weight.
        return transformed * gate + x * (1.0 - gate)


class Highway(nn.HybridBlock):
    """Stack of coupled-gate highway layers.

    https://arxiv.org/pdf/1612.07771.pdf

    A plain feed-forward layer computes y(x) = H(x).  A highway layer adds a
    transform gate T and carry gate C:

        y(x) = H(x) * T(x) + x * C(x)

    and the common simplification couples the gates, C(x) = 1 - T(x), giving

        y(x) = H(x) * T(x) + x * (1 - T(x)).
    """

    def __init__(self, num_layers, num_ch, k, activation=None, conv_dim=2,
                 dropout=0.0):
        super(Highway, self).__init__()
        with self.name_scope():
            self.highway = nn.HybridSequential()
            for _ in range(num_layers):
                layer = HighwayBase(num_ch, k, activation=activation,
                                    conv_dim=conv_dim, dropout=dropout)
                self.highway.add(layer)

    def hybrid_forward(self, F, x):
        """Run the highway stack.

        :param x: NCHW tensor when conv_dim == 2, NCH when conv_dim == 1;
            channel count must match ``num_ch`` so gates align with the input.
        :return: tensor of the same shape as ``x``.
        """
        return self.highway(x)


class DepthwiseSeparableConv(nn.HybridBlock):
    """Depthwise convolution followed by a 1x1 pointwise convolution.

    The depthwise stage uses ``groups=in_ch`` so every input channel is
    filtered independently; the pointwise 1x1 stage then mixes channels,
    mapping ``in_ch`` -> ``out_ch``.  Padding of ``k // 2`` preserves the
    spatial size for odd kernel sizes.

    :param in_ch: number of input channels
    :param out_ch: number of output channels
    :param k: kernel size — an int, or a 2-tuple (collapsed to its first
        element when ``conv_dim == 1``)
    :param conv_dim: 1 for Conv1D, 2 for Conv2D
    :param bias: whether both convolutions use a bias term
    :raises TypeError: if ``k`` is neither int nor tuple
    :raises Exception: if ``conv_dim`` is not 1 or 2
    """
    def __init__(self, in_ch, out_ch, k, conv_dim=1, bias=False):
        super(DepthwiseSeparableConv, self).__init__()
        if isinstance(k, int):
            self.padding = k // 2
        elif isinstance(k, tuple):
            self.padding = (k[0] // 2, k[1] // 2)
            # A 2-element kernel tuple combined with a 1-D conv collapses to
            # its first component so Conv1D receives scalar arguments.
            if len(k) == 2 and conv_dim == 1:
                k = k[0]
                self.padding = self.padding[0]
        else:
            # Previously an unsupported kernel type left self.padding unset
            # and surfaced later as an opaque AttributeError; fail fast here.
            raise TypeError("kernel size k must be an int or a tuple, got %r" % type(k))

        if conv_dim == 1:
            self.depthwise_conv = nn.Conv1D(in_ch, k, groups=in_ch, in_channels=in_ch,
                                            padding=self.padding, use_bias=bias)
            self.pointwise_conv = nn.Conv1D(out_ch, 1, use_bias=bias)
        elif conv_dim == 2:
            self.depthwise_conv = nn.Conv2D(in_ch, k, groups=in_ch, in_channels=in_ch,
                                            padding=self.padding, use_bias=bias)
            self.pointwise_conv = nn.Conv2D(out_ch, 1, use_bias=bias)
        else:
            raise Exception("Wrong dimension for depthwise separable conv")

    def hybrid_forward(self, F, x):
        return self.pointwise_conv(self.depthwise_conv(x))


class SelfAttention(nn.HybridBlock):
    """Multi-head self-attention followed by a two-layer 1x1-conv feed-forward.

    Pipeline: layer-norm -> dropout -> multi-head attention (query = key =
    value = input) -> swap to channel-first -> layer-norm -> Conv1D(relu) ->
    Conv1D(linear).
    """

    def __init__(self, units, num_heads, use_bias, out_ch, dropout=0.0):
        """
        Parameters
        ----------
        units : int
            Total number of projected units for query. Must be divided exactly by num_heads.
        num_heads : int
            Number of parallel attention heads
        use_bias : bool, default True
            Whether to use bias when projecting the query/key/values
        out_ch: int
            number filters
        dropout: float
            dropout
        """
        super(SelfAttention, self).__init__()
        with self.name_scope():
            self.base_cell = DotProductAttentionCell()
            # Same projection size for query/key/value.
            self.multi_head_attn = MultiHeadAttentionCell(self.base_cell, units,
                                                          units, units,
                                                          num_heads, use_bias)
            self.layer_norm1 = nn.LayerNorm()
            self.layer_norm2 = nn.LayerNorm()
            self.dropout = nn.Dropout(dropout)
            self.conv1 = nn.Conv1D(out_ch, kernel_size=1, use_bias=True, activation='relu')
            self.conv2 = nn.Conv1D(out_ch, kernel_size=1, use_bias=True, activation=None)

    def hybrid_forward(self, F, x, mask=None):
        """Apply self-attention to ``x``; ``mask`` is forwarded to the
        attention cell (attention weights are discarded)."""
        normed = self.dropout(self.layer_norm1(x))
        attended, _attn_weights = self.multi_head_attn(normed, normed, mask=mask)
        # Channel-first layout for the 1x1 convolutions below.
        attended = attended.swapaxes(1, 2)
        hidden = self.conv1(self.layer_norm2(attended))
        return self.conv2(hidden)


class ConvBlock(nn.HybridBlock):
    """Sequential stack of depthwise-separable convolutions with a fixed
    channel count and kernel size."""

    def __init__(self, num_conv_layers, num_ch, k,):
        super(ConvBlock, self).__init__()
        with self.name_scope():
            self.convs = nn.HybridSequential()
            for _layer in range(num_conv_layers):
                self.convs.add(DepthwiseSeparableConv(num_ch, num_ch, k))

    def hybrid_forward(self, F, x):
        out = self.convs(x)
        return out


class EncoderBaseBlock(nn.HybridBlock):
    """One encoder unit: a conv stack followed by self-attention.

    The conv stack runs channel-first; axes 1 and 2 are swapped before the
    attention so it sees batch x length x channels.
    """

    def __init__(self, num_conv_layers, num_ch, k, query_units, num_heads, use_bias, dropout=0.0):
        super(EncoderBaseBlock, self).__init__()
        with self.name_scope():
            self.conv = ConvBlock(num_conv_layers, num_ch, k)
            self.self_attn = SelfAttention(query_units, num_heads, use_bias, num_ch, dropout)

    def hybrid_forward(self, F, x):
        conv_out = self.conv(x)
        # Attention expects length on axis 1, features on axis 2.
        attn_in = F.swapaxes(conv_out, 1, 2)
        return self.self_attn(attn_in)


class EncoderBlock(nn.HybridBlock):
    """Stack of ``num_blocks`` encoder units with a final dropout."""

    def __init__(self, num_blocks, num_conv_layers, num_ch, k, query_units, num_heads, use_bias, dropout=0.0):
        super(EncoderBlock, self).__init__()
        with self.name_scope():
            self.encoders = nn.HybridSequential()
            for _blk in range(num_blocks):
                block = EncoderBaseBlock(num_conv_layers, num_ch, k, query_units,
                                         num_heads, use_bias, dropout)
                self.encoders.add(block)
            self.dropout = nn.Dropout(dropout)

    def hybrid_forward(self, F, x):
        encoded = self.encoders(x)
        return self.dropout(encoded)


class Embedding(nn.HybridBlock):
    """Word- and character-level embedding lookup tables.

    When BOTH pretrained embedding files are supplied, the vocabulary and
    embedding sizes are taken from the loaded matrices and the explicit
    ``*_size`` arguments are ignored; otherwise the explicit sizes are used
    and the tables are randomly initialized.
    """

    def __init__(self, vocab_size=None, ch_size=None, word_emb_size=None, ch_emb_size=None,
                 pretrained_word_emb_file=None, pretrained_ch_emb_file=None):
        super(Embedding, self).__init__()
        self.pretrained_word_emb_file = pretrained_word_emb_file
        self.pretrained_ch_emb_file = pretrained_ch_emb_file
        self.vocab_size = vocab_size
        self.ch_size = ch_size
        self.word_emb_size = word_emb_size
        self.ch_emb_size = ch_emb_size

        # Pretrained matrices are only loaded when both files are present;
        # they override the caller-supplied sizes.
        if self.pretrained_word_emb_file is not None and self.pretrained_ch_emb_file is not None:
            self.pretrained_ch_emb, self.pretrained_word_emb = self.load_pretrained()
            self.vocab_size, self.word_emb_size = self.pretrained_word_emb.shape
            self.ch_size, self.ch_emb_size = self.pretrained_ch_emb.shape

        with self.name_scope():
            self.emb_word = nn.Embedding(self.vocab_size, self.word_emb_size)
            self.emb_ch = nn.Embedding(self.ch_size, self.ch_emb_size)

    @staticmethod
    def _load_json_array(path):
        # Context manager closes the handle even if json parsing fails
        # (the original left both file handles open).
        with open(path, 'r') as f:
            return nd.array(json.load(f))

    def load_pretrained(self):
        """Return (char_matrix, word_matrix) loaded from the JSON files."""
        # fixme handle when no pretrained embedding
        return self._load_json_array(self.pretrained_ch_emb_file), \
               self._load_json_array(self.pretrained_word_emb_file)

    def hybrid_forward(self, F, chars, words):
        """
        :param chars: Batch x Length x char_num
        :param words: Batch x Length
        :return: emb_ch: Batch x Length x char_num x emb_size
        """
        return self.emb_ch(chars), self.emb_word(words)

    def initialize(self, **kwargs):
        # todo use from mxnet.contrib import text
        self.emb_ch.initialize(**kwargs)
        self.emb_word.initialize(**kwargs)
        # The pretrained_* attributes only exist when BOTH files were given
        # (see __init__).  Guarding on the attribute (instead of the file
        # name alone) avoids an AttributeError when exactly one file is set.
        if hasattr(self, 'pretrained_ch_emb'):
            self.emb_ch.weight.set_data(self.pretrained_ch_emb)
            del self.pretrained_ch_emb
        if hasattr(self, 'pretrained_word_emb'):
            self.emb_word.weight.set_data(self.pretrained_word_emb)
            del self.pretrained_word_emb


class Pointer(nn.HybridBlock):
    """Produces start/end span scores from three model-encoder outputs.

    The start score is a linear projection of [m0; m1] and the end score a
    linear projection of [m0; m2], each concatenated along axis 1.
    """

    def __init__(self):
        super(Pointer, self).__init__()
        with self.name_scope():
            self.linear_start = nn.Dense(1)
            self.linear_end = nn.Dense(1)

    def hybrid_forward(self, F, m0, m1, m2):
        start_features = F.concat(m0, m1, dim=1)
        end_features = F.concat(m0, m2, dim=1)
        return self.linear_start(start_features), self.linear_end(end_features)


class EmbeddingHighway(nn.HybridBlock):
    """Fuses character and word embeddings into a single channel-first
    representation.

    Characters go through a 2-D separable conv, relu, and a max over the
    character axis; the result is concatenated with the (dropped-out) word
    embeddings, projected by a 1-D separable conv, and refined by a highway
    stack.
    """

    def __init__(self, ch_emb_size, word_emb_size, out_ch, k, dropout, num_highway):
        super(EmbeddingHighway, self).__init__()
        with self.name_scope():
            self.relu = nn.Activation('relu')
            self.conv_ch = DepthwiseSeparableConv(ch_emb_size, out_ch, k, conv_dim=2)
            self.conv_emb = DepthwiseSeparableConv(word_emb_size + out_ch, out_ch, k, conv_dim=1)
            self.dropout = nn.Dropout(dropout)
            self.highway = Highway(num_highway, out_ch, k, conv_dim=1, dropout=dropout)

    def hybrid_forward(self, F, ch_emb, word_emb):
        """
        :param ch_emb:  Batch x Length x char_num x emb_size
        :param word_emb: Batch x Length x emb_size
        :return: Batch x out_ch x Length
        """
        # NCHW layout for the 2-D char conv, then pool over the char axis.
        chars = self.conv_ch(F.transpose(ch_emb, axes=(0, 3, 1, 2)))
        chars = self.relu(chars).max(axis=3)
        chars = self.dropout(chars)
        # Word embeddings to Batch x Emb x Length so axes line up for concat.
        words = self.dropout(word_emb).swapaxes(1, 2)
        fused = self.conv_emb(F.concat(chars, words, dim=1))
        return self.highway(fused)


class QANet(nn.Block):
    """End-to-end QANet model: embedding + highway fusion, embedding
    encoders for passage and question, passage-question attention, three
    model-encoder blocks, and a start/end span pointer.
    """

    def __init__(self, config: Config):
        super(QANet, self).__init__()
        with self.name_scope():
            # Shared char/word embedding tables (sizes come from the
            # pretrained files named in the config).
            self.emb = Embedding(pretrained_ch_emb_file=config.char_emb_file,
                                 pretrained_word_emb_file=config.word_emb_file)
            self.emb_highway = EmbeddingHighway(config.char_dim, config.glove_dim, config.hidden,
                                                k=5, dropout=config.dropout, num_highway=2)
            # embedding encoder layer
            self.p_emb_enc = EncoderBlock(
                num_blocks=4,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=7,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=True
            )
            self.q_emb_enc = EncoderBlock(
                num_blocks=4,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=7,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=True
            )

            # Passage-to-question attention (passage as query, question as
            # key/value in forward()).
            self.base_cell = DotProductAttentionCell()
            self.pq_attn = MultiHeadAttentionCell(
                base_cell=self.base_cell,
                query_units=config.hidden,
                key_units=config.hidden,
                value_units=config.hidden,
                num_heads=config.num_heads
            )

            # model encoder layer
            self.model_enc_blk0 = EncoderBlock(
                num_blocks=2,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=5,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=True
            )
            self.model_enc_blk1 = EncoderBlock(
                num_blocks=2,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=5,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=True
            )
            self.model_enc_blk2 = EncoderBlock(
                num_blocks=2,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=5,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=True
            )
            self.pointer = Pointer()

    def forward(self, p_chars, p_words, q_chars, q_words, p_mask=None, q_mask=None):
        """Run the full model on a passage/question batch.

        :param p_chars: passage character ids (Batch x Length x char_num)
        :param p_words: passage word ids (Batch x Length)
        :param q_chars: question character ids
        :param q_words: question word ids
        :param p_mask: unused here — NOTE(review): masks are accepted but
            never forwarded to the attention cells; confirm intended.
        :param q_mask: unused here (see p_mask)
        :return: (start_probs, end_probs) score tensors from the pointer
        """
        # Look up embeddings with the shared tables.
        p_emb_ch, p_emb_word = self.emb(p_chars, p_words)
        q_emb_ch, q_emb_word = self.emb(q_chars, q_words)

        # Fuse char/word embeddings; output is channel-first (B x C x L).
        p_out = self.emb_highway(p_emb_ch, p_emb_word)
        q_out = self.emb_highway(q_emb_ch, q_emb_word)

        p_out = self.p_emb_enc(p_out)
        q_out = self.q_emb_enc(q_out)

        # reshape to Batch x Length x Dim
        p_out = p_out.swapaxes(1, 2)
        q_out = q_out.swapaxes(1, 2)
        out, attn_weight = self.pq_attn(p_out, q_out)

        # Back to channel-first for the model encoders.
        out = out.swapaxes(1, 2)

        M0 = self.model_enc_blk0(out)
        M1 = self.model_enc_blk1(M0)
        M2 = self.model_enc_blk2(M1)

        start_probs = []
        end_probs = []

        # Move the last axis to the front and run the pointer once per
        # leading slice, then re-concatenate the scores along dim 1.
        # NOTE(review): this per-slice Python loop defeats batching and is
        # likely slow — consider a vectorized pointer; confirm shapes first.
        trans = (2, 0, 1)
        # print(shape_0)
        M0, M1, M2 = nd.transpose(M0, axes=trans), nd.transpose(M1, axes=trans), nd.transpose(M2, axes=trans)
        for i in range(M0.shape[0]):
            m0, m1, m2 = M0[i], M1[i], M2[i]
            # print('m0', m0.shape)
            p_start, p_end = self.pointer(m0, m1, m2)
            # print(p_start, p_end)
            # print('p_start', p_start.shape)
            start_probs.append(p_start)
            end_probs.append(p_end)

        # print(len(start_probs))
        start_probs = nd.concat(*start_probs, dim=1)
        end_probs = nd.concat(*end_probs, dim=1)
        # print(start_probs)
        # print(start_probs.shape)
        return start_probs, end_probs

