import json
import mxnet as mx
import mxnet.gluon.nn as nn
import mxnet.ndarray as nd
import math
from config import Config
# from models.attention_cell import DotProductAttentionCell, MultiHeadAttentionCell


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4, ctx=mx.cpu()):
    """Gets a bunch of sinusoids of different frequencies.
    Each channel of the input Tensor is incremented by a sinusoid of a different
    frequency and phase.
    This allows attention to learn to use absolute and relative positions.
    Timing signals should be added to some precursors of both the query and the
    memory inputs to attention.
    The use of relative position is possible because sin(x+y) and cos(x+y) can be
    expressed in terms of y, sin(x) and cos(x).
    In particular, we use a geometric sequence of timescales starting with
    min_timescale and ending with max_timescale.  The number of different
    timescales is equal to channels / 2. For each timescale, we
    generate the two sinusoidal signals sin(timestep/timescale) and
    cos(timestep/timescale).  All of these sinusoids are concatenated in
    the channels dimension.
    Args:
    length: scalar, length of timing signal sequence.
    channels: scalar, size of timing embeddings to create. The number of
        different timescales is equal to channels / 2.
    min_timescale: a float
    max_timescale: a float
    ctx: mxnet context the signal is created on.
    Returns:
    a Tensor of timing signals [1, length, channels]
    """
    position = nd.arange(length, ctx=ctx)
    num_timescales = channels // 2
    # max(..., 1) guards the division when channels < 4 (num_timescales <= 1),
    # which previously raised ZeroDivisionError.
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        max(num_timescales - 1, 1))
    inv_timescales = min_timescale * nd.exp(
        nd.arange(num_timescales, ctx=ctx) * -log_timescale_increment)
    # [length, num_timescales]
    scaled_time = nd.expand_dims(position, 1) * nd.expand_dims(inv_timescales, 0)
    # [length, 2 * num_timescales]
    signal = nd.concat(nd.sin(scaled_time), nd.cos(scaled_time), dim=1)
    if channels % 2 == 1:
        # Pad one zero column so the reshape to `channels` below succeeds for
        # odd channel counts (previously this failed; see tensor2tensor's
        # reference implementation, which pads the same way).
        signal = nd.concat(signal, nd.zeros((length, 1), ctx=ctx), dim=1)
    signal = nd.reshape(signal, [1, length, channels])
    return signal


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    """Add a sinusoidal positional ("timing") signal to a Tensor.

    A geometric sequence of timescales from min_timescale to max_timescale is
    used; for each timescale a sin and a cos channel is produced, and the
    resulting [1, length, channels] signal is broadcast-added to x. This lets
    attention layers exploit absolute and relative positions, since
    sin(x+y)/cos(x+y) decompose in terms of y, sin(x) and cos(x).

    Args:
    x: a Tensor with shape [batch, length, channels]
    min_timescale: a float
    max_timescale: a float
    Returns:
    a Tensor the same shape as x.
    """
    _, seq_len, depth = x.shape
    timing = get_timing_signal_1d(seq_len, depth, min_timescale,
                                  max_timescale, ctx=x.context)
    return x + timing


def mask_logits(inputs, mask, mask_value=-1e30):
    """Additively push masked-out logits toward -inf before a softmax.

    Positions where ``mask`` is 1 are left untouched; positions where
    ``mask`` is 0 receive an additive penalty of ``mask_value`` so they get
    (numerically) zero probability after softmax.
    """
    penalty = (1 - mask) * mask_value
    return inputs + penalty


class DotProductAttention(nn.Block):
    """Dot-product attention core for multi-head inputs.

    Folds the head axis into the batch axis, computes softmax(Q K^T) V with
    batched matrix products, then restores the [batch, heads, ...] layout.
    Note: the 1/sqrt(depth) scaling is NOT applied here — the caller
    (MultiHeadAttention) scales Q before invoking this block.
    """

    def __init__(self, num_heads):
        super(DotProductAttention, self).__init__()
        # Needed to split batch*heads back apart when reshaping the output.
        self.num_heads = num_heads
        # fixme add bias ?

    def forward(self, q, k, v, seq_len=None, mask=None):
        """

        :param q: [batch, heads, length_q, depth_k]
        :param k: [batch, heads, length_kv, depth_k]
        :param v: [batch, heads, length_kv, depth_v]
        :param seq_len: unused
        :param mask: [batch, 1, 1, length_kv]

        :return [batch, heads, length_q, depth_v]
        """
        # Fold heads into batch: [batch, heads, len, depth] ->
        # [batch*heads, 1, len, depth] (reshape: -1 infers, 0 copies the
        # corresponding input dim) -> squeeze the singleton ->
        # [batch*heads, len, depth], the layout batch_dot expects.
        q = q.reshape((-1, 1, 0, 0)).squeeze()
        k = k.reshape((-1, 1, 0, 0)).squeeze()
        v = v.reshape((-1, 1, 0, 0)).squeeze()

        # [batch x heads, length_q, length_kv]
        logits = nd.batch_dot(q, k, transpose_b=True)

        if mask is not None:
            # NOTE(review): mask is documented as [batch, 1, 1, length_kv]
            # while logits are [batch*heads, length_q, length_kv]; broadcast
            # compatibility depends on the caller's mask shape — confirm.
            logits = mask_logits(logits, mask)

        weights = nd.softmax(logits, axis=-1)
        out = nd.batch_dot(weights, v)

        # Undo the folding: [batch*heads, len_q, depth_v]
        # -> (1,2,0) -> [len_q, depth_v, batch*heads]
        # -> split last axis into (batch, heads) (batch-major, matching the
        #    merge above) -> (2,3,0,1) -> [batch, heads, len_q, depth_v].
        out = out.transpose(axes=(1, 2, 0))\
            .reshape((0, 0, -1, self.num_heads))\
            .transpose(axes=(2, 3, 0, 1))
        return out, weights


class MultiHeadAttention(nn.Block):
    """Multi-head attention with 1x1-convolution projections (QANet style).

    memory is projected once into 2*units channels — the first half becomes
    the keys, the second half the values; query is projected into units
    channels. NOTE(review): the `dropout` argument is neither stored nor
    applied anywhere in this block — confirm whether that is intentional.
    """

    def __init__(self, units, num_heads, use_bias=True, dropout=0.0):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.units = units
        self.base_attn_cell = DotProductAttention(num_heads)
        with self.name_scope():
            # Single projection producing keys and values concatenated
            # along the channel axis.
            self.mem_conv = nn.Conv1D(self.units * 2, 1, use_bias=use_bias)
            self.query_conv = nn.Conv1D(self.units, 1, use_bias=use_bias)

    def forward(self, query, memory=None, seq_len=None, mask=None):
        """

        :param query: [batch, q_length, depth_k]
        :param memory: [batch, length_kv, depth_k]; when None, self-attention
            over `query` is performed.
        :param seq_len: unused
        :param mask: accepted but not forwarded to the attention cell (see
            note below).

        :return [batch, length_q, heads*depth_v]
        """

        # self attention
        if memory is None:
            memory = query

        # memory projection (Conv1D operates on NCL, hence the swapaxes pair)
        memory = self.mem_conv(memory.swapaxes(1, 2)).swapaxes(1, 2)
        # query  projection
        query = self.query_conv(query.swapaxes(1, 2)).swapaxes(1, 2)

        # Q, K, V: [batch, heads, length, depth]
        Q = query.reshape(0, 0, self.num_heads, -1).swapaxes(1, 2)
        K = memory[:, :, 0: self.units].reshape(0, 0, self.num_heads, -1).swapaxes(1, 2)
        V = memory[:, :, self.units:].reshape(0, 0, self.num_heads, -1).swapaxes(1, 2)

        key_depth_per_head = self.units // self.num_heads

        # Scale queries by 1/sqrt(depth) here; DotProductAttention does not.
        Q = Q * key_depth_per_head ** -0.5

        # [batch, heads, length_q, depth_v]
        # NOTE(review): mask/seq_len are not passed on, so masking is
        # effectively disabled in this attention call — confirm.
        x, attn_weights = self.base_attn_cell(Q, K, V,)
        # print('x: ', x.shape)

        # Merge heads back: [batch, length_q, heads * depth_v]
        return x.swapaxes(1, 2).reshape((0, 0, -1)), attn_weights


class HighwayBase(nn.Block):
    """A single coupled-gate highway layer: y = H(x)*T(x) + x*(1 - T(x)).

    T (the transform gate) is a sigmoid-activated convolution; H is a
    convolution with the user-supplied activation, with dropout applied to
    its output. conv_dim selects 1-D (NCL) or 2-D (NCHW) convolutions.
    """

    def __init__(self, num_ch, k, activation=None, conv_dim=2,
                 dropout=0.0):
        super(HighwayBase, self).__init__()
        with self.name_scope():
            self.dropout = nn.Dropout(dropout)
            if conv_dim == 1:
                conv_cls = nn.Conv1D
            elif conv_dim == 2:
                conv_cls = nn.Conv2D
            else:
                raise Exception("Wrong dimension for highway conv")
            # 'same'-style padding so the spatial size is preserved.
            self.T_block = conv_cls(num_ch, k, activation='sigmoid', padding=k // 2)
            self.H_block = conv_cls(num_ch, k, activation=activation, padding=k // 2)

    def forward(self, x):
        gate = self.T_block(x)
        candidate = self.dropout(self.H_block(x))
        return candidate * gate + x * (1.0 - gate)


class Highway(nn.Block):
    """Stack of coupled-gate highway layers.

    https://arxiv.org/pdf/1612.07771.pdf

    Each layer computes y(x) = H(x) * T(x) + x * (1 - T(x)), where H is a
    nonlinear transform of x, T is the transform gate, and the carry gate C
    is coupled as C(x) = 1 - T(x) (see HighwayBase).
    """

    def __init__(self, num_layers, num_ch, k, activation='relu', conv_dim=2,
                 dropout=0.1):
        super(Highway, self).__init__()
        with self.name_scope():
            self.highway = nn.Sequential()
            for _ in range(num_layers):
                layer = HighwayBase(num_ch, k, activation=activation,
                                    conv_dim=conv_dim, dropout=dropout)
                self.highway.add(layer)

    def forward(self, x):
        """Run all highway layers in sequence.

        :param x: NCHW for conv_dim == 2, NCH for conv_dim == 1; H, T and x
            must share the same channel size.
        :return: same layout as the input.
        """
        return self.highway(x)


class DepthwiseSeparableConv(nn.Block):
    """Depthwise separable convolution: a per-channel (grouped) depthwise
    convolution followed by a 1x1 pointwise convolution with activation.

    :param in_ch: input channel count (also the group count of the depthwise
        convolution).
    :param out_ch: output channel count of the pointwise convolution.
    :param k: kernel size; an int, or a 2-tuple (reduced to its first
        component when conv_dim == 1).
    :param conv_dim: 1 for Conv1D (NCL input), 2 for Conv2D (NCHW input).
    :param bias: whether the convolutions use bias terms.
    :param activation: activation of the pointwise convolution.
    """

    def __init__(self, in_ch, out_ch, k, conv_dim=1, bias=False, activation='relu'):
        super(DepthwiseSeparableConv, self).__init__()
        if isinstance(k, int):
            self.padding = k // 2
        elif isinstance(k, tuple):
            self.padding = (k[0] // 2, k[1] // 2)
            if len(k) == 2 and conv_dim == 1:
                # Conv1D takes a scalar kernel; keep only the first component.
                k = k[0]
                self.padding = self.padding[0]
        else:
            # Previously an unsupported k silently left self.padding unset,
            # producing a confusing AttributeError below; fail fast instead.
            raise TypeError("kernel size k must be int or tuple, got %r" % type(k))

        if conv_dim == 1:
            self.depthwise_conv = nn.Conv1D(in_ch, k, groups=in_ch, in_channels=in_ch, padding=self.padding,
                                            use_bias=bias)
            self.pointwise_conv = nn.Conv1D(out_ch, 1, use_bias=bias, activation=activation)
        elif conv_dim == 2:
            self.depthwise_conv = nn.Conv2D(in_ch, k, groups=in_ch, in_channels=in_ch, padding=self.padding,
                                            use_bias=bias)
            self.pointwise_conv = nn.Conv2D(out_ch, 1, use_bias=bias, activation=activation)
        else:
            raise Exception("Wrong dimension for depthwise separable conv")

    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        return x


class SelfAttention(nn.Block):
    """Pre-norm self-attention layer followed by a 1x1 convolution.

    Pipeline: LayerNorm -> Dropout -> MultiHeadAttention (self) ->
    LayerNorm -> Conv1D(relu). Note there is no residual/skip connection
    inside this block, and only the attended output (not the attention
    weights) is returned.
    """

    def __init__(self, units, num_heads, use_bias, out_ch, dropout=0.0):
        """
        Parameters
        ----------
        units : int
            Total number of projected units for query. Must be divided exactly by num_heads.
        num_heads : int
            Number of parallel attention heads
        use_bias : bool, default True
            Whether to use bias when projecting the query/key/values
        out_ch: int
            number filters
        dropout: float
            dropout
        """
        super(SelfAttention, self).__init__()
        with self.name_scope():
            self.multi_head_attn = MultiHeadAttention(units,
                                                      num_heads,
                                                      use_bias,
                                                      dropout)
            self.layer_norm1 = nn.LayerNorm()
            self.layer_norm2 = nn.LayerNorm()
            self.dropout = nn.Dropout(dropout)
            self.conv1 = nn.Conv1D(out_ch, kernel_size=1, use_bias=True, activation='relu')
            # self.conv2 = nn.Conv1D(out_ch, kernel_size=1, use_bias=True, activation=None)

    def forward(self, x, mask=None):
        """Apply self-attention to x.

        :param x: [batch, length, channels] (attention works channel-last;
            the output is transposed back to channel-first for conv1).
        :param mask: accepted but NOT passed to the attention call below —
            masking is effectively disabled here; confirm if intended.
        :return: [batch, out_ch, length]
        """
        out = self.layer_norm1(x)
        out = self.dropout(out)
        # Self-attention (memory defaults to the query inside the block).
        out, attn_weights = self.multi_head_attn(out)
        # Back to NCL so Conv1D can consume it.
        out = out.swapaxes(1, 2)
        out = self.layer_norm2(out)
        out = self.conv1(out)
        # out = self.conv2(out)
        return out


class PQAttention(nn.Block):
    """Context-query (passage-question) attention, DCN/BiDAF style.

    Builds a trilinear similarity matrix S between every passage position
    and every question position, then forms passage-to-question (P2Q) and
    question-to-passage (Q2P) attended representations, concatenates
    [P, P2Q, P*P2Q, P*Q2P] and projects back to num_ch channels.
    """

    def __init__(self, para_limit, ques_limit, num_ch, dropout=0.1):
        super(PQAttention, self).__init__()
        # Fixed maximum passage/question lengths; S is always built at this
        # full size, with out-of-range cells zero-padded.
        self.para_limit = para_limit
        self.ques_limit = ques_limit
        with self.name_scope():
            # 1x1 conv acting as the trilinear scoring function over
            # concat(p, q, p*q).
            self.conv_linear = nn.Conv1D(1, 1)
            self.dropout = nn.Dropout(dropout)
            self.conv_out = nn.Conv1D(num_ch, 1)

    def forward(self, P, Q, p_lens=None, q_lens=None):

        """

        :param P: [batch, dim, len_p]
        :param Q: [batch, dim, len_q]
        :param p_lens: optional per-batch passage lengths; cells beyond
            max(p_lens) are zero-padded in S.
        :param q_lens: optional per-batch question lengths (same role).
        :return: [batch, num_ch, len_p]
        """
        S = []
        max_p_len = max(p_lens) if p_lens is not None else self.para_limit
        max_q_len = max(q_lens) if q_lens is not None else self.ques_limit

        # `pad` is a zeros tensor shaped like one similarity-feature column;
        # it is created lazily from the first in-range cell.
        # NOTE(review): this assumes the very first (i=0, j=0) cell is
        # in range (max_p_len >= 1 and max_q_len >= 1); otherwise `pad` is
        # still None when appended and the concat below would fail — confirm.
        pad = None
        v = None
        for j in range(self.ques_limit):
            V = []
            for i in range(self.para_limit):
                p = P[:, :, i]
                q = Q[:, :, j]
                # batch x dim x 1
                if i < max_p_len and j < max_q_len:
                    # Trilinear features for cell (i, j): [p; q; p*q].
                    v = nd.concat(*[p, q, p * q], dim=1).expand_dims(2)
                    V.append(v)
                else:
                    V.append(pad)
                if pad is None:
                    pad = nd.zeros_like(v)
            # [batch, 3*dim, para_limit] -> scored to [batch, 1, para_limit]
            V = nd.concat(*V, dim=2)
            V = self.conv_linear(V)
            S.append(V)

        # S: [batch, len_p, len_q] similarity matrix.
        S = nd.concat(*S, dim=1).reshape((-1, self.para_limit, self.ques_limit))
        # [batch, len_p, len_q]
        S1 = nd.softmax(S, axis=2)  # question
        S2 = nd.softmax(S, axis=1)  # passage

        # DCN-style attended representations (both [batch, dim, len_p]).
        P2Q = nd.batch_dot(Q, S1.swapaxes(1, 2))  # [batch, dim, len_p]
        Q2P = nd.batch_dot(P, nd.batch_dot(S1, S2.swapaxes(1, 2)))  # [batch, dim, len_p]
        out = nd.concat(*[P, P2Q, P * P2Q, P * Q2P], dim=1)
        out = self.dropout(out)
        return self.conv_out(out)


class ConvBlock(nn.Block):
    """A stack of LayerNorm + depthwise-separable 1-D convolutions.

    After every second conv layer a Dropout layer is inserted.

    :param num_conv_layers: number of (LayerNorm, conv) pairs.
    :param num_ch: channel count (input and output of every conv).
    :param k: kernel size.
    :param dropout: drop rate for the interleaved Dropout layers.
    """

    def __init__(self, num_conv_layers, num_ch, k, dropout=0.1):
        super(ConvBlock, self).__init__()
        with self.name_scope():
            self.convs = nn.Sequential()
            for i in range(num_conv_layers):
                self.convs.add(nn.LayerNorm())
                self.convs.add(DepthwiseSeparableConv(num_ch, num_ch, k))
                if (i + 1) % 2 == 0:
                    # BUG FIX: nn.Dropout takes the *drop* rate. The previous
                    # code passed 1 - dropout (a keep-probability, a common
                    # TF-port mistake), which dropped 90% of activations for
                    # dropout=0.1 — inconsistent with every other
                    # nn.Dropout(dropout) call in this file.
                    self.convs.add(nn.Dropout(dropout))

    def forward(self, x):
        return self.convs(x)


class EncoderBaseBlock(nn.Block):
    """One QANet encoder block: positional signal -> conv stack -> self-attention.

    :param nth: block index within the enclosing EncoderBlock (currently
        only informational).
    """

    def __init__(self, num_conv_layers, num_ch, k, query_units, num_heads, use_bias, nth, dropout=0.0):
        super(EncoderBaseBlock, self).__init__()
        with self.name_scope():
            self.conv_block = ConvBlock(num_conv_layers, num_ch, k, dropout=dropout)
            self.self_attn = SelfAttention(query_units, num_heads, use_bias, num_ch, dropout)
            self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Add the sinusoidal timing signal, run the convolutional stack,
        # then feed the length-last result into self-attention.
        out = add_timing_signal_1d(x)
        out = self.conv_block(out)
        out = self.self_attn(out.swapaxes(1, 2))
        return out


class EncoderBlock(nn.Block):
    """A sequence of EncoderBaseBlocks followed by a final dropout."""

    def __init__(self, num_blocks, num_conv_layers, num_ch, k, query_units, num_heads, use_bias, dropout=0.0):
        super(EncoderBlock, self).__init__()
        with self.name_scope():
            self.encoders = nn.Sequential()
            for idx in range(num_blocks):
                block = EncoderBaseBlock(num_conv_layers, num_ch, k, query_units,
                                         num_heads, use_bias, idx, dropout)
                self.encoders.add(block)
            self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        encoded = self.encoders(x)
        return self.dropout(encoded)


class Embedding(nn.Block):
    """Word + character embedding lookup, optionally initialised from
    pretrained embedding matrices stored as JSON files.

    When both pretrained files are given, the vocabulary/embedding sizes are
    taken from the loaded matrices and override any explicitly passed sizes.
    """

    def __init__(self, vocab_size=None, ch_size=None, word_emb_size=None, ch_emb_size=None,
                 pretrained_word_emb_file=None, pretrained_ch_emb_file=None):
        super(Embedding, self).__init__()
        self.pretrained_word_emb_file = pretrained_word_emb_file
        self.pretrained_ch_emb_file = pretrained_ch_emb_file
        self.vocab_size = vocab_size
        self.ch_size = ch_size
        self.word_emb_size = word_emb_size
        self.ch_emb_size = ch_emb_size

        if self.pretrained_word_emb_file is not None and self.pretrained_ch_emb_file is not None:
            self.pretrained_ch_emb, self.pretrained_word_emb = self.load_pretrained()
            # Pretrained matrices dictate the embedding dimensions.
            self.vocab_size, self.word_emb_size = self.pretrained_word_emb.shape
            self.ch_size, self.ch_emb_size = self.pretrained_ch_emb.shape

        with self.name_scope():
            self.emb_word = nn.Embedding(self.vocab_size, self.word_emb_size)
            self.emb_ch = nn.Embedding(self.ch_size, self.ch_emb_size)

    def load_pretrained(self):
        """Load and return (char_emb, word_emb) matrices from JSON files.

        Fix: the previous version used ``json.load(open(...))`` and never
        closed the file handles; context managers close them deterministically.
        """
        # fixme handle when no pretrained embedding
        with open(self.pretrained_ch_emb_file, 'r') as f:
            ch_emb = nd.array(json.load(f))
        with open(self.pretrained_word_emb_file, 'r') as f:
            word_emb = nd.array(json.load(f))
        return ch_emb, word_emb

    def forward(self, chars, words):
        """

        :param chars: Batch x Length x char_num
        :param words: Batch x Length
        :return: (emb_ch, emb_word) where emb_ch is
            Batch x Length x char_num x emb_size
        """
        return self.emb_ch(chars), self.emb_word(words)

    def initialize(self, **kwargs):
        """Initialise the child embeddings, then overwrite with pretrained
        weights (which are freed afterwards to save memory).

        NOTE(review): overrides Block.initialize without calling super();
        this only works because the embeddings are this block's sole
        children — confirm if more children are ever added.
        """
        # todo use from mxnet.contrib import text
        self.emb_ch.initialize(**kwargs)
        self.emb_word.initialize(**kwargs)
        if self.pretrained_ch_emb_file is not None:
            self.emb_ch.weight.set_data(self.pretrained_ch_emb)
            del self.pretrained_ch_emb
        if self.pretrained_word_emb_file is not None:
            self.emb_word.weight.set_data(self.pretrained_word_emb)
            del self.pretrained_word_emb


class Pointer(nn.Block):
    """Span pointer head: produces start/end scores from the three model
    encoder outputs (start from [M0; M1], end from [M0; M2])."""

    def __init__(self):
        super(Pointer, self).__init__()
        with self.name_scope():
            self.linear_start = nn.Conv1D(1, 3, padding=3 // 2)
            self.linear_end = nn.Conv1D(1, 3, padding=3 // 2)

    def forward(self, m0, m1, m2):
        # Concatenate along channels, then score each position with a
        # width-3 convolution (single output channel).
        start_scores = self.linear_start(nd.concat(m0, m1, dim=1))
        end_scores = self.linear_end(nd.concat(m0, m2, dim=1))
        return start_scores, end_scores


class EmbeddingHighway(nn.Block):
    """Fuse character and word embeddings into one representation.

    Character embeddings go through a 2-D depthwise-separable convolution and
    a max-pool over the character axis; the result is concatenated with the
    word embeddings, projected by a 1-D conv, and passed through a highway
    network.
    """

    def __init__(self, ch_emb_size, word_emb_size, out_ch, k, dropout, num_highway):
        super(EmbeddingHighway, self).__init__()
        with self.name_scope():
            self.relu = nn.Activation('relu')
            self.conv_ch = DepthwiseSeparableConv(ch_emb_size, out_ch, k, conv_dim=2)
            self.conv_emb = DepthwiseSeparableConv(word_emb_size + out_ch, out_ch, k, conv_dim=1)
            self.dropout_ch = nn.Dropout(dropout)
            self.dropout_word = nn.Dropout(dropout)
            self.highway = Highway(num_highway, out_ch, k, conv_dim=1, dropout=dropout)

    def forward(self, ch_emb: nd.NDArray, word_emb: nd.NDArray):
        """

        :param ch_emb:  Batch x Length x char_num x emb_size
        :param word_emb: Batch x Length x emb_size
        :return: Batch x out_ch x Length
        """
        shape = ch_emb.shape  # NOTE(review): unused local
        # To NCHW: Batch x emb_size x Length x char_num, for the 2-D conv.
        ch_emb = ch_emb.transpose(axes=(0, 3, 1, 2))
        ch_emb = self.dropout_ch(ch_emb)
        ch_emb = self.conv_ch(ch_emb)
        ch_emb = self.relu(ch_emb)
        # Max-pool over the character axis -> Batch x out_ch x Length.
        ch_emb = ch_emb.max(axis=3)

        word_emb = self.dropout_word(word_emb)
        word_emb = word_emb.swapaxes(1, 2)  # to dim Batch x Emb_size x Length
        # Concatenate channels: Batch x (out_ch + emb_size) x Length.
        emb = nd.concat(ch_emb, word_emb, dim=1)
        # Project to out_ch channels, then refine with the highway stack.
        emb = self.conv_emb(emb)
        emb = self.highway(emb)
        return emb


class QANet(nn.Block):
    """QANet reading-comprehension model (https://arxiv.org/abs/1804.09541).

    Pipeline: char/word embedding -> embedding highway -> one embedding
    encoder block per side -> passage-question attention -> three stacked
    model encoders -> span pointer producing start/end scores.
    """

    def __init__(self, config: Config):
        super(QANet, self).__init__()
        with self.name_scope():
            self.emb = Embedding(pretrained_ch_emb_file=config.char_emb_file,
                                 pretrained_word_emb_file=config.word_emb_file)

            self.dropout_w = nn.Dropout(config.dropout)
            self.emb_highway = EmbeddingHighway(config.char_dim, config.glove_dim, config.hidden,
                                                k=5, dropout=config.dropout, num_highway=2)

            self.dropout_encoder = nn.Dropout(config.dropout)
            # embedding encoder layer (separate weights for passage/question)
            self.p_emb_enc = EncoderBlock(
                num_blocks=1,
                num_conv_layers=4,
                num_ch=config.hidden,
                k=7,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=False
            )
            self.q_emb_enc = EncoderBlock(
                num_blocks=1,
                num_conv_layers=4,
                num_ch=config.hidden,
                k=7,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=False
            )

            self.pq_attn = PQAttention(
                config.para_limit,
                config.ques_limit,
                num_ch=config.hidden,
                dropout=config.dropout
            )

            # model encoder layer
            model_enc_blk0 = EncoderBlock(
                num_blocks=1,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=5,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=False
            )
            model_enc_blk1 = EncoderBlock(
                num_blocks=1,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=5,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=False
            )
            model_enc_blk2 = EncoderBlock(
                num_blocks=1,
                num_conv_layers=2,
                num_ch=config.hidden,
                k=5,
                query_units=config.hidden,
                num_heads=config.num_heads,
                dropout=config.dropout,
                use_bias=False
            )

            self.model_enc_blk0 = nn.Sequential()

            self.model_enc_blk1 = nn.Sequential()

            self.model_enc_blk2 = nn.Sequential()

            # NOTE(review): each Sequential gets the SAME block instance
            # added 7 times, so the 7 stacked applications share one set of
            # weights. Confirm this is intended — the QANet paper stacks 7
            # blocks whose weights are shared across the three model-encoder
            # passes, which differs from this wiring.
            for _ in range(7):
                self.model_enc_blk0.add(model_enc_blk0)
                self.model_enc_blk1.add(model_enc_blk1)
                self.model_enc_blk2.add(model_enc_blk2)

            self.pointer = Pointer()

    def forward(self, p_chars, p_words, q_chars, q_words, p_lens=None, q_lens=None, p_mask=None, q_mask=None):
        """Run the full model.

        :param p_chars/p_words: passage character/word id tensors.
        :param q_chars/q_words: question character/word id tensors.
        :param p_lens/q_lens: optional true lengths, used by PQAttention.
        :param p_mask/q_mask: accepted but unused here — confirm.
        :return: (start, end) squeezed per-position scores. Despite the
            local names these are unnormalised logits: Pointer applies no
            softmax.
        """
        p_emb_ch, p_emb_word = self.emb(p_chars, p_words)
        q_emb_ch, q_emb_word = self.emb(q_chars, q_words)

        # Fuse char/word embeddings -> Batch x hidden x Length.
        p_out = self.emb_highway(p_emb_ch, p_emb_word)
        q_out = self.emb_highway(q_emb_ch, q_emb_word)
        p_out = self.p_emb_enc(p_out)
        q_out = self.q_emb_enc(q_out)

        # Passage-question attention -> Batch x hidden x para_limit.
        out = self.pq_attn(p_out, q_out, p_lens, q_lens)

        out = self.dropout_encoder(out)
        # Three model-encoder passes; M0/M1/M2 feed the pointer.
        M0 = self.model_enc_blk0(out)
        M1 = self.model_enc_blk1(M0)
        out = self.dropout_encoder(M1)
        M2 = self.model_enc_blk2(out)

        start_probs, end_probs = self.pointer(M0, M1, M2)

        return start_probs.squeeze(), end_probs.squeeze()
