import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (
    LayerNormalization,
    Embedding,
    Dropout,
)


def shape_list(x):
    """Return the shape of ``x`` as a list, using dynamic ``tf.shape``
    entries wherever the static shape is unknown (``None``)."""
    static_shape = x.shape.as_list()
    dynamic_shape = tf.shape(x)
    dims = []
    for axis, dim in enumerate(static_shape):
        dims.append(dim if dim is not None else dynamic_shape[axis])
    return dims


def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation)."""
    coeff = np.sqrt(2 / np.pi)
    inner = coeff * (x + 0.044715 * tf.pow(x, 3))
    return 0.5 * x * (1 + tf.tanh(inner))


def split_states(x, n):
    """Reshape the last dimension of x into [n, x.shape[-1]/n]."""
    dims = shape_list(x)
    leading, last = dims[:-1], dims[-1]
    return tf.reshape(x, leading + [n, last // n])


def merge_states(x):
    """Smash the last two dimensions of x into a single dimension."""
    dims = shape_list(x)
    leading, a, b = dims[:-2], dims[-2], dims[-1]
    return tf.reshape(x, leading + [a * b])


def attention_mask(nd, ns, *, dtype):
    """1's in the lower triangle, counting from the lower right corner.

    Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
    """
    rows = tf.range(nd)[:, None]
    cols = tf.range(ns)[None, :]
    # Equivalent to rows >= cols - ns + nd, rearranged.
    keep = rows + (ns - nd) >= cols
    return tf.cast(keep, dtype)


@tf.autograph.experimental.do_not_convert
def sparse_attention_mask(nd, ns, stride, c, *, dtype):
    """Build a strided ("sparse") causal attention mask of shape [nd, ns].

    Each query position may attend to:
      * the last ``c`` positions of every ``stride``-sized block
        (the summary columns), and
      * every position inside its own ``stride``-sized block,
    but never to positions after itself (causality).

    Only the last ``nd`` query rows are returned, which matters when
    decoding incrementally with a cached past (ns > nd).
    """
    # Bug fix: `np.bool` was removed in NumPy 1.24; the builtin `bool`
    # is the supported boolean dtype alias.
    layout = np.zeros([ns, ns], dtype=bool)
    # Columns: every query sees the last `c` keys of each stride block.
    for idx in range(c):
        layout[:, (stride - 1 - idx):: stride] = 1
    for q_idx in range(ns):
        # Rows: each query sees all keys within its own stride block.
        row = q_idx // stride
        layout[q_idx, row * stride: (row + 1) * stride] = 1
        # Any query cannot attend to keys above it
        layout[q_idx, q_idx + 1:] = 0
    return tf.cast(
        layout[
        (ns - nd):,
        ],
        dtype,
    )


class MLP(tf.keras.layers.Layer):
    """Position-wise feed-forward block: expand to 4*nx, gelu, project
    back to nx, then apply residual dropout."""

    def __init__(self, res_dropout, n_layer):
        super(MLP, self).__init__()
        self.dropout = Dropout(res_dropout)
        self.n_layer = n_layer

    def build(self, input_shape):
        # Hidden width is four times the model width, per the usual
        # transformer MLP sizing.
        self.nx = input_shape[-1]
        self.n_state = 4 * self.nx
        self.conv1d_4 = Conv1d(self.n_state, self.n_layer, name="conv1d_mlp_4")
        self.conv1d_1 = Conv1d(self.nx, self.n_layer, name="conv1d_mlp_1")
        super(MLP, self).build(input_shape)

    def call(self, inputs, **kwargs):
        hidden = gelu(self.conv1d_4(inputs))
        return self.dropout(self.conv1d_1(hidden))


class Conv1d(tf.keras.layers.Layer):
    """1x1 "convolution": a dense projection applied over the last axis,
    implemented as a flattened matmul (GPT-2 style).

    Weight init stddev is scaled down by 1/sqrt(n_layer) so residual
    contributions stay bounded with depth.
    """

    def __init__(self, units, n_layer=1, **kwargs):
        # Bug fix: **kwargs (e.g. `name="conv1d_mlp_4"`) were accepted but
        # never forwarded to the base Layer, so caller-supplied layer
        # names were silently ignored.
        super(Conv1d, self).__init__(**kwargs)
        self.units = units
        self.n_layer = n_layer

    def build(self, input_shape):
        self.nx = input_shape[-1]
        w_init_stdev = 0.02 * (
                1.0 / tf.sqrt(tf.cast(self.n_layer, dtype="float32")))

        w_init = tf.random_normal_initializer(stddev=w_init_stdev)
        self.w = tf.Variable(
            initial_value=w_init(shape=(1, self.nx, self.units),
                                 dtype="float32"),
            trainable=True,
            name="conv1d_w",
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            # (self.units,) — the original passed a bare int (the parens
            # were not a tuple); make the 1-D shape explicit.
            initial_value=b_init(shape=(self.units,), dtype="float32"),
            trainable=True,
            name="conv1d_b",
        )
        super(Conv1d, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Project the last axis from nx to units, preserving leading dims."""
        start = shape_list(inputs)[:-1]
        inputs_flat = tf.reshape(inputs, (-1, self.nx))
        w_flat = tf.reshape(self.w, [-1, self.units])
        output = tf.reshape(
            tf.matmul(inputs_flat, w_flat) + self.b, start + [self.units]
        )
        return output

    def get_config(self):
        config = {"units": self.units, "n_layer": self.n_layer}
        base_config = super(Conv1d, self).get_config()
        return {**base_config, **config}


class Attention(tf.keras.layers.Layer):
    """Masked multi-head self-attention with key/value caching.

    In ``mode == "sparse"`` layers alternate between a dense causal mask
    (even ``nth_layer``) and a strided sparse mask (odd ``nth_layer``);
    otherwise every layer uses the dense causal mask.
    """

    def __init__(self, n_head, attn_dropout, n_layer, mode, stride, c,
                 nth_layer):
        super(Attention, self).__init__()
        self.n_head = n_head
        # Stored so get_config() can serialize it (was missing before).
        self.attn_dropout = attn_dropout
        self.dropout = Dropout(attn_dropout)
        self.n_layer = n_layer
        self.mode = mode
        self.stride = stride
        self.c = c
        self.nth_layer = nth_layer

    def build(self, input_shape):
        self.nx = input_shape[-1]
        # One fused projection produces q, k, v; one output projection.
        self.con1d_3 = Conv1d(self.nx * 3, self.n_layer, name="conv1d_att_3")
        self.con1d_1 = Conv1d(self.nx, self.n_layer, name="conv1d_att_1")
        super(Attention, self).build(input_shape)

    def call(self, inputs, past=None):
        """Attend over inputs (plus cached past keys/values, if any).

        Args:
            inputs: hidden states, [batch, sequence, features].
            past: optional cached [k, v] stack from earlier steps
                (stacked along axis 1), or None.

        Returns:
            (output, present) — attention output with the same shape as
            ``inputs``, and the new [k, v] stack for caching.
        """
        c = self.con1d_3(inputs)
        q, k, v = map(self.split_heads, tf.split(c, 3, axis=2))
        present = tf.stack([k, v], axis=1)
        if past is not None:
            pk, pv = tf.unstack(past, axis=1)
            # Prepend cached keys/values along the sequence axis.
            k = tf.concat([pk, k], axis=-2)
            v = tf.concat([pv, v], axis=-2)
        a = Attention._multihead_attn(q, k, v, self.mode, self.stride, self.c,
                                      self.nth_layer)
        a = Attention._merge_heads(a)
        a = self.con1d_1(a)
        a = self.dropout(a)
        return a, present

    def split_heads(self, x):
        # [batch, seq, features] -> [batch, heads, seq, head_dim]
        return tf.transpose(split_states(x, self.n_head), [0, 2, 1, 3])

    @staticmethod
    def _merge_heads(x):
        # [batch, heads, seq, head_dim] -> [batch, seq, features]
        return merge_states(tf.transpose(x, [0, 2, 1, 3]))

    @staticmethod
    def _mask_attn_weights(w):
        # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
        _, _, nd, ns = shape_list(w)
        b = attention_mask(nd, ns, dtype=w.dtype)
        b = tf.reshape(b, [1, 1, nd, ns])
        # Large negative bias on masked positions -> ~0 after softmax.
        w = w * b - tf.cast(1e10, w.dtype) * (1 - b)
        return w

    @staticmethod
    def _sparse_mask_attn_weights(w, stride, c):
        _, _, nd, ns = shape_list(w)
        b = sparse_attention_mask(nd, ns, stride, c, dtype=w.dtype)
        b = tf.reshape(b, [1, 1, nd, ns])
        w = w * b - tf.cast(1e10, w.dtype) * (1 - b)
        return w

    @staticmethod
    def _multihead_attn(q, k, v, mode="all", stride=None, c=None, nth_layer=1):
        # q, k, v have shape [batch, heads, sequence, features]
        w = tf.matmul(q, k, transpose_b=True)
        # Scaled dot-product attention.
        w = w * (1 / tf.sqrt(tf.cast(v.shape[-1], w.dtype)))
        if mode == "sparse":
            # Alternate dense causal / sparse strided masks by layer depth.
            if nth_layer % 2 == 0:
                w = Attention._mask_attn_weights(w)
            else:
                w = Attention._sparse_mask_attn_weights(w, stride, c)
        else:
            w = Attention._mask_attn_weights(w)
        w = tf.nn.softmax(w, axis=-1)
        a = tf.matmul(w, v)
        return a

    def get_config(self):
        # Bug fix: attn_dropout and nth_layer were omitted, so a layer
        # rebuilt via from_config() could not be constructed faithfully.
        config = {
            "n_head": self.n_head,
            "attn_dropout": self.attn_dropout,
            "n_layer": self.n_layer,
            "mode": self.mode,
            "stride": self.stride,
            "c": self.c,
            "nth_layer": self.nth_layer,
        }
        base_config = super(Attention, self).get_config()
        return {**base_config, **config}


class Block(tf.keras.layers.Layer):
    """One transformer block: pre-LayerNorm attention and pre-LayerNorm
    MLP, each wrapped in a residual connection."""

    def __init__(
            self, n_head, epsilon, attn_dropout, res_dropout, n_layer, mode,
            stride, c, nth_layer
    ):
        super(Block, self).__init__()
        self.n_head = n_head
        self.epsilon = epsilon
        self.attn_dropout = attn_dropout
        self.res_dropout = res_dropout
        # Bug fix: mode/stride/c were read by get_config() but never
        # stored, so serialization raised AttributeError.
        self.mode = mode
        self.stride = stride
        self.c = c
        self.n_layer = n_layer
        self.nth_layer = nth_layer
        self.layer_norm_1 = LayerNormalization(epsilon=epsilon)
        self.attention = Attention(n_head, attn_dropout, n_layer, mode, stride,
                                   c, nth_layer)
        self.layer_norm_2 = LayerNormalization(epsilon=epsilon)
        self.mlp = MLP(res_dropout, n_layer)

    def call(self, inputs, past=None):
        """Return (hidden_states, present) where present caches k/v."""
        norm_1 = self.layer_norm_1(inputs)
        att, present = self.attention(norm_1, past)

        x = tf.math.add(inputs, att)
        mlp_in = self.layer_norm_2(x)
        mlp_out = self.mlp(mlp_in)
        x = tf.math.add(x, mlp_out)

        return x, present

    def get_config(self):
        config = {
            "n_head": self.n_head,
            "epsilon": self.epsilon,
            "attn_dropout": self.attn_dropout,
            "res_dropout": self.res_dropout,
            "n_layer": self.n_layer,
            "mode": self.mode,
            "stride": self.stride,
            "c": self.c,
            "nth_layer": self.nth_layer,
        }
        base_config = super(Block, self).get_config()
        return {**base_config, **config}


def expand_tile(value, size):
    """Add a new leading axis of the given size by tiling."""
    tensor = tf.convert_to_tensor(value, name="value")
    rank = tensor.shape.ndims
    tiled_shape = [size] + [1] * rank
    return tf.tile(tf.expand_dims(tensor, axis=0), tiled_shape)


def positions_for(tokens, past_length):
    """Per-token position indices, offset by the cached past length,
    tiled over the batch."""
    token_shape = shape_list(tokens)
    batch_size, n_steps = token_shape[0], token_shape[1]
    return expand_tile(past_length + tf.range(n_steps), batch_size)


class EmbeddingLayer(tf.keras.layers.Layer):
    """Token embedding (wte) plus learned positional embedding (wpe).

    ``call`` also returns the token-embedding matrix so the output
    projection can tie weights with it.
    """

    def __init__(self, vocab_size, emb_size, n_ctx):
        super(EmbeddingLayer, self).__init__()
        self.vocab_size = vocab_size
        self.emb_size = emb_size
        self.n_ctx = n_ctx

        self.wte_embedding = Embedding(
            self.vocab_size,
            self.emb_size,
            embeddings_initializer=tf.random_normal_initializer(stddev=0.02),
        )
        wpe_init = tf.random_normal_initializer(stddev=0.01)
        self.wpe = tf.Variable(
            initial_value=wpe_init(
                shape=(
                    self.n_ctx,
                    self.emb_size,
                ),
                dtype="float32",
            ),
            trainable=True,
            # Named so the variable is identifiable in checkpoints and
            # summaries (was previously the anonymous default "Variable").
            name="wpe",
        )

    def call(self, inputs, past=None):
        """Return (embeddings, wte matrix) for token ids ``inputs``."""
        wte_emb = self.wte_embedding(inputs)
        wte = self.wte_embedding.embeddings
        # Offset positions by the cached sequence length when decoding
        # incrementally with a past.
        past_length = 0 if past is None else shape_list(past)[-2]
        wpe_emb = tf.gather(self.wpe, positions_for(inputs, past_length))
        emb = wte_emb + wpe_emb
        return emb, wte

    def get_config(self):
        # Bug fix: the config previously contained a live
        # tf.random_normal_initializer under "wpe_init"; it is not
        # JSON-serializable and __init__ does not accept that kwarg, so
        # from_config() would fail. Only real constructor args belong here.
        config = {
            "vocab_size": self.vocab_size,
            "emb_size": self.emb_size,
            "n_ctx": self.n_ctx,
        }
        base_config = super(EmbeddingLayer, self).get_config()
        return {**base_config, **config}


class Mat(tf.keras.layers.Layer):
    """Projects hidden states to vocabulary logits using the (tied)
    token-embedding matrix."""

    def __init__(self):
        super(Mat, self).__init__()

    def call(self, h, wte):
        # logits = h · wteᵀ — weight tying with the input embedding.
        return tf.matmul(h, wte, transpose_b=True)


class GPT2(tf.keras.layers.Layer):
    """GPT-2 style transformer stack: embeddings, n_layer Blocks, a final
    LayerNorm, and a tied output projection to vocabulary logits."""

    def __init__(self, params=None, **kwargs):
        # Bug fix: Keras Layer kwargs (e.g. `name="gpt_2"`) must reach the
        # base class; the original called super().__init__() with nothing,
        # silently dropping the caller-supplied name.
        name = kwargs.pop("name", None)
        super(GPT2, self).__init__(name=name)
        # Hyper-parameters may arrive either as a dict or as keyword args;
        # the two branches previously duplicated twelve assignments.
        cfg = kwargs if params is None else params
        self.n_ctx = cfg["n_ctx"]
        self.n_layer = cfg["n_layer"]
        self.n_vocab = cfg["n_vocab"]
        self.n_emb = cfg["n_emb"]
        self.n_head = cfg["n_head"]
        self.epsilon = cfg["epsilon"]
        self.embed_dropout = cfg["embed_dropout"]
        self.attn_dropout = cfg["attn_dropout"]
        self.res_dropout = cfg["res_dropout"]
        self.mode = cfg["mode"]
        self.stride = cfg["stride"]
        self.c = cfg["c"]
        self.embedding = EmbeddingLayer(self.n_vocab, self.n_emb, self.n_ctx)
        self.embedding_dropout = tf.keras.layers.Dropout(self.embed_dropout)
        self.block = [
            Block(
                self.n_head,
                self.epsilon,
                self.attn_dropout,
                self.res_dropout,
                self.n_layer,
                self.mode,
                self.stride,
                self.c,
                i
            )
            for i in range(self.n_layer)
        ]
        self.norm = LayerNormalization(epsilon=self.epsilon)
        self.mat = Mat()

    def call(self, inputs, past=None):
        """Run the full stack.

        Args:
            inputs: token ids, [batch, sequence].
            past: optional cached k/v for all layers (stacked on axis 1),
                or None for a fresh sequence.

        Returns:
            dict with "logits" ([batch, sequence, n_vocab]) and "present"
            (the stacked per-layer k/v cache).
        """
        results = {}
        emb, wte = self.embedding(inputs, past)
        h = self.embedding_dropout(emb)
        presents = []
        pasts = tf.unstack(past, axis=1) if past is not None else \
            [None] * self.n_layer
        assert len(pasts) == self.n_layer
        # Renamed loop variables: the original's `past` shadowed the
        # function parameter.
        for layer_idx, layer_past in enumerate(pasts):
            h, present = self.block[layer_idx](h, layer_past)
            presents.append(present)
        results["present"] = tf.stack(presents, axis=1)
        h = self.norm(h)
        logits = self.mat(h, wte)
        results["logits"] = logits
        return results

    def get_config(self):
        config = {
            "n_ctx": self.n_ctx,
            "n_layer": self.n_layer,
            "n_vocab": self.n_vocab,
            "n_emb": self.n_emb,
            "n_head": self.n_head,
            "epsilon": self.epsilon,
            "embed_dropout": self.embed_dropout,
            "attn_dropout": self.attn_dropout,
            "res_dropout": self.res_dropout,
            "mode": self.mode,
            "stride": self.stride,
            "c": self.c,
        }
        base_config = super(GPT2, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class GPTModel(tf.keras.Model):
    """Keras Model wrapper exposing [logits, presents] from the GPT2 stack."""

    def __init__(self, params, *args, **kwargs):
        super(GPTModel, self).__init__(*args, **kwargs)
        self.gpt = GPT2(params, name="gpt_2")

    def call(self, inputs, past=None, **kwargs):
        result = self.gpt(inputs, past)
        return [result["logits"], result["present"]]