import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (
    LayerNormalization,
    Dropout,
)

# Enable memory growth on every visible GPU so TensorFlow allocates GPU
# memory incrementally instead of reserving the whole device up front.
# Must run before any GPU is initialized, hence placed at import time.
gpus = tf.config.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)


def shape_list(x):
    """Return the shape of `x` as a list, preferring static dimensions.

    Axes whose size is unknown at graph-construction time are filled in
    with the corresponding entries of the dynamic `tf.shape` tensor, so
    the result is always usable for reshaping.
    """
    static_shape = x.shape.as_list()
    dynamic_shape = tf.shape(x)
    dims = []
    for axis, size in enumerate(static_shape):
        dims.append(size if size is not None else dynamic_shape[axis])
    return dims


def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation.

    Computes 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    return 0.5 * x * (1 + tf.tanh(inner))


def split_states(x, n):
    """Split the last axis of `x` (size m) into two axes [n, m // n]."""
    *leading, last = shape_list(x)
    new_shape = leading + [n, last // n]
    return tf.reshape(x, new_shape)


def merge_states(x):
    """Collapse the final two axes of `x` into a single axis (inverse of split_states)."""
    *leading, a, b = shape_list(x)
    merged_shape = leading + [a * b]
    return tf.reshape(x, merged_shape)


def attention_mask(nd, ns, *, dtype):
    """Causal attention mask of shape [nd, ns].

    Entry (i, j) is 1 when query position i may attend to key position j,
    i.e. the mask is lower-triangular aligned to the bottom-right corner
    (so cached past keys, where ns > nd, are fully visible).
    """
    rows = tf.range(nd)[:, None]
    cols = tf.range(ns)[None, :]
    keep = rows >= cols - ns + nd
    return tf.cast(keep, dtype)


class MLP(tf.keras.layers.Layer):
    """Position-wise feed-forward block of a transformer layer.

    Projects the input up to 4x its feature width, applies GELU, projects
    back down to the original width, then applies residual dropout.
    """

    def __init__(self, res_dropout, n_layer):
        super(MLP, self).__init__()
        # Stored so get_config() can serialize the layer.
        self.res_dropout = res_dropout
        self.dropout = Dropout(res_dropout)
        self.n_layer = n_layer
        # Resolved in build() once the input feature size is known.
        self.nx = None
        self.n_state = None
        self.conv1d_1 = None
        self.conv1d_4 = None

    def build(self, input_shape):
        self.nx = input_shape[-1]
        self.n_state = input_shape[-1] * 4

        self.conv1d_4 = Conv1d(self.n_state, self.n_layer, name='conv1d_mlp_4')
        self.conv1d_1 = Conv1d(self.nx, self.n_layer, name='conv1d_mlp_1')
        super(MLP, self).build(input_shape)

    def call(self, inputs, **kwargs):
        h = self.conv1d_4(inputs)
        h = gelu(h)
        output = self.conv1d_1(h)
        output = self.dropout(output)
        return output

    def get_config(self):
        # Added for consistency with Conv1d/Attention/Block so this layer
        # can be serialized and rebuilt from its config.
        config = {'res_dropout': self.res_dropout, 'n_layer': self.n_layer}
        base_config = super(MLP, self).get_config()
        return {**base_config, **config}


class Conv1d(tf.keras.layers.Layer):
    """Pointwise (kernel-size-1) 1-D convolution: a per-position dense layer.

    Implemented as a flattened matmul against a [1, nx, units] weight plus
    a bias, following the GPT-2 reference implementation.
    """

    def __init__(self, units, n_layer=1, **kwargs):
        super(Conv1d, self).__init__(**kwargs)
        self.units = units
        self.n_layer = n_layer
        # Populated in build() once the input width is known.
        self.nx = None
        self.w = None
        self.b = None

    def build(self, input_shape):
        self.nx = input_shape[-1]
        # Initialization stddev shrinks with model depth, as in GPT-2.
        w_init_stdev = 0.02 * (
                1.0 / tf.sqrt(tf.cast(self.n_layer, dtype='float32')))

        weight_init = tf.random_normal_initializer(stddev=w_init_stdev)
        self.w = tf.Variable(
            initial_value=weight_init(
                shape=(1, self.nx, self.units), dtype='float32'),
            trainable=True,
            name='conv1d_w',
        )
        bias_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=bias_init(shape=self.units, dtype='float32'),
            trainable=True,
            name='conv1d_b',
        )
        super(Conv1d, self).build(input_shape)

    def call(self, inputs, **kwargs):
        leading = shape_list(inputs)[:-1]
        flat_in = tf.reshape(inputs, (-1, self.nx))
        flat_w = tf.reshape(self.w, [-1, self.units])
        flat_out = tf.matmul(flat_in, flat_w) + self.b
        return tf.reshape(flat_out, leading + [self.units])

    def get_config(self):
        base_config = super(Conv1d, self).get_config()
        base_config.update(units=self.units, n_layer=self.n_layer)
        return base_config


class Attention(tf.keras.layers.Layer):
    """Masked multi-head self-attention with optional cached past keys/values.

    Projects the input to q/k/v with one Conv1d, applies scaled dot-product
    attention under a causal mask, merges heads, and projects back with a
    second Conv1d followed by dropout. Returns (output, present) where
    `present` stacks the new k/v for incremental decoding.
    """

    def __init__(self, n_head, attn_dropout, n_layer, mode, stride, c,
                 nth_layer):
        super(Attention, self).__init__()
        self.n_head = n_head
        # Stored so get_config() can serialize the dropout rate (previously
        # only the Dropout sublayer held it and config omitted it).
        self.attn_dropout = attn_dropout
        self.dropout = Dropout(attn_dropout)
        self.n_layer = n_layer
        self.mode = mode
        self.stride = stride
        self.c = c
        self.nth_layer = nth_layer
        # Resolved in build() once the input feature size is known.
        self.nx = None
        self.con1d_1 = None
        self.con1d_3 = None

    def build(self, input_shape):
        self.nx = input_shape[-1]
        self.con1d_3 = Conv1d(self.nx * 3, self.n_layer, name='conv1d_att_3')
        self.con1d_1 = Conv1d(self.nx, self.n_layer, name='conv1d_att_1')
        super(Attention, self).build(input_shape)

    def call(self, inputs, past=None):
        c = self.con1d_3(inputs)
        q, k, v = map(self.split_heads, tf.split(c, 3, axis=2))
        present = tf.stack([k, v], axis=1)
        if past is not None:
            # Prepend cached keys/values along the sequence axis.
            pk, pv = tf.unstack(past, axis=1)
            k = tf.concat([pk, k], axis=-2)
            v = tf.concat([pv, v], axis=-2)
        a = Attention._multihead_attn(q, k, v)
        a = Attention._merge_heads(a)
        a = self.con1d_1(a)
        a = self.dropout(a)
        return a, present

    def split_heads(self, x):
        # [batch, seq, features] -> [batch, heads, seq, features/heads]
        return tf.transpose(split_states(x, self.n_head), [0, 2, 1, 3])

    @staticmethod
    def _merge_heads(x):
        # Inverse of split_heads.
        return merge_states(tf.transpose(x, [0, 2, 1, 3]))

    @staticmethod
    def _mask_attn_weights(w):
        # w has shape [batch, heads, dst_seq, src_seq].
        _, _, nd, ns = shape_list(w)
        b = attention_mask(nd, ns, dtype=w.dtype)
        b = tf.reshape(b, [1, 1, nd, ns])
        # Large negative bias on masked positions so softmax zeroes them.
        w = w * b - tf.cast(1e10, w.dtype) * (1 - b)
        return w

    @staticmethod
    def _multihead_attn(q, k, v):
        # q, k, v have shape [batch, heads, sequence, features]
        w = tf.matmul(q, k, transpose_b=True)
        # Scale by 1/sqrt(d_k) before masking and softmax.
        w = w * (1 / tf.sqrt(tf.cast(v.shape[-1], w.dtype)))
        w = Attention._mask_attn_weights(w)
        w = tf.nn.softmax(w, axis=-1)
        a = tf.matmul(w, v)
        return a

    def get_config(self):
        config = {
            'n_head': self.n_head,
            # 'attn_dropout' and 'nth_layer' were missing, so the layer
            # could not be reconstructed from its config.
            'attn_dropout': self.attn_dropout,
            'n_layer': self.n_layer,
            'mode': self.mode,
            'stride': self.stride,
            'c': self.c,
            'nth_layer': self.nth_layer,
        }
        base_config = super(Attention, self).get_config()
        return {**base_config, **config}


class Block(tf.keras.layers.Layer):
    """One pre-norm transformer block: LayerNorm -> attention -> residual,
    then LayerNorm -> MLP -> residual."""

    def __init__(
            self, n_head, epsilon, attn_dropout, res_dropout, n_layer, mode,
            stride, c, nth_layer
    ):
        super(Block, self).__init__()
        self.n_head = n_head
        self.epsilon = epsilon
        self.attn_dropout = attn_dropout
        self.res_dropout = res_dropout
        # BUG FIX: mode/stride/c were only forwarded to Attention and never
        # stored, so get_config() raised AttributeError when called.
        self.mode = mode
        self.stride = stride
        self.c = c
        self.layer_norm_1 = LayerNormalization(epsilon=epsilon)
        self.attention = Attention(n_head, attn_dropout, n_layer, mode, stride,
                                   c, nth_layer)
        self.layer_norm_2 = LayerNormalization(epsilon=epsilon)
        self.mlp = MLP(res_dropout, n_layer)
        self.n_layer = n_layer
        self.nth_layer = nth_layer

    def call(self, inputs, past=None):
        norm_1 = self.layer_norm_1(inputs)
        att, present = self.attention(norm_1, past)

        # Residual connection around attention, then around the MLP.
        x = tf.math.add(inputs, att)
        mlp_in = self.layer_norm_2(x)
        mlp_out = self.mlp(mlp_in)
        x = tf.math.add(x, mlp_out)

        return x, present

    def get_config(self):
        config = {
            'n_head': self.n_head,
            'epsilon': self.epsilon,
            'attn_dropout': self.attn_dropout,
            'res_dropout': self.res_dropout,
            'n_layer': self.n_layer,
            'mode': self.mode,
            'stride': self.stride,
            'c': self.c,
            'nth_layer': self.nth_layer,
        }
        base_config = super(Block, self).get_config()
        return {**base_config, **config}


def expand_tile(value, size):
    """Prepend a new axis to `value` and tile it `size` times along that axis."""
    tensor = tf.convert_to_tensor(value, name='value')
    rank = tensor.shape.ndims
    expanded = tf.expand_dims(tensor, axis=0)
    return tf.tile(expanded, [size] + [1] * rank)


def positions_for(batch_size, n_steps, past_length):
    return expand_tile(past_length + tf.range(n_steps), batch_size)


class GPTIntermediate(tf.keras.layers.Layer):
    """Stack of transformer Blocks over an externally supplied embedding layer.

    Embeds the inputs, applies embedding dropout, runs `n_layer` Blocks
    (optionally consuming cached `past` key/value states), and applies a
    final LayerNormalization. Returns the normalized hidden states only;
    per-layer `present` states are computed but not returned.
    """

    def __init__(self, params: dict,
                 embedding_layer: tf.keras.layers.Layer, **kwargs):
        super(GPTIntermediate, self).__init__(name=kwargs.get('name'))
        self.n_ctx = params['n_ctx']
        self.n_layer = params['n_layer']
        self.n_vocab = params['n_vocab']
        self.n_emb = params['n_emb']
        self.n_head = params['n_head']
        self.epsilon = params['epsilon']
        self.embed_dropout = params['embed_dropout']
        self.attn_dropout = params['attn_dropout']
        self.res_dropout = params['res_dropout']
        self.mode = params['mode']
        self.stride = params['stride']
        self.c = params['c']
        self.embedding_dropout = tf.keras.layers.Dropout(self.embed_dropout)
        self.block = [
            Block(
                self.n_head,
                self.epsilon,
                self.attn_dropout,
                self.res_dropout,
                self.n_layer,
                self.mode,
                self.stride,
                self.c,
                i
            )
            for i in range(self.n_layer)
        ]
        self.norm = LayerNormalization(epsilon=self.epsilon)
        self.embedding = embedding_layer

    def call(self, inputs, past=None):
        emb = self.embedding(inputs, past)
        h = self.embedding_dropout(emb)
        # One cached past per block, or None for each when decoding from scratch.
        pasts = tf.unstack(past, axis=1) if past is not None else \
            [None] * self.n_layer
        assert len(pasts) == self.n_layer
        # Renamed loop variables: the original shadowed the `past` parameter.
        for layer_idx, layer_past in enumerate(pasts):
            h, present = self.block[layer_idx](h, layer_past)
        h = self.norm(h)
        return h

    def get_config(self):
        config = {
            'n_ctx': self.n_ctx,
            'n_layer': self.n_layer,
            'n_vocab': self.n_vocab,
            'n_emb': self.n_emb,
            'n_head': self.n_head,
            'epsilon': self.epsilon,
            'embed_dropout': self.embed_dropout,
            'attn_dropout': self.attn_dropout,
            'res_dropout': self.res_dropout,
            'mode': self.mode,
            'stride': self.stride,
            'c': self.c,
        }
        base_config = super(GPTIntermediate, self).get_config()
        # Merge style unified with the other layers in this file.
        return {**base_config, **config}
