import tensorflow as tf
from tensorflow.keras.layers import Embedding

from transformer_decoder import GPTIntermediate, positions_for, shape_list


class EmbeddingLayer(tf.keras.layers.Layer):
    """Combine company-id, per-step feature, and positional embeddings.

    ``call`` returns ``ft_emb * cpy_id_emb + wpe_emb``: the per-step feature
    embedding gated (element-wise multiplied) by the company embedding, plus
    a learned positional embedding. Output shape: (batch, n_step, emb_size).
    """

    def __init__(self, company_count, emb_size, n_ctx):
        """
        Args:
            company_count: number of distinct company ids (embedding vocab size).
            emb_size: dimensionality of all embeddings.
            n_ctx: maximum context length for the positional embedding table.
        """
        super().__init__()
        self.company_count = company_count
        self.emb_size = emb_size
        self.n_ctx = n_ctx

        # Company-id lookup table: (company_count, emb_size).
        self.cpy_embedding = Embedding(
            self.company_count,
            self.emb_size,
            embeddings_initializer=tf.random_normal_initializer(stddev=0.02),
        )
        # Learned positional embeddings, one row per context position.
        wpe_init = tf.random_normal_initializer(stddev=0.01)
        self.wpe = tf.Variable(
            initial_value=wpe_init(
                shape=(self.n_ctx, self.emb_size),
                dtype='float32',
            ),
            trainable=True,
        )
        # Linear projection of the per-step feature vector into emb_size.
        self.ft_emb = tf.keras.layers.Dense(
            emb_size, use_bias=False,
            kernel_initializer=tf.random_normal_initializer(stddev=0.01),
        )

    def call(self, inputs, past=None):
        """Embed ``(c_id, ft)`` into a sequence of vectors.

        Args:
            inputs: tuple ``(c_id, ft)``. ``c_id`` holds one company id per
                example; ``ft`` is the per-step feature tensor, assumed to be
                (batch, n_step, n_features) — TODO confirm against caller.
            past: optional cached activations; only its second-to-last
                dimension (the cached sequence length) is used to offset
                positional indices for incremental decoding.

        Returns:
            Tensor of shape (batch, n_step, emb_size).
        """
        c_id, ft = inputs

        batch_size = tf.shape(ft)[0]
        n_step = tf.shape(ft)[1]

        # (batch, 1, emb_size) — broadcasts over the time axis below.
        cpy_id_emb = tf.reshape(
            self.cpy_embedding(c_id), (batch_size, 1, self.emb_size)
        )

        # (batch, n_step, emb_size)
        ft_emb = self.ft_emb(ft)

        # Offset positions by the cached length so positional indices stay
        # consistent when decoding incrementally with `past`.
        past_length = 0 if past is None else shape_list(past)[-2]
        wpe_emb = tf.gather(
            self.wpe, positions_for(batch_size, n_step, past_length))

        # Gate the feature embedding by the company embedding, then add
        # positional information.
        return ft_emb * cpy_id_emb + wpe_emb

    def get_config(self):
        # Keys must match __init__ argument names so tf.keras can rebuild
        # the layer via from_config(). The previous version used
        # 'vocab_size' (no such __init__ argument) and included a fresh,
        # non-serializable initializer object — both broke round-tripping.
        config = {
            'company_count': self.company_count,
            'emb_size': self.emb_size,
            'n_ctx': self.n_ctx,
        }
        base_config = super().get_config()
        return {**base_config, **config}


class StockPredictModelV1(tf.keras.Model):
    """GPT-style sequence model with a per-company affine output head.

    The transformer produces hidden states ``h``; the prediction for each
    example is ``h @ w[c_id] + b[c_id]`` — i.e. a separate linear head is
    learned for every company id.
    """

    def __init__(self, params, *args, **kwargs):
        """
        Args:
            params: dict with at least 'n_vocab' (company count), 'n_emb'
                (embedding size) and 'n_ctx' (max context length); it is
                also forwarded unchanged to GPTIntermediate.
        """
        super().__init__(*args, **kwargs)
        self.gpt = GPTIntermediate(
            params,
            EmbeddingLayer(params['n_vocab'], params['n_emb'], params['n_ctx']),
            name='stock-predict-model'
        )
        self.n_emb = params['n_emb']
        # Per-company output projection: (n_vocab, n_emb, 1).
        self.w = tf.Variable(
            initial_value=tf.random_normal_initializer()(
                shape=(params['n_vocab'], self.n_emb, 1),
                dtype='float32'
            ),
            trainable=True,
            name='w',
        )
        # Per-company output bias: (n_vocab, 1).
        # BUG FIX: this variable was created with name='w', colliding with
        # the projection variable above; duplicate names break name-based
        # checkpoint save/restore and make variable summaries ambiguous.
        self.b = tf.Variable(
            initial_value=tf.random_normal_initializer()(
                shape=(params['n_vocab'], 1),
                dtype='float32'
            ),
            trainable=True,
            name='b',
        )

    def build(self, batch_size: int):
        """Build the model for inputs ((batch, 1) ids, (batch, None, 1) features).

        NOTE(review): this overrides Keras' build(input_shape) contract with
        a batch_size argument — it must be called explicitly, not via the
        framework's automatic build path. Confirm callers before changing.
        """
        super().build(input_shape=[
            (batch_size, 1),
            (batch_size, None, 1),
        ])

    def call(self, inputs, past=None, **kwargs):
        """Run the transformer and apply the company-specific output head.

        Args:
            inputs: tuple ``(c_id, ft)``; ``c_id`` holds per-example company
                ids (cast to int32 here), ``ft`` the per-step features.
            past: optional cached transformer state, forwarded to the GPT.

        Returns:
            ``tf.matmul(h, w) + b`` where ``h`` is the GPT output and
            ``w``/``b`` are gathered per example by company id.
        """
        c_id, ft = inputs
        c_id = tf.cast(c_id, dtype=tf.int32)
        h = self.gpt([c_id, ft], past)
        # Select each example's head: w -> (batch, n_emb, 1); b broadcasts
        # over the time dimension in the addition below.
        w = tf.reshape(tf.gather(self.w, c_id), (-1, self.n_emb, 1))
        b = tf.gather(self.b, c_id)
        return tf.matmul(h, w) + b
