import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import optimizers
from stock_a.model.base_model import BaseModel

class TFModelV1(keras.Model):
    """Two-layer LSTM over a 20-day feature window, followed by a dense head.

    Consumes three per-day scalar series (fluctuate / amplitude / turnover),
    stacks them into a (batch, 20, 3) tensor, and emits a single non-negative
    scalar per example via a final ReLU.
    """

    # Feature-dict keys read in call(); each maps to a flat per-day series.
    # NOTE(review): 'trunover' looks like a typo for 'turnover', but it must
    # match the key produced by the input pipeline — do not rename here alone.
    _FEATURE_KEYS = ('seq_day20_fluctuate', 'seq_day20_amplitude', 'seq_day20_trunover')

    def __init__(self):
        super().__init__()
        # Attribute names are kept as-is: they determine checkpoint variable
        # names, so renaming would break saved-weight compatibility.
        self.day_lstm_fea_lay1 = tf.keras.layers.LSTM(50, return_sequences=True)
        self.day_lstm_fea_lay2 = tf.keras.layers.LSTM(50, return_sequences=False)
        self.dense_layer1 = tf.keras.layers.Dense(50)
        self.dense_layer2 = tf.keras.layers.Dense(1)

    def call(self, inputs, training=None, mask=None):
        """Forward pass.

        Args:
            inputs: dict of tensors; each feature key holds a series that is
                reshaped to (batch, 20, 1) and scaled from percent to fraction.
            training: unused (passed by Keras).
            mask: unused (passed by Keras).

        Returns:
            (batch, 1) tensor of non-negative predictions.
        """
        # Normalize each percent-valued series into [~0, ~1] and give it a
        # trailing channel axis, then stack the three channels together.
        channels = [
            tf.reshape(inputs[key], [-1, 20, 1]) / 100.0
            for key in self._FEATURE_KEYS
        ]
        hidden = tf.concat(channels, axis=-1)
        hidden = self.day_lstm_fea_lay1(hidden)
        hidden = self.day_lstm_fea_lay2(hidden)
        hidden = self.dense_layer1(hidden)
        hidden = self.dense_layer2(hidden)
        return tf.nn.relu(hidden)

class ModelV1(BaseModel):
    """BaseModel wrapper that trains a TFModelV1 with Adam on MAE loss."""

    def __init__(self):
        super().__init__(TFModelV1)

    def train_model(self):
        """Run one pass over ``self.dataset``, optimizing MAE against the
        'max_increase_20d' label; logs loss every 100 steps to stdout and
        to ``self.tf_summary_writer``.
        """
        optimizer = optimizers.Adam(learning_rate=1e-4)
        for step, (features, labels) in enumerate(self.dataset):
            with tf.GradientTape() as tape:
                out = self.tf_model(features)
                # NOTE(review): `out` is (batch, 1) while the label tensor's
                # shape is not visible here. If labels are (batch,), MAE would
                # silently broadcast to (batch, batch) — confirm the label
                # shape in the input pipeline.
                loss = tf.reduce_mean(tf.losses.mae(labels['max_increase_20d'], out))
            grads = tape.gradient(loss, self.tf_model.trainable_variables)
            optimizer.apply_gradients(zip(grads, self.tf_model.trainable_variables))
            if step % 100 == 0:
                print(step, 'loss:', float(loss))
                with self.tf_summary_writer.as_default():
                    # Fixed: the summary tag previously contained a stray
                    # trailing colon ('train_loss:'), which leaked into the
                    # TensorBoard metric name.
                    tf.summary.scalar('train_loss', float(loss), step=step)

    def predict(self, inputs):
        # Intentionally a no-op stub for now; implement by calling
        # ``self.tf_model(inputs)`` once the inference path is needed.
        pass