import json
import tensorflow as tf
import numpy as np

from transformer_decoder import StockPredictModel
from optimizer import get_optimizer
from data import StatisticsData, SingletonData

# Global feature statistics (per-column std etc.) used below to derive
# the inverse-variance loss weights.
statistics_data = StatisticsData.get()

# Load model/training hyper-parameters.
# 'utf-8' spelled out explicitly ('utf' is only an undocumented alias).
with open('config.json', 'r', encoding='utf-8') as file:
    config = json.load(file)

# Pre-processed arrays for a single stock (平安银行 / Ping An Bank).
data = np.load('data/processed/平安银行.npz')

cpy_id = data['cpy_id']
st_ws_one_hot = data['st_ws_one_hot']
lt_et = data['lt_et']
time_one_hot = data['time_one_hot']
float_feature = data['float_feature']
true_float_feature = data['true_float_feature']
target_feature = data['target_feature']
mask = data['mask']

# Echo every array's shape so a training run can be sanity-checked at a glance.
for _name in ('cpy_id', 'st_ws_one_hot', 'lt_et', 'time_one_hot',
              'float_feature', 'true_float_feature', 'target_feature', 'mask'):
    print(_name, data[_name].shape)
model = StockPredictModel(params=config)

batch_size = 1

# Copy before appending so we never mutate a list that
# SingletonData.positive_data_cols() may share or reuse internally.
col_names = list(SingletonData.positive_data_cols())
col_names.append('复权因子')

# Per-column standard deviations from the global statistics, in the same
# order as the target feature columns.
stds = np.array([statistics_data['连续特征'][each]['std'] for each in col_names])

var = np.square(stds)

# Inverse-variance loss weights: high-variance features contribute less.
# (Replaces a hard-coded list of 11 ones multiplied by 1/var — identical
# result, but no longer breaks if the column count changes.)
# NOTE(review): this array is float64; if target_feature is float32 the
# multiplication inside loss() may need an explicit cast — confirm.
weights = 1.0 / var
print(weights)


def loss(y_true, y_pred):
    """Masked, inverse-variance-weighted MSE between targets and predictions.

    Timesteps whose first target channel is negative are treated as padding
    and excluded from the average.

    Args:
        y_true: target tensor of shape (batch, time, features).
        y_pred: model output; predictions for the target window start at
            time offset 8.
            NOTE(review): the offset 8 presumably matches the model's
            context/warm-up length — confirm against StockPredictModel.

    Returns:
        Scalar mean weighted squared error over the valid positions.
    """
    preds = y_pred[:, 8:, :]
    # (batch, time) index pairs of positions carrying real targets.
    keep = tf.where(tf.math.greater_equal(y_true[:, :, 0], 0))
    picked_pred = tf.gather_nd(preds, keep)
    picked_true = tf.gather_nd(y_true, keep)
    # `weights` is the module-level inverse-variance array.
    weighted_sq_err = tf.square(picked_true - picked_pred) * weights
    n_valid = tf.cast(tf.shape(keep)[0], dtype=tf.float32)
    return tf.reduce_sum(weighted_sq_err) / n_valid


# NOTE(review): this optimizer is built from the config but never passed to
# compile() below, which constructs its own Adam — confirm which is intended.
opt = get_optimizer(config)

# Build with symbolic (None = variable-length time axis) input shapes so the
# model's weights are created up front.
model.build(input_shape=[
    (batch_size, 1), (batch_size, 12), (batch_size, 2),
    (batch_size, None, 50), (batch_size, None, 22), (batch_size, None, 11),
    (batch_size, None, 1)
])

model.compile(
    # `lr` is a deprecated (and in recent Keras, removed) alias; use the
    # canonical `learning_rate` keyword.
    optimizer=tf.optimizers.Adam(learning_rate=6e-4),
    # Only the model's first output is trained against the masked loss.
    loss=[loss, None],
)

# Assemble the inputs in the order the network expects them.
train_inputs = [
    cpy_id,
    st_ws_one_hot,
    lt_et,
    time_one_hot,
    float_feature,
    true_float_feature,
    mask,
]

model.fit(
    train_inputs,
    target_feature,
    batch_size=1,
    epochs=100,
    validation_split=0.01,
    verbose=1,
    # No callbacks for now; a ModelCheckpoint can be re-enabled here.
    callbacks=[],
)
