import json
import os
import tqdm
import numpy as np

if __name__ == '__main__':
    # Pin this training run to GPU 1 when executed as a script.  This is set
    # before the `import tensorflow` below, which is required for the
    # environment variable to take effect on device selection.
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'

import tensorflow as tf
from model1 import StockPredictModelV1
from modelbase import StockPredictModel0
from model0 import StockPredictModelV0
from optimizer import get_optimizer
from data import TrainData
from common import train_data_dir

# A VAL_RADIO fraction of each company's samples is held out as the validation
# set; validation samples are never used during training.
# (NOTE(review): "RADIO" is a typo for "RATIO" — name kept for compatibility.)
VAL_RADIO = 0.05

# NOTE(review): not referenced anywhere in this file — confirm before removal.
TRAIN_MB_CPYS = 100

# JSON-lines file the training callback writes batch/epoch metrics to.
LOG_FILE_PATH = './train-log.json'

# Cap on how many companies contribute training data (see get_data()).
TOTAL_TRAINING_CPYS = 5000

# Number of Keras epochs over which one full pass of the data is spread.
ALL_DATA_EPOCHS = 15
# How many times, on average, each sample is trained on.
TRAIN_TIMES_PER_SAMPLE = 3


def load_config(path='config.json'):
    """Read the JSON training configuration file.

    Args:
        path: Location of the configuration file.  Defaults to
            ``'config.json'`` in the working directory, preserving the
            original hard-coded behavior.

    Returns:
        The parsed configuration as a dict (expects at least the keys
        ``'scale'`` and ``'train_batch_size'``, read at module level below).
    """
    # 'utf-8' spelled out explicitly (the original 'utf' is an alias for the
    # same codec, so behavior is unchanged).
    with open(path, 'r', encoding='utf-8') as file:
        return json.load(file)


# Parsed training configuration; loaded once at import time.
CONFIG = load_config()

# Global multiplier applied to float/target features when loading data
# (see get_data()); also undone inside the loss normalization in main().
SCALE = CONFIG['scale']

BATCH_SIZE = CONFIG['train_batch_size']

# Flipped to True by StepLossRecordCallBack once epoch >= 40.  It is never
# read in this file — presumably consumed elsewhere; TODO confirm.
RANDOM_TRAIN_SAMPLE = False

# NOTE(review): not used anywhere in this file — looks like a leftover cache;
# confirm before removing.
LOADED_DATA_CACHE = {}


def get_data():
    """Load per-company ``.npz`` arrays and split them into train/val sets.

    For each trainable company a VAL_RADIO fraction of its samples (chosen by
    a persisted random permutation, so the split is reproducible across runs)
    goes to validation and the rest to training.  Float/target features are
    multiplied by the global SCALE.

    Returns:
        (train_data, val_data): two dicts with keys 'cpy_id',
        'float_feature', 'target_feature', each a single concatenated
        numpy array over all companies.
    """
    trainable_cpy_names = TrainData.get()[:TOTAL_TRAINING_CPYS]
    print(f'training data comes from {len(trainable_cpy_names)} companies')

    print('loading data')

    # Persisted shuffle indices keep the train/val split stable across runs.
    # BUG FIX: np.load() returns a read-only NpzFile; the original assigned
    # new entries directly into it, which raises TypeError whenever the file
    # exists but lacks a company.  Copy into a plain dict instead.
    data_indices = {}
    if os.path.exists('train-data-split-indices.npz'):
        with np.load('train-data-split-indices.npz') as stored:
            data_indices = {k: stored[k] for k in stored.files}

    keys = ['cpy_id', 'float_feature', 'target_feature']
    scaled_keys = {'float_feature', 'target_feature'}

    val_data = {k: [] for k in keys}
    train_data = {k: [] for k in keys}

    new_indices_added = False
    for name in tqdm.tqdm(trainable_cpy_names):
        # '*' replaced in file names — presumably because it is unsafe on the
        # filesystem; the data writer stores it as '星__' (TODO confirm).
        filename = name.replace('*', '星__')
        # BUG FIX: the original ignored `filename` and loaded a literal
        # placeholder path; load the sanitized per-company file instead.
        npz = np.load(os.path.join(train_data_dir, f'{filename}.npz'))
        val_split = int(len(npz['cpy_id']) * VAL_RADIO)
        if val_split <= 0:
            # Company too small to yield a validation slice: skip entirely.
            continue
        if name not in data_indices:
            indices = np.arange(len(npz['cpy_id']))
            np.random.shuffle(indices)
            data_indices[name] = indices
            new_indices_added = True
        indices = data_indices[name]
        for k in keys:
            arr = npz[k]

            if k == 'float_feature':
                # Keep only feature column 1 — assumes axis 2 is the feature
                # axis; TODO confirm against the data writer.
                arr = arr[:, :, 1:2]
            if k in scaled_keys:
                arr = arr * SCALE
            val_data[k].append(arr[indices[:val_split]].copy())
            train_data[k].append(arr[indices[val_split:]].copy())
    # BUG FIX: the original only saved when the file did not yet exist, so
    # indices generated for newly added companies were never persisted.
    if new_indices_added or not os.path.exists('train-data-split-indices.npz'):
        np.savez('train-data-split-indices.npz', **data_indices)

    val_len = 0
    train_data_len = 0

    # Collapse the per-company lists into single arrays, key by key.
    for k in keys:
        lst = val_data.pop(k)
        print('concatenate val data', k)
        val_data[k] = np.concatenate(lst, axis=0)
        lst = train_data.pop(k)
        print('concatenate train data', k)
        train_data[k] = np.concatenate(lst, axis=0)
        val_len = len(val_data[k])
        train_data_len = len(train_data[k])

    print(f'validation data size', val_len)
    print(f'train data size', train_data_len)
    return train_data, val_data


def train_data_generator(data: dict):
    """Yield an endless stream of shuffled ([cpy_id, float_feature], target)
    mini-batches drawn from *data*.

    The index permutation is reshuffled each time the data set has been
    consumed once; the final batch of a pass may be smaller than BATCH_SIZE.
    """
    total = len(data['target_feature'])
    print('train data lens')
    for key, value in data.items():
        print(key, len(value))

    order = np.arange(total)
    np.random.shuffle(order)

    cursor = 0
    while True:
        if cursor >= total:
            # Completed one pass: rewind and reshuffle for the next one.
            cursor = 0
            np.random.shuffle(order)
        stop = min(cursor + BATCH_SIZE, total)
        batch = order[cursor:stop]

        inputs = [
            data['cpy_id'][batch],
            data['float_feature'][batch],
        ]
        yield inputs, data['target_feature'][batch]
        cursor = stop


class StepLossRecordCallBack(tf.keras.callbacks.Callback):
    """Keras callback that appends training metrics to a JSON-lines log.

    Writes one JSON object per record: every 100th batch during training,
    and one per epoch (including validation metrics).  After epoch 40 it
    flips the module-level RANDOM_TRAIN_SAMPLE flag to True.
    """

    def __init__(self, log_fp):
        super().__init__()
        self.losses = []
        self.log_file = log_fp

    def _write_record(self, record):
        # One JSON object per line; flush immediately so progress survives
        # an interrupted run.
        self.log_file.write(json.dumps(record) + '\n')
        self.log_file.flush()

    def on_train_batch_end(self, batch, logs=None):
        if batch % 100 != 0:
            return
        self._write_record({
            'batch': batch,
            'loss': logs.get('loss'),
            'error_rate': logs.get('er'),
        })

    def on_epoch_end(self, epoch, logs=None):
        global RANDOM_TRAIN_SAMPLE
        if epoch >= 40:
            RANDOM_TRAIN_SAMPLE = True
        self._write_record({
            'epoch': epoch,
            'loss': logs.get('loss'),
            'error_rate': logs.get('er'),
            'val_loss': logs.get('val_loss'),
            'val_error_rate': logs.get('val_er'),
        })


def main():
    """Build the model, compile it with a masked, range-normalized MSE loss,
    and run the Keras training loop.

    Relies on module-level globals: CONFIG, BATCH_SIZE, SCALE,
    ALL_DATA_EPOCHS, TRAIN_TIMES_PER_SAMPLE, and the open `log_file`.
    """
    # model = StockPredictModel0()
    model = StockPredictModelV1(params=CONFIG)
    # model = StockPredictModelV1Diff(params=CONFIG)

    model.build(BATCH_SIZE)

    train_data, val_data = get_data()

    data_len = len(train_data['target_feature'])

    # Batches required for one full pass over the training set.
    all_data_steps_needed = data_len // BATCH_SIZE

    # One full pass is spread across ALL_DATA_EPOCHS Keras epochs.
    steps_per_epoch = all_data_steps_needed // ALL_DATA_EPOCHS

    opt = get_optimizer(CONFIG)

    # NOTE(review): defined but never used below — possibly intended as
    # per-position loss weights; confirm before deleting.
    weights = [5, 10, 10, 10, 10, 1, 1, 1, 1, 1, 1]

    @tf.function
    def preprocess(y_true, y_pred):
        # Along the last axis of y_true, positions 11..21 hold this company's
        # feature range (max - min + epsilon, the epsilon guarding against
        # division by zero); a value < 0 at position 22 marks points whose
        # loss should be ignored.  (Translated from the original Chinese
        # comment.)
        # NOTE(review): the comment says 11..21 but only index 12 is read —
        # consistent with float_feature being sliced to column 1 when loaded;
        # confirm against the data layout.
        scale = y_true[:, :, 12:13]

        mask = y_true[:, :, 22]

        # The target value itself lives in column 1, mirroring the
        # float_feature[:, :, 1:2] slice used in get_data().
        y_true = y_true[:, :, 1:2]

        # Gather only the unmasked positions.
        valid_pos = tf.where(tf.math.greater_equal(mask, 0))
        valid_pred = tf.gather_nd(y_pred, valid_pos)
        valid_true = tf.gather_nd(y_true, valid_pos)
        valid_scale = tf.gather_nd(scale, valid_pos)
        return valid_true - valid_pred, valid_scale, tf.shape(valid_pos)[0]

    @tf.function
    def mse_loss(y_true, y_pred):
        # Mean squared error normalized by the company's feature range.
        diff, scale, valid_num = preprocess(y_true, y_pred)
        if valid_num <= 0:
            # Every position masked out: contribute zero loss.
            return tf.constant(0, dtype=tf.float32)
        # Undo the global SCALE applied at load time before normalizing.
        # NOTE(review): this makes the loss SCALE^2 times larger than `er`'s
        # normalization — appears intentional; confirm.
        scale = scale / SCALE
        return tf.reduce_mean(tf.square(diff / scale))

    @tf.function
    def er(y_true, y_pred):
        # Error-rate metric: mean |error| as a percentage of the feature range.
        diff, scale, valid_num = preprocess(y_true, y_pred)
        if valid_num <= 0:
            return tf.constant(0, dtype=tf.float32)
        return tf.reduce_mean(tf.math.abs(diff) / scale) * 100

    # @tf.function
    # def fer(y_true, y_pred):
    #     diff, scale, valid_num = preprocess(y_true, y_pred)
    #     diff = diff[:, 0:5]
    #     scale = scale[:, 0:5]
    #     if valid_num <= 0:
    #         return tf.constant(0, dtype=tf.float32)
    #     return tf.reduce_mean(tf.math.abs(diff) / scale) * 100

    model.compile(
        optimizer=opt,
        loss=mse_loss,
        metrics=[er]
    )

    model.fit(
        train_data_generator(train_data),
        # Total epochs average TRAIN_TIMES_PER_SAMPLE passes over each sample.
        epochs=ALL_DATA_EPOCHS * TRAIN_TIMES_PER_SAMPLE,
        steps_per_epoch=steps_per_epoch,
        validation_data=(
            [
                val_data['cpy_id'],
                # val_data['st_ws_one_hot'],
                # val_data['lt_et'],
                # val_data['time_one_hot'],
                val_data['float_feature'],
                # val_data['discrete_feature'],
                # val_data['is_next_day_rest'],
            ], val_data['target_feature']
        ),
        validation_batch_size=BATCH_SIZE,
        verbose=1,
        callbacks=[
            StepLossRecordCallBack(log_file),
            # tf.keras.callbacks.ModelCheckpoint(
            #     'checkpoint-{epoch}-best-val-'
            #     'error-rate-{val_er}.hdf5',
            #     monitor='val_output_1_fer',
            #     verbose=1,
            #     save_best_only=True,
            #     mode='min',
            # ),
            tf.keras.callbacks.ModelCheckpoint(
                'checkpoint-latest.hdf5',
                verbose=1
            )
        ],
    )


# Script body: open the metrics log and run training.
# FIX: use the LOG_FILE_PATH constant (the literal path was duplicated from
# it) and a context manager instead of a manual try/finally close.
# NOTE(review): this still executes on import; consider moving it under an
# `if __name__ == '__main__':` guard like the CUDA setup at the top.
company_hits = {}
with open(LOG_FILE_PATH, 'w', encoding='utf8') as log_file:
    main()
