#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-7
import os

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from tqdm import tqdm

from model.RLB import RLBModel
from utils import generate_batch, get_single_fold

# Path to the 10-fold SMP2018 dataset split (folder name kept verbatim;
# the suffix means "not converted to Arabic numerals").
DATA_PATH = 'data/smp2018/10flod_all_3600+_未转阿拉伯'
EPOCHS = 15  # epochs per training stage, per fold
# NOTE(review): LOG_DIR / TRAIN_DIR are not referenced in the visible code
# of this file — confirm they are used elsewhere before removing.
LOG_DIR = 'train/RLB_log'
TRAIN_DIR = 'train/RLB_train'
MAX_LEN = 20  # sequence length used for padding/truncation

# Held-out test set: load, pad sequences to MAX_LEN, and binarize labels
# (label 3 → class 1, everything else → class 0), then one-hot encode.
test_x = np.load(os.path.join(DATA_PATH, 'test_x.npy'))
test_y = np.load(os.path.join(DATA_PATH, 'test_y.npy')).astype(np.int32)
# Use the MAX_LEN constant (was a hard-coded 20) and pass dtype by keyword,
# consistent with the train/dev padding below.
test_x = pad_sequences(test_x, MAX_LEN, dtype='float32')
test_y = np.where(test_y == 3, 1, 0)
test_y = to_categorical(test_y, 2)

# 10-fold cross-validation: for each fold, train the non-actor part of the
# model first, then fine-tune everything with Keras fit.
for fold in range(10):
    # --- Per-fold data preparation ---
    train_x, train_y, dev_x, dev_y = get_single_fold(DATA_PATH, fold)
    train_x = pad_sequences(train_x, MAX_LEN, dtype='float32')  # was hard-coded 20
    dev_x = pad_sequences(dev_x, MAX_LEN, dtype='float32')

    # Binarize labels: label 3 → class 1, everything else → class 0.
    train_y = np.where(train_y == 3, 1, 0)
    dev_y = np.where(dev_y == 3, 1, 0)
    print(train_y.shape)
    train_y = to_categorical(train_y, 2)
    dev_y = to_categorical(dev_y, 2)

    # Fresh graph and model per fold (TF1-style reset).
    tf.reset_default_graph()
    model = RLBModel(128, 64, 2, 0, 400, 0.3)

    # Build the model's variables via a dummy forward input before compiling.
    # NOTE(review): dummy batch of 3 with feature dim 400 — presumably matches
    # the embedding size passed to RLBModel; confirm against the model class.
    dummy_x = tf.zeros((3, MAX_LEN, 400), dtype=tf.float32)
    model._set_inputs(dummy_x)
    model.compile(tf.train.AdamOptimizer(0.001), 'categorical_crossentropy', metrics=['accuracy'])

    # Partition trainable variables into actor vs. non-actor. Compare by
    # object identity: `v in list` relies on Variable.__eq__, which is not a
    # reliable membership test for TF variables.
    actor_v = model.rnn.actor.trainable_variables
    actor_ids = {id(v) for v in actor_v}
    ex_actor_v = [v for v in model.trainable_variables if id(v) not in actor_ids]

    # Stage 1: train everything except the actor (the RNN part).
    print("fold {}, train RNN".format(fold))
    for epoch in tqdm(range(1, EPOCHS + 1)):
        for x, y in generate_batch(train_x, train_y, 32, shuffle=True, undersampling=False):
            with tf.GradientTape() as tape:
                loss = model.loss_fn(x, y, 'ex_actor')
            # Compute/apply gradients outside the tape context so the update
            # ops are not needlessly recorded.
            g = tape.gradient(loss, ex_actor_v)
            model.optimizer.apply_gradients(zip(g, ex_actor_v))

    # TODO: actor-only pre-training is disabled — gradients for the actor
    # variables could not be found during training; fix before re-enabling
    # a loop like stage 1 but with loss_fn(x, y) over actor_v.

    # Stage 2: fine-tune the whole model.
    print("fold {}, train All".format(fold))
    for epoch in tqdm(range(1, EPOCHS + 1)):
        model.fit(train_x, train_y, 32, validation_data=(dev_x, dev_y), verbose=1)
if __name__ == '__main__':
    # NOTE(review): this guard is a no-op — the entire training pipeline above
    # executes at import time. Consider wrapping the script body in a main()
    # function and invoking it here instead.
    pass
