#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 18-11-2


import os

import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical

import utils
from model.LMTGRU import CNNLMTGRU
from utils import generate_batch
import preprocess

# Root of the pre-split fold files (dataset0.txt .. dataset9.txt).
DATA_DIR = 'data/chat-detection-dataset'
# Maximum number of training epochs per fold.
EPOCHS = 1000
# TensorBoard summary output; one subdirectory per fold.
LOG_DIR = 'LMTGRU_train_log'
# Model checkpoint directory; one subdirectory per fold.
TRAIN_DIR = 'LMTGRU_saved_model'
# Sequences are padded/truncated to this many tokens.
MAX_LEN = 24
def main():
    """Run 10-fold training of CNNLMTGRU on the chat-detection dataset.

    For each fold: load the pre-split train/dev/test file, build the
    vocabulary from the training split only, tokenize and pad every split,
    then train with checkpointing — whenever dev accuracy improves, report
    test-set metrics, save the model, and write test predictions to
    'LMTGRUpred.csv'.
    """
    for fold in range(10):
        # Each dataset%d.txt already contains the train/dev/test split for this fold.
        train_x, dev_x, test_x, train_y, dev_y, test_y = preprocess.generate_train_dev_test(
            os.path.join(DATA_DIR, 'dataset%d.txt' % fold))
        # Vocabulary comes from the training split and is applied to all splits.
        vocab, vocab_rev = preprocess.generate_vocab(train_x)
        train_x = preprocess.tokenize_sentences(train_x, vocab_rev)
        train_x = pad_sequences(train_x, MAX_LEN, dtype='int32')
        dev_x = preprocess.tokenize_sentences(dev_x, vocab_rev)
        dev_x = pad_sequences(dev_x, MAX_LEN, dtype='int32')
        test_x = preprocess.tokenize_sentences(test_x, vocab_rev)
        test_x = pad_sequences(test_x, MAX_LEN, dtype='int32')
        # Binary labels -> one-hot vectors of length 2.
        train_y = to_categorical(train_y, 2)
        dev_y = to_categorical(dev_y, 2)
        test_y = to_categorical(test_y, 2)

        # Fresh graph per fold so variables from the previous fold don't leak.
        tf.reset_default_graph()
        model = CNNLMTGRU(128, [3, 4, 5], 2, 100, MAX_LEN, 400, 0.001, 0.3, use_embedding_layer=True,
                          vocab_size=len(vocab))

        with tf.Session() as sess:
            summary = tf.summary.FileWriter(os.path.join(LOG_DIR, str(fold)), sess.graph)
            try:
                # Resumes from a checkpoint if one exists; returns the best dev
                # accuracy reached so far (presumably 0 on a fresh run -- confirm
                # against CNNLMTGRU.start_or_continue_training).
                max_dev_acc = model.start_or_continue_training(sess, os.path.join(TRAIN_DIR, str(fold)))
                for epoch in range(1, EPOCHS + 1):
                    for x, y in generate_batch(train_x, train_y, 32, shuffle=True, undersampling=False):
                        model.train(sess, x, y, summary)
                    if epoch % 2 != 0:
                        continue  # evaluate only every other epoch

                    loss = model.compute_loss(sess, train_x, train_y)
                    acc = model.compute_accuracy(sess, train_x, train_y)
                    print('train acc, loss:', acc, loss)

                    loss = model.compute_loss(sess, dev_x, dev_y)
                    dev_acc = model.compute_accuracy(sess, dev_x, dev_y)
                    print('dev acc, loss: {}, {}'.format(dev_acc, loss))
                    if dev_acc > max_dev_acc:
                        # New best on dev: report test metrics, checkpoint the
                        # model, and dump test predictions alongside the one-hot
                        # ground truth.
                        loss = model.compute_loss(sess, test_x, test_y)
                        acc = model.compute_accuracy(sess, test_x, test_y)
                        pred_test = model.predict(sess, test_x)
                        print('test acc, loss: {}, {}'.format(acc, loss))

                        max_dev_acc = dev_acc
                        print('max:', max_dev_acc)
                        model.save(sess, max_dev_acc, os.path.join(TRAIN_DIR, str(fold)))

                        df = pd.DataFrame(list(zip(pred_test, test_y)), columns=['pred', 'ground_true'])
                        df.to_csv('LMTGRUpred.csv', index=False)
                    print()
            finally:
                # TF1 FileWriter is not a context manager; close it explicitly so
                # buffered summaries are flushed even if training raises.
                summary.close()


if __name__ == '__main__':
    main()
