#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging
import os
import time

import numpy as np
import tensorflow as tf
import sys

sys.path.insert(0, '..')
from keyword_extract_model import KeywordExtractModel
# from keyword_extract_attention_model import KeywordExtractAttentionModel

# Pin this process to GPU index 7 so TensorFlow does not grab every device.
os.environ["CUDA_VISIBLE_DEVICES"] = '7'


def load_data(filename):
    """Read *filename* and return its lines as a list of strings.

    Trailing newlines are stripped by ``splitlines``.
    """
    with open(filename, 'r') as fp:
        return fp.read().splitlines()


def get_single_input(line, token_2_index_vocab, stop_words):
    """Encode one training line into (content_ids, label_ids).

    *line* has the form ``"content tokens##predict tokens"`` where both
    sides are space-separated.  For every content token we emit:
      - its vocabulary index (the index of ``'UNK'`` when out-of-vocabulary)
      - a label: 2 when the token is an in-vocabulary, non-stopword token
        that also appears on the predict side; 1 otherwise.

    Returns ``None`` (and logs a traceback) when the line is malformed.
    """
    try:
        items = line.strip().split("##")

        content_tokens = items[0].split(' ')
        predict_tokens = items[1].split(' ')

        content_encode = []
        predict_encode = []

        for content_token in content_tokens:
            in_vocab = content_token in token_2_index_vocab
            # BUG FIX: test vocabulary membership on the ORIGINAL token.
            # The original code replaced an OOV token with 'UNK' first,
            # making the vocab check vacuously true — an OOV token was
            # mislabelled 2 whenever 'UNK' appeared on the predict side.
            if in_vocab and content_token in predict_tokens and content_token not in stop_words:
                predict_encode.append(2)
            else:
                predict_encode.append(1)
            content_encode.append(
                token_2_index_vocab[content_token if in_vocab else 'UNK'])

        return content_encode, predict_encode
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; malformed lines are logged and skipped upstream.
        logging.exception('failed')
        return None


def get_next_batch(data, token_2_index_vocab, batch_size, stop_words):
    """Yield ``(contents, labels)`` batches forever, cycling over *data*.

    Lines that fail to encode are skipped.  A partially filled batch at
    the end of one pass is carried over into the next pass, so every
    yielded batch holds at least *batch_size* examples.
    """
    contents, labels = [], []
    while True:
        for line in data:
            encoded = get_single_input(line, token_2_index_vocab, stop_words)
            if encoded:
                contents.append(encoded[0])
                labels.append(encoded[1])
            if len(contents) >= batch_size:
                yield contents, labels
                contents, labels = [], []


# Hyper-parameters for the keyword-extraction model.
model_params = {
    'max_seq_length': 50,  # time_steps
    'vocab_size': 400000,
    'class_num': 3,  # labels: 0 = padding, 1 = non-keyword, 2 = keyword (see get_single_input)
    'word_embedding_size': 64,  # dimensionality of the word embeddings
    'lstm_cell_size': 64,  # output size of each cell, i.e. hidden_size
    'batch_size': 128,
    'num_layers': 1,
    'train_keep_prob': 0.75,  # dropout keep probability used during training
    'learning_rate': 0.0001,
    'learning_rate_decay_factor': 0.99999
}

# Build the stop-word list: one lower-cased word per line.
stop_words = []
with open('data/stop_words.dat', 'r') as input_file:
    for line in input_file:
        stop_words.append(line.strip().lower())

# Build the token <-> index vocabulary from the dictionary file
# (first tab-separated column is the token).  The last slot
# (vocab_size - 1) is reserved for the 'UNK' token.
token_2_index = {}
index_2_token = {}
index = 0
with open('data/dict_words.dat', 'r') as input_file:
    for line in input_file:
        token = line.strip().split("\t")[0]
        index_2_token[index] = token
        token_2_index[token] = index
        index += 1
        if index == model_params['vocab_size'] - 1:
            token_2_index['UNK'] = index
            index_2_token[index] = 'UNK'
            break

# BUG FIX: guarantee 'UNK' exists even when the dictionary file holds
# fewer than vocab_size - 1 entries; otherwise get_single_input would
# KeyError on every out-of-vocabulary token (silently dropping the line
# via its exception handler).
if 'UNK' not in token_2_index:
    token_2_index['UNK'] = index
    index_2_token[index] = 'UNK'

# Build input data: model, session config, and train/test batch generators.
batch_size = model_params['batch_size']
seq_length = model_params['max_seq_length']
model = KeywordExtractModel(model_params)
# allow_soft_placement lets TF fall back to CPU for ops with no GPU kernel.
cfg = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
train_data = load_data('data/train_v1_shuffle.dat')
test_data = load_data('data/test_v1_shuffle.dat')
train_generator = get_next_batch(train_data, token_2_index, batch_size, stop_words)
test_generator = get_next_batch(test_data, token_2_index, batch_size, stop_words)
def _pack_batch(batch, batch_size, seq_length):
    """Pack a (contents, labels) batch into fixed-size int32 numpy arrays.

    Sequences longer than *seq_length* are truncated; shorter ones are
    zero-padded (0 is the padding id/label).  Returns
    ``(lengths, content_ids, label_ids)``.
    """
    lengths = np.zeros(batch_size, np.int32)
    content_ids = np.zeros((batch_size, seq_length), np.int32)
    label_ids = np.zeros((batch_size, seq_length), np.int32)
    contents, labels = batch
    for i in range(batch_size):
        lengths[i] = min(seq_length, len(contents[i]))
        for j in range(lengths[i]):
            content_ids[i][j] = contents[i][j]
            label_ids[i][j] = labels[i][j]
    return lengths, content_ids, label_ids


with tf.Session(config=cfg) as session:
    session.run(tf.global_variables_initializer())

    # Output directory for models and summaries
    timestamp = str(int(time.time()))
    out_dir = os.path.abspath(os.path.join(os.path.curdir, "bilstm", timestamp))
    logging.info("Writing to %s" % out_dir)

    # Checkpoint directory. TensorFlow assumes this directory already
    # exists, so create it up front.
    checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
    checkpoint_prefix = os.path.join(checkpoint_dir, "model")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    # train_generator is infinite, so this loop runs until interrupted.
    for iteration, batch in enumerate(train_generator):
        content_tokens_size, contents_encode, predict_encode = _pack_batch(
            batch, batch_size, seq_length)

        feed_dict = {
            model.pred: predict_encode,
            model._src_input_data: contents_encode,
            model._src_seq_length: content_tokens_size,
            model.keep_prob: model_params['train_keep_prob']
        }
        loss, _ = session.run([model.loss, model.train], feed_dict=feed_dict)

        if iteration > 0 and (iteration % 50) == 0:
            print('iteration %d, loss %.05f' % (iteration, loss))

        # Periodic evaluation on one held-out batch.
        if iteration > 0 and (iteration % 100) == 0:
            test_batch = next(test_generator)
            content_tokens_size, contents_encode, predict_encode = _pack_batch(
                test_batch, batch_size, seq_length)
            feed_dict = {
                model.pred: predict_encode,
                model._src_input_data: contents_encode,
                model._src_seq_length: content_tokens_size,
                # BUG FIX: evaluation must run with dropout disabled.  The
                # original fed train_keep_prob (0.75) here, which makes the
                # reported test loss/accuracy noisy and pessimistic.
                model.keep_prob: 1.0
            }
            loss, _pred, pred, accuracy = session.run(
                [model.loss, model.predict, model.pred, model.accuracy],
                feed_dict=feed_dict)
            # Only the first example of the batch is printed here.
            print(' '.join([index_2_token[i] if i != 0 else '' for i in contents_encode[0]]))
            print("label:", predict_encode[0])
            print("predict :", _pred.shape)
            print("predict: ", _pred[0:seq_length])
            print('test loss %.05f, accuracy %.05f' % (loss, accuracy))

        # Save a checkpoint every 1000 iterations (including iteration 0).
        if iteration % 1000 == 0:
            path = model.saver.save(session, checkpoint_prefix, global_step=iteration)
            logging.info("Saved model checkpoint to %s" % path)