#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging
import os
import time

import numpy as np
import tensorflow as tf

from tw_model import TWModel


def get_query_predict(line, vocab, stop_words):
    """Parse one training line into an encoded (query, weight-label) pair.

    Expected line format: "query###title1$$$title2$$$..." where the query
    and each title are space-separated token strings.

    Args:
        line: raw input line, "query###titles" with "$$$"-joined titles.
        vocab: dict mapping token -> integer id; must contain 'UNK'.
        stop_words: collection of tokens excluded from weighting.

    Returns:
        A list holding at most one tuple (query_encode, predict_encode):
        query_encode is the list of vocab ids for the query tokens, and
        predict_encode the per-token weight label in [1, 4]. Returns an
        empty list for malformed input, when no token scores above 1, or
        on any parsing error (logged, not raised).
    """
    query_predict = []
    try:
        fields = line.strip().split("###")
        if len(fields) != 2:
            return query_predict
        query = fields[0].split(" ")
        titles = fields[1].split("$$$")

        # Term frequency over all titles; the top half by frequency forms
        # the candidate set for weighting. Stored as a set: membership is
        # tested once per query token below, so O(1) lookups matter.
        term_tf = {}
        for title in titles:
            for term in title.split(" "):
                term_tf[term] = term_tf.get(term, 0) + 1
        ranked = sorted(term_tf.items(), key=lambda x: x[1], reverse=True)
        top_term_tf = {term for term, _ in ranked[0:int(len(ranked) / 2)]}

        # Encode the query, mapping out-of-vocabulary tokens to 'UNK'.
        query_encode = [vocab[w] if w in vocab else vocab['UNK'] for w in query]

        # For each query token, count the number of titles containing it (df).
        predict_encode = []
        for w in query:
            match = 0
            if w not in stop_words and w in top_term_tf:
                for title in titles:
                    # NOTE(review): substring test against the space-stripped
                    # title — presumably intended for CJK text where token
                    # boundaries are unreliable; confirm this is deliberate
                    # (it also matches across original token boundaries).
                    if w in "".join(title.split(" ")):
                        match += 1
            predict_encode.append(match)

        # Normalize the df counts to labels in [1, 4]; samples where every
        # token scores <= 1 are dropped entirely.
        if max(predict_encode) > 1:
            predict_encode = [max(1, int(i * 4 / len(titles))) for i in predict_encode]
            query_predict.append((query_encode, predict_encode))
    except Exception:  # narrowed from bare except; log and skip the bad line
        logging.exception('failed')

    return query_predict


def get_next_batch(train_filename, vocab, batch_size, stop_words):
    """Yield (batch_query, batch_predict) training batches forever.

    Cycles over the training file endlessly, accumulating examples
    produced by get_query_predict until a full batch is ready.

    Args:
        train_filename: path to the "query###titles" training file.
        vocab: token -> id mapping (must contain 'UNK').
        batch_size: minimum number of examples per yielded batch.
        stop_words: tokens excluded from weighting.

    Yields:
        Tuples (batch_query, batch_predict): parallel lists of encoded
        queries and their per-token weight labels.
    """
    batch_query = []
    batch_predict = []
    while True:
        # Context manager closes the handle at the end of each pass over
        # the file (the original opened a new handle every epoch and never
        # closed any of them); also avoids shadowing the builtin `input`.
        with open(train_filename) as train_file:
            for line in train_file:
                data = get_query_predict(line, vocab, stop_words)
                if len(data) == 1:
                    batch_query.append(data[0][0])
                    batch_predict.append(data[0][1])

                if len(batch_query) >= batch_size:
                    yield (batch_query, batch_predict)
                    batch_query = []
                    batch_predict = []


# Pin the job to a single GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = '7'

model_params = {'max_seq_length': 8, 'vocab_size': 800000, 'class_num': 5,
                'word_embedding_size': 32, 'lstm_cell_size': 32, 'batch_size': 128,
                'num_layers': 1, 'train_keep_prob': 0.75,
                'learning_rate': 0.0001, 'learning_rate_decay_factor': 0.99999}

# Build the stop-word set. A set (rather than a list) gives O(1) membership
# tests, which get_query_predict performs once per query token per line.
stop_words = set()
with open('../data/stop_words.dat', 'r') as input_file:
    for line in input_file:
        stop_words.add(line.strip().lower())

# Build the vocabulary (token <-> index maps) from the first column of the
# dict file, capped at vocab_size; the last slot is reserved for 'UNK'.
max_vocab_file = model_params['vocab_size']
token_2_index = {}
index_2_token = {}
index = 0
with open("../data/dict.dat") as dict_file:  # `with` so the handle is closed
    for line in dict_file:
        term = line.strip().split("\t")[0]
        index_2_token[index] = term
        token_2_index[term] = index
        index += 1
        if index == max_vocab_file - 1:
            token_2_index['UNK'] = index
            index_2_token[index] = 'UNK'
            break

# Build input pipeline, model, and session config, then run the training loop.
batch_size = model_params['batch_size']
seq_length = model_params['max_seq_length']
model = TWModel(model_params)
# allow_soft_placement lets TF fall back to CPU for ops without a GPU kernel.
cfg = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
train_generator = get_next_batch('../data/train_shuf.dat', token_2_index, batch_size, stop_words)
test_generator = get_next_batch('../data/test_shuf.dat', token_2_index, batch_size, stop_words)
with tf.Session(config=cfg) as session:
    session.run(tf.global_variables_initializer())

    # Output directory for models and summaries
    timestamp = str(int(time.time()))
    out_dir = os.path.abspath(os.path.join(os.path.curdir, "bilstm", timestamp))
    logging.info("Writing to %s" % out_dir)

    # Checkpoint directory. Tensorflow assumes this directory already
    # exists so we need to create it
    checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
    checkpoint_prefix = os.path.join(checkpoint_dir, "model")
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    # Training loop: the generator is infinite, so this runs until killed.
    for iteration, batch in enumerate(train_generator):
        # Pad/truncate each query to seq_length and record the true token
        # count so the model can mask the padding positions.
        query_token_length = np.zeros(batch_size, np.int32)
        query_encode = np.zeros((batch_size, seq_length), np.int32)
        predict_encode = np.zeros((batch_size, seq_length), np.int32)
        batch_query = batch[0]
        batch_predict = batch[1]
        for i in range(batch_size):
            query_token_length[i] = min(seq_length, len(batch_query[i]))
            for j in range(query_token_length[i]):
                query_encode[i][j] = batch_query[i][j]
                predict_encode[i][j] = batch_predict[i][j]
        feed_dict = {
            model.pred: predict_encode,
            model._src_input_data: query_encode,
            model._src_seq_length: query_token_length,
            # NOTE(review): hard-coded 0.5 here although
            # model_params['train_keep_prob'] is 0.75 — confirm intended.
            model.keep_prob: 0.5
        }
        loss, _ = session.run([model.loss, model.train], feed_dict=feed_dict)
        if iteration > 0 and (iteration % 50) == 0:
            print('iteration %d, loss %.05f' % (iteration, loss))

        # Periodic evaluation on one held-out batch (dropout disabled).
        if iteration > 0 and (iteration % 100) == 0:
            test_batch = next(test_generator)
            query_token_length = np.zeros(batch_size, np.int32)
            query_encode = np.zeros((batch_size, seq_length), np.int32)
            predict_encode = np.zeros((batch_size, seq_length), np.int32)
            batch_query = test_batch[0]
            batch_predict = test_batch[1]
            for i in range(batch_size):
                query_token_length[i] = min(seq_length, len(batch_query[i]))
                for j in range(query_token_length[i]):
                    query_encode[i][j] = batch_query[i][j]
                    predict_encode[i][j] = batch_predict[i][j]
            feed_dict = {
                model.pred: predict_encode,
                model._src_input_data: query_encode,
                model._src_seq_length: query_token_length,
                model.keep_prob: 1.0
            }
            loss, _pred, pred, accuracy = session.run([model.loss, model.predict, model.pred, model.accuracy],
                                                      feed_dict=feed_dict)
            # Only the first example of the batch is printed here.
            print(' '.join([index_2_token[i] for i in query_encode[0]]))
            print("label:", predict_encode[0])
            print("pred :", _pred.shape)
            print("pred: ", _pred[0:seq_length])
            print('test loss %.05f, accuracy %.05f' % (loss, accuracy))

        # Checkpoint every 5000 iterations (including iteration 0).
        if iteration % 5000 == 0:
            path = model.saver.save(session, checkpoint_prefix, global_step=iteration)
            logging.info("Saved model checkpoint to %s" % path)