#-*- coding: utf-8 -*-
import tensorflow as tf
from model.embedding_matching_net import EMNet
from utils import data_util
from utils import config

# Command-line flags (TF1 flag machinery); defaults come from utils.config.Config
# so every hyper-parameter can be overridden at launch without editing code.
flags = tf.app.flags
FLAGS = flags.FLAGS
conf = config.Config

# ---- trainable-model hyper-parameters -------------------------------------
flags.DEFINE_integer("epoch", conf.epoch, "epoch")
flags.DEFINE_integer("batch_size", conf.batch_size, "The batch size of training")
flags.DEFINE_integer("num_sample", conf.num_sample, "nce loss num_sample")
flags.DEFINE_integer("embedding_size", conf.embedding_size, "embedding size")
flags.DEFINE_integer("attention_size", conf.attention_size, "attention_size")
flags.DEFINE_float("learning_rate", conf.learning_rate, "learning rate")

# Snapshot the resolved flag values into module-level constants.
EPOCH, BATCH_SIZE = FLAGS.epoch, FLAGS.batch_size
LEARNING_RATE = FLAGS.learning_rate
NUM_SAMPLE = FLAGS.num_sample
EMBEDDING_SIZE, ATTENTION_SIZE = FLAGS.embedding_size, FLAGS.attention_size

# ---- data / path settings (taken straight from the config, not flags) ------
ITEM_INPUT_LENGTH = conf.item_input_length
OTHER_INPUT_LENGTH = conf.other_input_length
PATH_TFRECORD_TRAIN = conf.path_tfrecord_train
PATH_TFRECORD_VALIDATION = conf.path_tfrecord_validation
PATH_DICT = conf.path_dict
PATH_MODEL = conf.path_model


def main():
    """Train the EMNet embedding-matching model on the TFRecord training set.

    Streams batches through TF1 queue runners until the input queue raises
    OutOfRangeError (i.e. EPOCH passes over the data are exhausted), logging
    the loss every 1000 batches, then writes a single final checkpoint to
    PATH_MODEL.
    """
    total_records = sum(1 for _ in tf.python_io.tf_record_iterator(PATH_TFRECORD_TRAIN))
    print("record total:", total_records)

    # Build the queued input pipeline and the id->item vocabulary.
    batch_tensors = data_util.get_batch_data([PATH_TFRECORD_TRAIN], BATCH_SIZE, EPOCH)
    item_dict = data_util.get_dict(PATH_DICT)
    print("record load finished")

    model = EMNet(len(item_dict), EMBEDDING_SIZE, NUM_SAMPLE, LEARNING_RATE,
                  ATTENTION_SIZE, ITEM_INPUT_LENGTH, OTHER_INPUT_LENGTH)

    with tf.Session() as sess:
        # Initialize both global and local variables (local ones back the
        # epoch counters of the input queue).
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))
        saver = tf.train.Saver()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        print("-----------train begin----------")
        step = 0
        try:
            while not coord.should_stop():
                batch = sess.run(batch_tensors)
                step += 1
                feed = {
                    model.input_other: batch['other'],
                    model.input_item: batch['item'],
                    model.input_label: batch['label'],
                    model.input_weight: batch['weight'],
                }
                _, loss = sess.run([model.train_op, model.loss], feed)

                if step % 1000 == 0:
                    print("batch: %s loss: %s" % (step, loss))

        except tf.errors.OutOfRangeError:
            # Normal termination: the queue has served EPOCH full passes.
            print('record limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)

        # Persist the final model once training has fully drained.
        saver.save(sess, PATH_MODEL)


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()





