
#-*- coding: utf-8 -*-
import tensorflow as tf
from model.embedding_matching_net import EMNet
from utils import data_util
from utils import config

conf = config.Config
# Model hyper-parameters (all values come from utils.config.Config;
# actual defaults are defined there, not in this script).
EPOCH = conf.epoch                    # number of passes over the training data (unused in validation)
BATCH_SIZE = conf.batch_size          # records per mini-batch
LEARNING_RATE = conf.learning_rate    # optimizer step size (needed to rebuild the graph identically)
NUM_SAMPLE = conf.num_sample          # negative-sampling count for the sampled softmax — TODO confirm
EMBEDDING_SIZE = conf.embedding_size  # dimensionality of item embeddings
ATTENTION_SIZE = conf.attention_size  # hidden size of the attention layer

# Data / path parameters.
ITEM_INPUT_LENGTH = conf.item_input_length    # max length of the item-sequence input
OTHER_INPUT_LENGTH = conf.other_input_length  # max length of the auxiliary-feature input
PATH_TFRECORD_TRAIN = conf.path_tfrecord_train            # training TFRecord file (unused here)
PATH_TFRECORD_VALIDATION = conf.path_tfrecord_validation  # validation TFRecord file evaluated below
PATH_DICT = conf.path_dict            # item-vocabulary dictionary file
PATH_MODEL = conf.path_model          # checkpoint path restored by tf.train.Saver


def main():
    """Evaluate a trained EMNet checkpoint on the validation TFRecord set.

    Restores the model from PATH_MODEL, then streams validation batches
    through the graph and prints top-k precision (k=5, k=100) and mean
    average precision (k=5 and k=pool_size) for every batch until the
    input queue is exhausted.

    Side effects only (reads files, restores a checkpoint, prints metrics);
    returns None.
    """
    print("record total:", sum(1 for _ in tf.python_io.tf_record_iterator(PATH_TFRECORD_VALIDATION)))

    # load total / 4 recode every batch
    data = data_util.get_total_data(PATH_TFRECORD_VALIDATION)
    item_dict = data_util.get_dict(PATH_DICT)
    print("record load finished")

    bea_model = EMNet(len(item_dict), EMBEDDING_SIZE, NUM_SAMPLE, LEARNING_RATE, ATTENTION_SIZE, ITEM_INPUT_LENGTH, OTHER_INPUT_LENGTH)

    with tf.Session() as sess:
        # Restore trained weights. Saver is created before the metric ops
        # below on purpose: metric local variables must NOT be part of the
        # checkpoint.
        saver = tf.train.Saver()
        saver.restore(sess, PATH_MODEL)

        # Top-k precision: fraction of batch rows whose true label is among
        # the k highest-scoring predictions.
        labels_flat = tf.reshape(bea_model.input_label, [-1])
        tf_acc5 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(predictions=bea_model.out, targets=labels_flat, k=5),
                    tf.float32))
        tf_acc100 = tf.reduce_mean(
            tf.cast(tf.nn.in_top_k(predictions=bea_model.out, targets=labels_flat, k=100),
                    tf.float32))
        # MAP. NOTE(review): tf.metrics.sparse_average_precision_at_k is
        # deprecated in favor of tf.metrics.average_precision_at_k (same
        # signature) since TF 1.8 — switch once the minimum TF version is
        # confirmed. These are streaming metrics: the second return value
        # is the update op, so the printed value accumulates across batches.
        _, tf_map5 = tf.metrics.sparse_average_precision_at_k(predictions=bea_model.out,
                                                              labels=labels_flat, k=5)
        _, tf_map = tf.metrics.sparse_average_precision_at_k(predictions=bea_model.out,
                                                             labels=labels_flat, k=bea_model.pool_size)

        # Streaming metrics above create local variables; initialize them
        # (no tf.group needed for a single op).
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        print("-----------validation begin----------")
        step = 0  # renamed from `iter`, which shadowed the builtin
        try:
            while not coord.should_stop():
                batch_data = sess.run(data)
                step += 1
                feed_dict = {
                    bea_model.input_other: batch_data['other'],
                    bea_model.input_item: batch_data['item'],
                    bea_model.input_label: batch_data['label'],
                    bea_model.input_weight: batch_data['weight'],
                }
                # `mean_ap` renamed from `map`, which shadowed the builtin.
                acc5, acc100, map5, mean_ap = sess.run([tf_acc5, tf_acc100, tf_map5, tf_map], feed_dict)
                print("iter: {}, acc5: {}, acc100: {}, map5: {}, map: {}".format(step, acc5, acc100, map5, mean_ap))

        except tf.errors.OutOfRangeError:
            # Input queue exhausted — normal end of the validation pass.
            print('record limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)


# Run the validation pass only when executed as a script, not on import.
if __name__ == '__main__':
    main()





