# coding:utf-8
import argparse
import math
import timeit
import numpy as np
import tensorflow as tf
import multiprocessing as mp
from knowlege_graph import KnowledgeGraph
from datetime import datetime
import os

def main():
    """Parse CLI arguments, build the DKRL model, then run the training
    loop with periodic link-prediction evaluation and checkpointing."""
    # args setting
    parser = argparse.ArgumentParser(description='DKRL(CNN)+TransE')
    parser.add_argument('--data_dir', type=str, default='data/FB15k/')
    parser.add_argument('--pretrain_wv_file',type=str,default='data/word2vec/wordvec_100d.npy')
    parser.add_argument('--embedding_dim', type=int, default=100)
    parser.add_argument('--word_embedding_dim', type=int, default=100)
    parser.add_argument('--conv1_height',type=int,default=2)
    parser.add_argument('--conv1_stride',type=int,default=1)
    parser.add_argument('--conv1_deep',type=int,default=100)
    parser.add_argument('--conv2_height',type=int,default=2)
    parser.add_argument('--conv2_stride',type=int,default=1)
    parser.add_argument('--margin_value', type=float, default=1.0)
    parser.add_argument('--score_func', type=str, default='L1')
    parser.add_argument('--batch_size', type=int, default=2400)
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--ckpt_dir', type=str, default='ckpt/')
    parser.add_argument('--summary_dir', type=str, default='summary/')
    parser.add_argument('--max_epoch', type=int, default=500)
    parser.add_argument('--eval_freq', type=int, default=10)
    parser.add_argument('--eval_type', type=str, default='stru')
    args = parser.parse_args()
    # fixed banner typo: was 'In DRKL Model'
    print('='*20+'In DKRL Model'+'='*20)
    print(args)
    # data setting: load the knowledge graph (with entity descriptions)
    # and the pretrained word vectors used by the CNN description encoder.
    kg = KnowledgeGraph(data_dir=args.data_dir,has_text=True)
    pretrain_wv = np.load(args.pretrain_wv_file)
    kge_model = DKRL(embedding_dim=args.embedding_dim,word_dim=args.word_embedding_dim,pretrain_wv=pretrain_wv,margin_value=args.margin_value,conv_height=(args.conv1_height,args.conv2_height),conv1_deep=args.conv1_deep,
                    conv_stride=(args.conv1_stride,args.conv2_stride),score_func=args.score_func, batch_size=args.batch_size, learning_rate=args.learning_rate,
                     eval_type=args.eval_type,kg=kg)
    saver = tf.train.Saver()
    # use '-' instead of ':' in the timestamp so the checkpoint directory
    # name is also valid on Windows filesystems
    now = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    model_path = os.path.join('.','model_save',now)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    ckpt_prefix = os.path.join(model_path, "model")
    # allow_growth avoids grabbing all GPU memory up front
    gpu_config = tf.GPUOptions(allow_growth=True)
    sess_config = tf.ConfigProto(gpu_options=gpu_config)
    with tf.Session(config=sess_config) as sess:
        print('-----Initializing tf graph-----')
        sess.run(tf.global_variables_initializer())
        print('-----Initialization accomplished-----')
        kge_model.check_norm(session=sess)
        summary_writer = tf.summary.FileWriter(logdir=args.summary_dir, graph=sess.graph)
        for epoch in range(args.max_epoch):
            print('=' * 30 + '[EPOCH {}]'.format(epoch) + '=' * 30)
            kge_model.launch_training(session=sess, summary_writer=summary_writer)
            # evaluate + checkpoint every `eval_freq` epochs
            if (epoch + 1) % args.eval_freq == 0:
                kge_model.launch_evaluation(session=sess, summary_writer=summary_writer)
                saver.save(sess,ckpt_prefix,global_step=epoch)

class DKRL:
    """DKRL: joint TransE-style structure embeddings and CNN-encoded entity
    description embeddings, trained with a max-margin ranking loss over all
    four head/tail combinations of structure vs. description embeddings.

    The TF1 computation graph is built eagerly in the constructor:
    ``build_graph`` for training and ``build_eval_graph`` for
    link-prediction evaluation.
    """

    def __init__(self,
                 embedding_dim, word_dim, pretrain_wv, margin_value, conv_height, conv1_deep, conv_stride, score_func,
                 batch_size, learning_rate, eval_type, kg=None):
        """Store hyper-parameters and build the training/evaluation graphs.

        Args:
            embedding_dim: dimension of entity/relation embeddings.
            word_dim: dimension of the pretrained word vectors.
            pretrain_wv: pretrained word-vector matrix; row 0 is replaced by
                an all-zero padding row (see description_subgraph).
            margin_value: margin of the max-margin (hinge) loss.
            conv_height: pair (conv1_height, conv2_height) of filter heights.
            conv1_deep: number of filters in the first conv layer.
            conv_stride: pair (conv1_stride, conv2_stride) of vertical strides.
            score_func: 'L1' for L1 distance; any other value means L2.
            batch_size: number of positive triples per training batch.
            learning_rate: Adam learning rate.
            eval_type: 'desc' evaluates with description embeddings,
                anything else with structure embeddings.
            kg: KnowledgeGraph instance; a default one is built when omitted.
        """
        # NOTE: previously the signature used `kg=KnowledgeGraph()`, which
        # constructed (and loaded) a KnowledgeGraph once at class-definition
        # time and shared that instance across all calls; build it lazily.
        self.kg = KnowledgeGraph() if kg is None else kg
        self.embedding_dim = embedding_dim
        self.word_dim = word_dim
        self.pretrain_wv = pretrain_wv
        self.word_num = self.kg.word_num
        self.margin_value = margin_value
        self.conv1_height = conv_height[0]
        self.conv2_height = conv_height[1]
        self.conv1_deep = conv1_deep
        # second conv layer projects descriptions into the entity space
        self.conv2_deep = self.embedding_dim
        self.conv1_stride = conv_stride[0]
        self.conv2_stride = conv_stride[1]
        self.score_func = score_func    # 'L1' or 'L2'
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.eval_type = eval_type
        self.build_graph()
        self.build_eval_graph()

    def build_graph(self):
        """Create placeholders, the structure/description subgraphs, the
        max-margin loss, and the Adam training op (with summaries)."""
        '''placeholder ops'''
        self.triple_pos = tf.placeholder(dtype=tf.int32, shape=[None, 3])  # (head, tail, relation) ids
        self.triple_neg = tf.placeholder(dtype=tf.int32, shape=[None, 3])  # corrupted triples
        # word-id sequences of head/tail descriptions: [batch, 2, max_len]
        self.word_ids_pos = tf.placeholder(dtype=tf.int32,shape=[None,2,self.kg.sequence_max_length])
        self.word_ids_neg = tf.placeholder(dtype=tf.int32,shape=[None,2,self.kg.sequence_max_length])
        self.margin = tf.placeholder(dtype=tf.float32, shape=[None])
        '''output ops'''
        self.loss = None
        self.train_step = None
        self.global_step = None
        self.summary_merge = None
        '''variable ops'''
        self.entity_embedding = None
        self.relation_embedding = None
        self.word_embedding = None
        self.conv1_weights = None
        self.conv1_bias = None
        self.conv2_weights = None
        self.conv2_bias = None
        with tf.name_scope('structure'):
            head_pos_embedding,tail_pos_embedding,\
            rel_pos_embedding,head_neg_embedding,tail_neg_embedding,rel_neg_embedding = self.structure_subgraph()
        with tf.name_scope('description'):
            dhead_pos_embedding, dtail_pos_embedding, dhead_neg_embedding, dtail_neg_embedding = self.description_subgraph()
        with tf.name_scope('loss'):
            with tf.name_scope('pos'):
                pos_loss = self.calculate_loss(head_pos_embedding,tail_pos_embedding,rel_pos_embedding,dhead_pos_embedding,dtail_pos_embedding)
            with tf.name_scope('neg'):
                neg_loss = self.calculate_loss(head_neg_embedding,tail_neg_embedding,rel_neg_embedding,dhead_neg_embedding,dtail_neg_embedding)
            # hinge loss: positive scores must beat negative ones by `margin`
            self.loss = tf.reduce_sum(tf.nn.relu(self.margin + pos_loss - neg_loss), name='max_margin_loss')
            tf.summary.scalar(name=self.loss.op.name, tensor=self.loss)
        with tf.name_scope('train_step'):
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            self.global_step = tf.Variable(initial_value=0, trainable=False, name='global_step')
            self.train_step = optimizer.minimize(self.loss, global_step=self.global_step)
            self.summary_merge = tf.summary.merge_all()

    def calculate_loss(self, head_embedding,tail_embedding,relation_embedding,desc_head_embedding,desc_tail_embedding):
        """Return the summed TransE energy over the four structure(s)/
        description(d) combinations: ss + dd + ds + sd (lower is better)."""
        loss_ss = self.calculate_distance(head_embedding+relation_embedding-tail_embedding)
        loss_dd = self.calculate_distance(desc_head_embedding+relation_embedding-desc_tail_embedding)
        loss_ds = self.calculate_distance(desc_head_embedding+relation_embedding-tail_embedding)
        loss_sd = self.calculate_distance(head_embedding+relation_embedding-desc_tail_embedding)
        loss = loss_ss+loss_dd+loss_ds+loss_sd
        return loss

    def calculate_distance(self,embedding):
        """Reduce an embedding difference to a per-row scalar distance
        (L1 when score_func == 'L1', otherwise squared L2)."""
        if self.score_func == 'L1':
            distance = tf.reduce_sum(tf.abs(embedding),axis=1)
        else:
            distance = tf.reduce_sum(tf.square(embedding),axis=1)
        return distance

    def structure_subgraph(self):
        """Create entity/relation embedding variables (uniform init per the
        TransE paper bound 6/sqrt(dim)), L2-normalize them, and look up the
        positive/negative triple embeddings."""
        '''embeddings'''
        bound = 6 / math.sqrt(self.embedding_dim)
        with tf.variable_scope('embedding'):
            _entity_embedding = tf.get_variable(name='entity',
                                                    shape=[self.kg.entity_number, self.embedding_dim],
                                                    initializer=tf.random_uniform_initializer(minval=-bound,
                                                                                              maxval=bound))
            _relation_embedding = tf.get_variable(name='relation',
                                                      shape=[self.kg.relation_number, self.embedding_dim],
                                                      initializer=tf.random_uniform_initializer(minval=-bound,
                                                                                                maxval=bound))
        with tf.name_scope('normalization'):
            # normalization is a graph op, so lookups always see unit rows
            self.entity_embedding = tf.nn.l2_normalize(_entity_embedding, dim=1)
            self.relation_embedding = tf.nn.l2_normalize(_relation_embedding, dim=1)
            tf.summary.histogram(name=self.entity_embedding.op.name, values=self.entity_embedding)
            tf.summary.histogram(name=self.relation_embedding.op.name, values=self.relation_embedding)
        with tf.name_scope('lookup'):
            head_pos = tf.nn.embedding_lookup(self.entity_embedding, self.triple_pos[:, 0])
            tail_pos = tf.nn.embedding_lookup(self.entity_embedding, self.triple_pos[:, 1])
            relation_pos = tf.nn.embedding_lookup(self.relation_embedding, self.triple_pos[:, 2])
            head_neg = tf.nn.embedding_lookup(self.entity_embedding, self.triple_neg[:, 0])
            tail_neg = tf.nn.embedding_lookup(self.entity_embedding, self.triple_neg[:, 1])
            relation_neg = tf.nn.embedding_lookup(self.relation_embedding, self.triple_neg[:, 2])
        return head_pos,tail_pos,relation_pos,head_neg,tail_neg,relation_neg

    def description_subgraph(self):
        """Build the word-embedding table (fixed all-zero padding row 0 +
        trainable pretrained vectors), look up description word sequences,
        and encode them with the CNN."""
        with tf.variable_scope('embedding'):
            # row 0 is a constant zero vector used for sequence padding
            pad_embedding = tf.constant(shape=[1,self.word_dim],value=[0]*self.word_dim,dtype=tf.float32)
            _word_embedding = tf.get_variable(name='word',initializer=self.pretrain_wv[1:])
            cast_word_embedding = tf.cast(_word_embedding,dtype=tf.float32)
            self.word_embedding = tf.concat([pad_embedding,cast_word_embedding],axis=0)
        with tf.name_scope('lookup'):
            head_wv_pos = tf.nn.embedding_lookup(self.word_embedding,self.word_ids_pos[:,0])
            tail_wv_pos = tf.nn.embedding_lookup(self.word_embedding,self.word_ids_pos[:,1])
            head_wv_neg = tf.nn.embedding_lookup(self.word_embedding,self.word_ids_neg[:,0])
            tail_wv_neg = tf.nn.embedding_lookup(self.word_embedding,self.word_ids_neg[:,1])
        with tf.variable_scope('cnn_parameters'):
            # conv1 spans the full word dimension, so it slides over words only
            self.conv1_weights = tf.get_variable('conv1_weights',[self.conv1_height,self.word_dim,1,self.conv1_deep],initializer=tf.truncated_normal_initializer)
            self.conv1_bias = tf.get_variable('conv1_bias',[self.conv1_deep],initializer=tf.constant_initializer(0))
            self.conv2_weights = tf.get_variable('conv2_weights',[self.conv2_height,1,1,self.conv2_deep],initializer=tf.truncated_normal_initializer)
            self.conv2_bias = tf.get_variable('conv2_bias',[self.conv2_deep],initializer=tf.constant_initializer(0))
        with tf.name_scope('cnn'):
            head_pos = self.cnn_infer(head_wv_pos)
            tail_pos = self.cnn_infer(tail_wv_pos)
            head_neg = self.cnn_infer(head_wv_neg)
            tail_neg = self.cnn_infer(tail_wv_neg)
        return head_pos,tail_pos,head_neg,tail_neg

    def cnn_infer(self,inputs):
        """Encode word-vector sequences into L2-normalized description
        embeddings.

        Pipeline: conv1 over words -> per-filter k-max (k=1, i.e. global max)
        pooling -> conv2 over the pooled filter responses -> mean pooling ->
        L2 normalization. Input: [batch, seq_len, word_dim]; output:
        [batch, conv2_deep] (== embedding_dim).
        """
        inputs = tf.expand_dims(inputs,-1)  # add channel dim: [B, L, D, 1]
        with tf.name_scope('layer1_conv'):
            conv1 = tf.nn.conv2d(inputs,self.conv1_weights,strides=[1,self.conv1_stride,1,1],padding='VALID',name='conv1')
            relu1 = tf.nn.relu(tf.nn.bias_add(conv1,self.conv1_bias))
        with tf.name_scope('layer2_pool'):
            # move filters to axis 1 so top_k picks the max over positions
            relu1_transpose = tf.transpose(relu1,perm=[0,3,2,1])
            k = 1
            k_max_pool1,_ = tf.nn.top_k(relu1_transpose,k=k,sorted=False)
            pool1 = tf.reshape(k_max_pool1,[-1,self.conv1_deep*k,1,1])
        with tf.name_scope('layer3_conv'):
            # conv2 mixes adjacent pooled filter responses (height conv2_height)
            conv2 = tf.nn.conv2d(pool1,self.conv2_weights,strides=[1,self.conv2_stride,1,1],padding='VALID',name='conv2')
            relu2 = tf.nn.relu(tf.nn.bias_add(conv2,self.conv2_bias))
        with tf.name_scope('layer4_pool'):
            # average over the whole remaining height -> one vector per entity
            pool2 = tf.nn.avg_pool(relu2,ksize=[1,relu2.get_shape().as_list()[1],1,1],strides=[1,1,1,1],padding='VALID')
            _pool2 = tf.reshape(pool2,[-1,self.conv2_deep])
        with tf.name_scope('normalization'):
            output = tf.nn.l2_normalize(_pool2, dim=1)
        return output

    def build_eval_graph(self):
        """Build ops for link prediction: rank all entities as head/tail
        replacements of a test triple, plus variables + assign ops that
        export the evaluation metrics to TensorBoard."""
        # op producing the CNN description embedding of every entity
        self.all_desc_embedding = None
        with tf.name_scope('get_all_emb'):
            all_word_ids = tf.constant(self.kg.get_all_word_ids())
            all_wv = tf.nn.embedding_lookup(self.word_embedding, all_word_ids)
            self.all_desc_embedding = self.cnn_infer(all_wv)

        self.eval_triple = tf.placeholder(dtype=tf.int32, shape=[3])
        # precomputed description embeddings are fed back in at eval time
        self.all_desc_embedding_ph = tf.placeholder(dtype=tf.float32,shape=[self.kg.entity_number,self.embedding_dim])
        self.idx_head_prediction = None
        self.idx_tail_prediction = None
        self.raw_ave_rank = None
        self.filter_ave_rank = None
        self.raw_hit10 = None
        self.filter_hit10 = None
        with tf.name_scope('evaluation'):
            with tf.name_scope('lookup'):
                relation_embedding = tf.nn.embedding_lookup(self.relation_embedding, self.eval_triple[2])
                if self.eval_type=='desc':
                    desc_head_embedding = tf.nn.embedding_lookup(self.all_desc_embedding_ph,self.eval_triple[0])
                    desc_tail_embedding = tf.nn.embedding_lookup(self.all_desc_embedding_ph,self.eval_triple[1])
                else:
                    head_embedding = tf.nn.embedding_lookup(self.entity_embedding, self.eval_triple[0])
                    tail_embedding = tf.nn.embedding_lookup(self.entity_embedding, self.eval_triple[1])
            with tf.name_scope('distance'):
                # broadcast: score every entity as the head (resp. tail) candidate
                if self.eval_type=='desc':
                    distance_head_prediction = self.all_desc_embedding_ph + relation_embedding - desc_tail_embedding
                    distance_tail_prediction = desc_head_embedding + relation_embedding - self.all_desc_embedding_ph
                    print('='*20+'use descreption embedding to evaluate'+'='*20)
                else:
                    distance_head_prediction = self.entity_embedding + relation_embedding - tail_embedding
                    distance_tail_prediction = head_embedding + relation_embedding - self.entity_embedding
                    print('=' * 20 + 'use structure embedding to evaluate' + '=' * 20)
            with tf.name_scope('rank'):
                # top_k sorts descending, so the best (smallest-distance)
                # candidates end up at the END of idx_*_prediction
                if self.score_func == 'L1':  # L1 score
                    _, self.idx_head_prediction = tf.nn.top_k(tf.reduce_sum(tf.abs(distance_head_prediction), axis=1),
                                                         k=self.kg.entity_number)
                    _, self.idx_tail_prediction = tf.nn.top_k(tf.reduce_sum(tf.abs(distance_tail_prediction), axis=1),
                                                         k=self.kg.entity_number)
                else:  # L2 score
                    _, self.idx_head_prediction = tf.nn.top_k(tf.reduce_sum(tf.square(distance_head_prediction), axis=1),
                                                         k=self.kg.entity_number)
                    _, self.idx_tail_prediction = tf.nn.top_k(tf.reduce_sum(tf.square(distance_tail_prediction), axis=1),
                                                         k=self.kg.entity_number)
            # ops exporting the ranking results (for TensorBoard summaries)
            with tf.variable_scope('eval_result'):
                self.raw_ave_rank = tf.get_variable('raw_avg_rank',shape=[],dtype=tf.float32,trainable=False,initializer=tf.constant_initializer(0))
                self.filter_ave_rank = tf.get_variable('filter_avg_rank',shape=[],dtype=tf.float32,trainable=False,initializer=tf.constant_initializer(0))
                self.raw_hit10 = tf.get_variable('raw_hit10',shape=[],dtype=tf.float32,trainable=False,initializer=tf.constant_initializer(0))
                self.filter_hit10 = tf.get_variable('filter_hit10',shape=[],dtype=tf.float32,trainable=False,initializer=tf.constant_initializer(0))
                # Build the metric-update ops ONCE here. Calling tf.assign()
                # inside launch_evaluation added new ops to the graph on every
                # evaluation round, growing the graph without bound.
                self.raw_ave_rank_ph = tf.placeholder(dtype=tf.float32, shape=[])
                self.filter_ave_rank_ph = tf.placeholder(dtype=tf.float32, shape=[])
                self.raw_hit10_ph = tf.placeholder(dtype=tf.float32, shape=[])
                self.filter_hit10_ph = tf.placeholder(dtype=tf.float32, shape=[])
                self.update_metrics_op = tf.group(
                    tf.assign(self.raw_ave_rank, self.raw_ave_rank_ph),
                    tf.assign(self.filter_ave_rank, self.filter_ave_rank_ph),
                    tf.assign(self.raw_hit10, self.raw_hit10_ph),
                    tf.assign(self.filter_hit10, self.filter_hit10_ph))
                tf.summary.scalar(self.raw_ave_rank.op.name,tensor=self.raw_ave_rank)
                tf.summary.scalar(self.filter_ave_rank.op.name,tensor=self.filter_ave_rank)
                tf.summary.scalar(self.raw_hit10.op.name,tensor=self.raw_hit10)
                tf.summary.scalar(self.filter_hit10.op.name,tensor=self.filter_hit10)
                # NOTE(review): the collection list is wrapped in another
                # list here; verify tf.summary.merge accepts this nesting.
                self.eval_summary_merge = tf.summary.merge([tf.get_collection(tf.GraphKeys.SUMMARIES,'evaluation/eval_result')])

    def launch_training(self, session, summary_writer):
        """Run one training epoch: CPU worker processes turn raw batches
        into (pos, neg, words) training batches while the GPU consumes them."""
        # create child processes to generate training batches
        generator_process_number = 24
        raw_batch_queue = mp.Queue()
        training_batch_queue = mp.Queue()
        for _ in range(generator_process_number):
            mp.Process(target=self.kg.generate_training_batch, kwargs={'in_queue': raw_batch_queue,
                                                                       'out_queue': training_batch_queue}).start()
        print('-----Start training-----')
        start = timeit.default_timer()
        n_batch = 0
        for raw_batch in self.kg.next_raw_batch(self.batch_size):
            raw_batch_queue.put(raw_batch)
            n_batch += 1
        # one None sentinel per worker tells it to exit
        for _ in range(generator_process_number):
            raw_batch_queue.put(None)
        print('-----Constructing training batches-----')
        epoch_loss = 0
        used_triple_number = 0
        # Note: training runs on GPU, batch generation on CPU
        for i in range(n_batch):
            batch_pos, batch_neg ,batch_words_pos, batch_words_neg = training_batch_queue.get()
            batch_loss, _, summary = session.run(fetches=[self.loss, self.train_step, self.summary_merge],
                                                 feed_dict={self.triple_pos: batch_pos,
                                                            self.triple_neg: batch_neg,
                                                            self.word_ids_pos:batch_words_pos,
                                                            self.word_ids_neg:batch_words_neg,
                                                            self.margin: [self.margin_value] * len(batch_pos)})
            summary_writer.add_summary(summary, global_step=self.global_step.eval(session=session))
            epoch_loss += batch_loss
            used_triple_number += len(batch_pos)
            # log progress roughly 100 times per epoch
            num = max(int(n_batch / 100), 1)
            if (i+1)%num==0:
                print('[{:.3f}s] #triple: {}/{} triple_avg_loss: {:.6f}'.format(timeit.default_timer() - start,
                                                                            used_triple_number,
                                                                            self.kg.training_triple_number,
                                                                            batch_loss / len(batch_pos)))
        print()
        print('epoch loss: {:.3f}'.format(epoch_loss))
        print('cost time: {:.3f}s'.format(timeit.default_timer() - start))
        print('-----Finish training-----')
        self.check_norm(session=session)

    def launch_evaluation(self, session,summary_writer):
        """Evaluate link prediction on the test set: mean rank and Hits@10,
        raw and filtered, for head and tail prediction; rank counting is
        fanned out to CPU worker processes."""
        if self.eval_type=='desc':
            # compute every entity's description embedding with the CNN once
            all_desc_embedding = session.run(self.all_desc_embedding)
        else:
            # unused in 'stru' mode but still fed to satisfy the placeholder
            all_desc_embedding = np.zeros(shape=[self.kg.entity_number,self.embedding_dim],dtype=np.float32)

        rank_calculator_num = 24
        eval_result_queue = mp.JoinableQueue()
        rank_result_queue = mp.Queue()
        print('-----Start evaluation-----')
        for _ in range(rank_calculator_num):
            mp.Process(target=self.calculate_rank, kwargs={'in_queue': eval_result_queue,'out_queue': rank_result_queue}).start()
        start = timeit.default_timer()
        used_eval_triple_num = 0
        for eval_triple in self.kg.test_triples:
            idx_head_prediction, idx_tail_prediction = session.run(fetches=[self.idx_head_prediction,
                                                                            self.idx_tail_prediction],
                                                                   feed_dict={self.eval_triple: eval_triple,self.all_desc_embedding_ph:all_desc_embedding})
            eval_result_queue.put((eval_triple, idx_head_prediction, idx_tail_prediction))
            used_eval_triple_num += 1
            if used_eval_triple_num%100==0:
                print('[{:.3f}s] #evaluation triple: {}/{}'.format(timeit.default_timer() - start,
                                                               used_eval_triple_num,
                                                               self.kg.test_triple_number))
        print()
        for _ in range(rank_calculator_num):
            eval_result_queue.put(None)
        print('-----Joining all rank calculator-----')
        # block until all child processes finish rank calculation
        eval_result_queue.join()
        print('-----All rank calculation accomplished-----')
        print('-----Obtaining evaluation results-----')
        '''Raw'''
        head_meanrank_raw = 0
        head_hits10_raw = 0
        tail_meanrank_raw = 0
        tail_hits10_raw = 0
        '''Filter'''
        head_meanrank_filter = 0
        head_hits10_filter = 0
        tail_meanrank_filter = 0
        tail_hits10_filter = 0
        for _ in range(used_eval_triple_num):
            head_rank_raw, tail_rank_raw, head_rank_filter, tail_rank_filter = rank_result_queue.get()
            head_meanrank_raw += head_rank_raw
            # ranks are 0-based, so rank < 10 means "in the top 10"
            if head_rank_raw < 10:
                head_hits10_raw += 1
            tail_meanrank_raw += tail_rank_raw
            if tail_rank_raw < 10:
                tail_hits10_raw += 1
            head_meanrank_filter += head_rank_filter
            if head_rank_filter < 10:
                head_hits10_filter += 1
            tail_meanrank_filter += tail_rank_filter
            if tail_rank_filter < 10:
                tail_hits10_filter += 1
        print('-----Raw-----')
        head_meanrank_raw = (head_meanrank_raw*1.0)/used_eval_triple_num
        head_hits10_raw = (head_hits10_raw*1.0)/used_eval_triple_num
        tail_meanrank_raw = (tail_meanrank_raw*1.0)/used_eval_triple_num
        tail_hits10_raw = (tail_hits10_raw*1.0)/used_eval_triple_num
        print('-----Head prediction-----')
        print('MeanRank: {:.3f}, Hits@10: {:.3f}'.format(head_meanrank_raw, head_hits10_raw))
        print('-----Tail prediction-----')
        print('MeanRank: {:.3f}, Hits@10: {:.3f}'.format(tail_meanrank_raw, tail_hits10_raw))
        print('------Average------')
        print('MeanRank: {:.3f}, Hits@10: {:.3f}'.format((head_meanrank_raw + tail_meanrank_raw) / 2,
                                                         (head_hits10_raw + tail_hits10_raw) / 2))
        print('-----Filter-----')
        head_meanrank_filter = (head_meanrank_filter*1.0)/used_eval_triple_num
        head_hits10_filter = (head_hits10_filter*1.0)/used_eval_triple_num
        tail_meanrank_filter = (tail_meanrank_filter*1.0)/used_eval_triple_num
        tail_hits10_filter = (tail_hits10_filter*1.0)/used_eval_triple_num
        print('-----Head prediction-----')
        print('MeanRank: {:.3f}, Hits@10: {:.3f}'.format(head_meanrank_filter, head_hits10_filter))
        print('-----Tail prediction-----')
        print('MeanRank: {:.3f}, Hits@10: {:.3f}'.format(tail_meanrank_filter, tail_hits10_filter))
        print('-----Average-----')
        print('MeanRank: {:.3f}, Hits@10: {:.3f}'.format((head_meanrank_filter + tail_meanrank_filter) / 2,
                                                         (head_hits10_filter + tail_hits10_filter) / 2))
        print('cost time: {:.3f}s'.format(timeit.default_timer() - start))
        # write the metrics into their summary variables via the pre-built
        # assign ops (formerly tf.assign() here grew the graph every call);
        # the *50 turns the two per-side hit fractions into an averaged percent
        session.run(self.update_metrics_op,
                    feed_dict={self.raw_ave_rank_ph: (head_meanrank_raw + tail_meanrank_raw) / 2,
                               self.filter_ave_rank_ph: (head_meanrank_filter + tail_meanrank_filter) / 2,
                               self.raw_hit10_ph: (head_hits10_raw + tail_hits10_raw) * 50,
                               self.filter_hit10_ph: (head_hits10_filter + tail_hits10_filter) * 50})
        eval_summary_merge = session.run(fetches=self.eval_summary_merge)
        summary_writer.add_summary(eval_summary_merge, global_step=self.global_step.eval(session=session))
        print('-----Finish evaluation-----')

    def calculate_rank(self, in_queue, out_queue):
        """Worker loop: pull (triple, head_pred, tail_pred) items and emit
        0-based raw/filtered ranks of the true head and tail.

        The prediction index arrays come from top_k in DESCENDING score
        order, so they are scanned reversed (best candidate first). Filtered
        ranks skip candidates that form a known true triple. A None item is
        the shutdown sentinel.
        """
        while True:
            idx_predictions = in_queue.get()
            if idx_predictions is None:
                in_queue.task_done()
                return
            else:
                eval_triple, idx_head_prediction, idx_tail_prediction = idx_predictions
                head, tail, relation = eval_triple
                head_rank_raw = 0
                tail_rank_raw = 0
                head_rank_filter = 0
                tail_rank_filter = 0
                for candidate in idx_head_prediction[::-1]:
                    if candidate == head:
                        break
                    else:
                        head_rank_raw += 1
                        if (candidate, tail, relation) in self.kg.all_triples:
                            continue
                        else:
                            head_rank_filter += 1
                for candidate in idx_tail_prediction[::-1]:
                    if candidate == tail:
                        break
                    else:
                        tail_rank_raw += 1
                        if (head, candidate, relation) in self.kg.all_triples:
                            continue
                        else:
                            tail_rank_filter += 1
                out_queue.put((head_rank_raw, tail_rank_raw, head_rank_filter, tail_rank_filter))
                in_queue.task_done()

    def check_norm(self, session):
        """Print the L2 row norms of the entity and relation embeddings
        (sanity check; they should be ~1 after normalization)."""
        print('-----Check norm-----')
        entity_embedding = self.entity_embedding.eval(session=session)
        relation_embedding = self.relation_embedding.eval(session=session)
        entity_norm = np.linalg.norm(entity_embedding, ord=2, axis=1)
        relation_norm = np.linalg.norm(relation_embedding, ord=2, axis=1)
        print('entity norm: {} relation norm: {}'.format(entity_norm, relation_norm))

# Script entry point: build the model and run training/evaluation.
if __name__ == '__main__':
    main()