import logging
import os
import random
import sys
import time
import numpy as np
import tensorflow.compat.v1 as tf
from train import load_data, get_corrupted, disL2, save_data


def do_test(entity, relation, G, entity_data, relation_data, sample_size):
    """Evaluate TransE embeddings by link prediction on a random subset of G.

    For each sampled triple, every entity id is tried as a replacement head
    and as a replacement tail; candidates are ranked by the distance
    disL2(head_emb, rel_emb, tail_emb) and the rank of the true entity is
    recorded for both directions.

    Args:
        entity: dict whose values() enumerate all candidate entity ids.
        relation: unused; kept for interface compatibility with callers.
        G: sequence of triples stored as (head, tail, relation) ids
           (tail at index 1, relation at index 2 — see the loop below).
        entity_data: entity embedding matrix indexed by entity id.
        relation_data: relation embedding matrix indexed by relation id.
        sample_size: number of triples to sample for evaluation.

    Returns:
        Tuple (mean_rank, hits10) averaged over head and tail predictions.
    """
    rank_sum, hits10 = 0, 0
    # Sample WITHOUT replacement so no triple is scored twice
    # (the previous randint loop could draw duplicates).
    n = min(sample_size, len(G))
    sampled = [G[i] for i in random.sample(range(len(G)), n)]
    for index, triple in enumerate(sampled, start=1):
        h_true, t_true, r = triple[0], triple[1], triple[2]
        head, tail = [], []
        for i in entity.values():
            e = int(i)
            # Candidate head: replace h, keep (t, r) fixed.
            head.append([e, t_true, r, disL2(entity_data[e], relation_data[r], entity_data[t_true])])
            # Candidate tail: replace t, keep (h, r) fixed.
            tail.append([h_true, e, r, disL2(entity_data[h_true], relation_data[r], entity_data[e])])
        # Rank candidates by distance (ascending: smaller = more plausible).
        head.sort(key=lambda x: x[-1])
        tail.sort(key=lambda x: x[-1])
        for i, cand in enumerate(head):
            if cand[0] == h_true:
                if i < 10:  # true head landed in the top 10
                    hits10 += 1
                rank_sum += i + 1
                break
        for i, cand in enumerate(tail):
            if cand[1] == t_true:
                if i < 10:  # true tail landed in the top 10
                    hits10 += 1
                rank_sum += i + 1
                break
        if index % 10 == 0:
            print('{}/{} rank {:.2f}, hits@10 {:.2f}%'.
                  format(index, n, rank_sum / (2 * index), 100 * hits10 / (2 * index)))
    # Each triple contributes two rankings (head and tail), hence 2 * n.
    rank = rank_sum / (2 * n)
    hits10 = hits10 / (2 * n)
    print('mean_rank', rank, 'hits10', hits10)
    return rank, hits10


# def get_data(batch_size):
#     this_batch = np.array(random.sample(G, batch_size), dtype=np.int32)
#     a = np.array([get_corrupted(entity, q) for q in this_batch], dtype=np.int32)
#     data = {
#         # note: triple tuple layout is (head, tail, relation) = indices 0, 1, 2
#         h_pos: [i[0] for i in this_batch],
#         r_pos: [i[2] for i in this_batch],
#         t_pos: [i[1] for i in this_batch],
#         h_neg: [i[0] for i in a],
#         t_neg: [i[1] for i in a],
#         r_neg: [i[2] for i in a]
#     }
#     return data


# ---- Session configuration, data, and model parameters ----
tf.disable_v2_behavior()
tf.reset_default_graph()
config = tf.ConfigProto()
config.allow_soft_placement = True  # fall back to CPU for ops with no GPU kernel
config.gpu_options.per_process_gpu_memory_fraction = 0.8  # cap the process at 80% of GPU memory
config.gpu_options.allow_growth = True
entity, relation, G = load_data()
dim = 50             # embedding dimension
learning_rate = 0.01
margin = 1           # TransE margin hyperparameter
epochs = 1000
batches = 100        # mini-batches per epoch

# Placeholders accept a whole batch of ids at once, hence shape=[None].
h_pos = tf.placeholder(dtype=tf.int32, name='h_pos', shape=[None])
t_pos = tf.placeholder(dtype=tf.int32, name='t_pos', shape=[None])
h_neg = tf.placeholder(dtype=tf.int32, name='h_neg', shape=[None])
t_neg = tf.placeholder(dtype=tf.int32, name='t_neg', shape=[None])
# BUG FIX: this placeholder was created with name='r_neg', colliding with the
# real r_neg below (TF silently renamed it to 'r_neg_1' in the graph).
r_pos = tf.placeholder(dtype=tf.int32, name='r_pos', shape=[None])
r_neg = tf.placeholder(dtype=tf.int32, name='r_neg', shape=[None])
# Not trainable: the step counter gets no gradient updates.
global_steps = tf.Variable(0, dtype=tf.int32, name="global_step", trainable=False)
# Embeddings initialized uniformly in [-6/sqrt(dim), 6/sqrt(dim)],
# as in the TransE paper.
ent_embeddings = tf.get_variable(name="ent_embedding", shape=[len(entity), dim],
                                 initializer=tf.random_uniform_initializer(minval=-6 / np.sqrt(dim),
                                                                           maxval=6 / np.sqrt(dim)))
rel_embeddings = tf.Variable(tf.random_uniform(minval=-6 / np.sqrt(dim), maxval=6 / np.sqrt(dim),
                                               shape=[len(relation), dim]), name="rel_embedding")

# 这里就是仅仅让修改的地方有梯度，这样只会改动那里stop_gradient！！！！
# 这里不写仍然可以梯度下降
# ent_indices = tf.concat([h_pos, t_pos, h_neg, t_neg], 0)
# ent_indices = tf.reshape(ent_indices, [-1, 1])
# ent_value = tf.concat([tf.nn.embedding_lookup(ent_embeddings, h_pos),
#                        tf.nn.embedding_lookup(ent_embeddings, t_pos),
#                        tf.nn.embedding_lookup(ent_embeddings, h_neg),
#                        tf.nn.embedding_lookup(ent_embeddings, t_neg)], 0)
# part_ent_embeddings = tf.scatter_nd([ent_indices], [ent_value], ent_embeddings.shape)
# ent_embeddings_new = part_ent_embeddings + tf.stop_gradient(-part_ent_embeddings + ent_embeddings)
#
# h_pos_data = tf.nn.embedding_lookup(ent_embeddings_new, h_pos)
# t_pos_data = tf.nn.embedding_lookup(ent_embeddings_new, t_pos)
# h_neg_data = tf.nn.embedding_lookup(ent_embeddings_new, h_neg)
# t_neg_data = tf.nn.embedding_lookup(ent_embeddings_new, t_neg)
# r_data = tf.nn.embedding_lookup(rel_embeddings, r)
#
h_pos_data = tf.nn.embedding_lookup(ent_embeddings, h_pos)
t_pos_data = tf.nn.embedding_lookup(ent_embeddings, t_pos)
h_neg_data = tf.nn.embedding_lookup(ent_embeddings, h_neg)
t_neg_data = tf.nn.embedding_lookup(ent_embeddings, t_neg)
r_pos_data = tf.nn.embedding_lookup(rel_embeddings, r_pos)
r_neg_data = tf.nn.embedding_lookup(rel_embeddings, r_neg)

# 每一行进行规范化
# ent_embeddings=tf.nn.l2_normalize(ent_embeddings, dim = 1)
# tf.nn.l2_normalize 这个是对每一个值进行规范化
# 4x1,还是4;没啥影响
n1 = tf.reduce_sum(tf.square(h_pos_data + r_pos_data - t_pos_data), 1, keepdims=True)
n2 = tf.reduce_sum(tf.square(h_neg_data + r_neg_data - t_neg_data), 1, keepdims=True)
# 注意>0,而且是先取0，在reduce_sum；Maximum 支持广播
# TODO 草，损失函数写错了
loss = tf.reduce_sum(tf.maximum(n1 - n2 + margin, 0))
optimizer = tf.train.AdamOptimizer(learning_rate)
optimizer = optimizer.minimize(loss, var_list=[ent_embeddings, rel_embeddings], global_step=global_steps)
# 之后进行正则化；keep_dims=True，使结果维度不变
# norm = tf.sqrt(tf.reduce_sum(tf.square(ent_embeddings), 1, keep_dims=True))
# ent_embeddings = ent_embeddings / norm

# tf.summary.histogram('loss', loss)  # histogram是保存张量的
metric_loss = tf.summary.scalar("loss", loss)
# tf.summary.text('text', tf.convert_to_tensor(text))
saver = tf.train.Saver()
start_time = time.time()
with tf.Session(config=config) as sess, tf.device('/gpu:0'):
    # 查看是否能重新加载模型；没必要初始化了
    if 'save' not in os.listdir('.'):
        os.mkdir('save')
    if 'checkpoint' in os.listdir('save'):
        saver.restore(sess, "save/model.ckpt")
        print('reload done.')
    else:
        sess.run(tf.global_variables_initializer())
    merged = tf.summary.merge_all()  # 一定要先merge all
    writer = tf.summary.FileWriter('logs', sess.graph)
    batch_size = len(G) // batches
    # 创建计算loss的总体数据
    # pq = np.array(G, dtype=np.int32)
    # a = np.array([get_corrupted(entity, q) for q in pq], dtype=np.int32)
    # dd = {h_pos: pq[:, 0],
    #       r: pq[:, 1],
    #       t_pos: pq[:, 2],
    #       h_neg: a[:, 0],
    #       t_neg: a[:, 2]}
    for i in range(epochs):
        this_loss = 0
        for batch in range(batches):
            # this_batch = np.array(G[batch * batch_size:(batch + 1) * batch_size], dtype=np.int32)
            this_batch = np.array(random.sample(G, batch_size), dtype=np.int32)
            a = np.array([get_corrupted(entity, q) for q in this_batch], dtype=np.int32)
            data = {
                # 注意元组格式0 1 2.。。
                h_pos: this_batch[:, 0],
                r_pos: this_batch[:, 2],
                t_pos: this_batch[:, 1],
                h_neg: a[:, 0],
                t_neg: a[:, 1],
                r_neg: a[:, 2],
            }
            # data = get_data(batch_size)
            # 注意不要覆盖merged
            opt, l, entity_data, relation_data, summary, gs = sess.run(
                [optimizer, loss, ent_embeddings, rel_embeddings, merged, global_steps], feed_dict=data)
            # opt, l, entity_data, relation_data = sess.run([optimizer, loss, ent_embeddings, rel_embeddings],
            #                                               feed_dict=data)
            this_loss += l
            if (batch + 1) % 50 == 0:
                print('epoch {}/{}, batch {}/{}, use time {:.2f} second(s), loss {:.2f}'.
                      format(i + 1, epochs, batch + 1, batches, time.time() - start_time, this_loss))
        # 保存模型
        save_data(entity_data, relation_data, 'FB15k/entity_out_tf.bin', 'FB15k/relation_out_tf.bin')
        saver_path = saver.save(sess, "save/model.ckpt")
        print('model saved')
        # 计算loss并保存，这次使用的是全部的数据,获得总的loss
        # summary = sess.run(merged, feed_dict=dd)
        writer.add_summary(summary, gs)
        # writer.add_summary(this_loss, i+1)
        print('epoch {}/{}, use time {:.2f} second(s), loss {:.2f}'.
              format(i + 1, epochs, time.time() - start_time, this_loss))
        if (i + 1) % 5 == 0:
            print('start test')
            # 在训练集上测试，取一部分样本进行测试，加快速度
            do_test(entity, relation, np.array(G, dtype=np.int32), entity_data, relation_data, 40)
