import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy import random
from torch.nn.parameter import Parameter
from utils import *


def get_batches(pairs, neighbors, batch_size):
    """Yield mini-batches of (src, dst, edge_type, src_neighbors) tensors.

    pairs: sequence of (center, context, edge_type) training triples.
    neighbors: per-node neighbor table indexed by node id.
    The final batch may be shorter than batch_size.
    """
    n_batches = (len(pairs) + (batch_size - 1)) // batch_size  # ceil division

    for b in range(n_batches):
        start = b * batch_size
        chunk = pairs[start:start + batch_size]  # slicing clips at the end
        src = [p[0] for p in chunk]      # center nodes
        dst = [p[1] for p in chunk]      # context nodes
        etype = [p[2] for p in chunk]    # edge type of each pair
        neigh = [neighbors[p[0]] for p in chunk]  # per-type neighbors of src
        yield torch.tensor(src), torch.tensor(dst), torch.tensor(etype), torch.tensor(neigh)


class GATNEModel(nn.Module):
    """GATNE node-embedding model (transductive GATNE-T / inductive GATNE-I).

    The final embedding of a node under one edge type is its base embedding
    plus M_r^T applied to an attention-weighted mix of the node's per-type
    edge embeddings, each aggregated from sampled neighbors. When `features`
    is given (GATNE-I), base and edge embeddings are linear transforms of the
    raw node features instead of free parameters.
    """

    def __init__(
            self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features
    ):
        # num_nodes: number of graph nodes.
        # embedding_size: base (and final) embedding dimension.
        # embedding_u_size: per-edge-type edge-embedding dimension.
        # edge_type_count: number of edge types (layers of the multiplex graph).
        # dim_a: attention hidden-layer dimension.
        # features: optional (num_nodes, feature_dim) node-attribute tensor;
        #           None selects the transductive GATNE-T parameterization.
        super(GATNEModel, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size  # base / final embedding dim
        self.embedding_u_size = embedding_u_size  # edge-embedding dim
        self.edge_type_count = edge_type_count  # number of edge types
        self.dim_a = dim_a  # attention hidden dim
        # self.features stays None for GATNE-T; set only when features given.
        self.features = None
        if features is not None:  # GATNE-I: embeddings are transforms of features
            self.features = features
            feature_dim = self.features.shape[-1]
            # Projects raw features into the base-embedding space.
            self.embed_trans = Parameter(torch.FloatTensor(feature_dim, embedding_size))
            # Per-edge-type projection of raw features into edge embeddings.
            self.u_embed_trans = Parameter(torch.FloatTensor(edge_type_count, feature_dim, embedding_u_size))
        else:
            # GATNE-T: free base embedding per node, (num_nodes, embedding_size).
            self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
            # Free edge embedding per node and edge type,
            # (num_nodes, edge_type_count, embedding_u_size).
            self.node_type_embeddings = Parameter(
                torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)
            )
        # M_r: maps the aggregated edge embedding back to embedding_size;
        # one (embedding_u_size, embedding_size) matrix per edge type.
        self.trans_weights = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)
        )
        # W_r: first attention projection, (embedding_u_size, dim_a) per type.
        self.trans_weights_s1 = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)
        )
        # w_r: second attention projection to a scalar score, (dim_a, 1) per type.
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))
        # Initialize all parameters (uniform for embedding tables, scaled
        # normal for the projection matrices).
        self.reset_parameters()

    def reset_parameters(self):
        # Initialize whichever parameter set exists for the chosen variant.
        if self.features is not None:
            self.embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
            self.u_embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        else:
            # Uniform(-1, 1) init for the free base and edge embedding tables.
            self.node_embeddings.data.uniform_(-1.0, 1.0)
            self.node_type_embeddings.data.uniform_(-1.0, 1.0)
        # Scaled-normal init for M_r, W_r and w_r.
        self.trans_weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, train_inputs, train_types, node_neigh):
        # train_inputs: (batch,) center-node ids.
        # train_types: (batch,) edge-type id of each training pair.
        # node_neigh: neighbor ids of each center node; assumed shaped
        #   (batch, edge_type_count, neighbor_samples) — TODO confirm vs caller.
        # Returns (batch, embedding_size) L2-normalized final embeddings.
        if self.features is None:  # GATNE-T
            # Base embeddings of the batch, (batch, embedding_size).
            node_embed = self.node_embeddings[train_inputs]
            # Fancy indexing with a 3-D id tensor yields
            # (batch, T, n_neigh, T, u): every neighbor contributes its edge
            # embedding for every type; the diagonal below keeps only the
            # embedding whose type matches the indexing type.
            node_embed_neighbors = self.node_type_embeddings[node_neigh]
        else:  # GATNE-I
            node_embed = torch.mm(self.features[train_inputs], self.embed_trans)
            node_embed_neighbors = torch.einsum('bijk,akm->bijam', self.features[node_neigh],
                                                self.u_embed_trans)  # neighbor edge embeddings u_i
        # Diagonal over dims 1 and 3 gives (batch, n_neigh, u, T);
        # permute to (batch, T, n_neigh, u).
        node_embed_tmp = torch.diagonal(node_embed_neighbors, dim1=1, dim2=3).permute(0, 3, 1, 2)
        # Sum-aggregate the sampled neighbors per edge type:
        # (batch, T, u) edge embeddings U_i.
        node_type_embed = torch.sum(node_embed_tmp, dim=2)

        # Per-sample parameter slices selected by each pair's edge type.
        trans_w = self.trans_weights[train_types]  # M_r, (batch, u, embedding_size)
        trans_w_s1 = self.trans_weights_s1[train_types]  # W_r, (batch, u, dim_a)
        trans_w_s2 = self.trans_weights_s2[train_types]  # w_r, (batch, dim_a, 1)

        # Attention over edge types: softmax_r( w_r^T tanh(W_r U_i) ),
        # result shape (batch, 1, T).
        attention = F.softmax(
            # softmax( w_r^T * tanh(W_r * U_i) )^T
            torch.matmul(
                # (batch, T, u) @ (batch, u, dim_a) -> (batch, T, dim_a)
                # then @ (batch, dim_a, 1)         -> (batch, T, 1)
                torch.tanh(torch.matmul(node_type_embed, trans_w_s1)), trans_w_s2
            ).squeeze(2),
            dim=1,
        ).unsqueeze(1)

        # Attention-weighted mix of the per-type edge embeddings, (batch, 1, u).
        node_type_embed = torch.matmul(attention, node_type_embed)

        # Final embedding: base + M_r^T-projected edge embedding,
        # (batch, 1, u) @ (batch, u, size) -> (batch, 1, size) -> (batch, size).
        node_embed = node_embed + torch.matmul(node_type_embed, trans_w).squeeze(1)  # base embedding + edge embedding

        # Row-wise L2 normalization of the final embeddings.
        last_node_embed = F.normalize(node_embed, dim=1)

        return last_node_embed


class NSLoss(nn.Module):
    """Negative-sampling (skip-gram style) loss.

    E = -log(sigmoid(c_j^T v)) - sum_k log(sigmoid(-c_k^T v)),
    where c are context ("output") vectors held by this module and v are the
    input embeddings produced by the encoder.
    """

    def __init__(self, num_nodes, num_sampled, embedding_size):
        # num_nodes: vocabulary size; num_sampled: negatives per positive pair.
        super(NSLoss, self).__init__()
        self.num_nodes = num_nodes
        self.num_sampled = num_sampled
        self.embedding_size = embedding_size
        # Context ("output") vectors c_j, one row per node.
        self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        # Sampling distribution over node ids; decreasing in k, so nodes
        # sorted by frequency (as index2word is) are drawn more often.
        self.sample_weights = F.normalize(
            torch.Tensor(
                [
                    (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
                    for k in range(num_nodes)
                ]
            ),
            dim=0,
        )

        self.reset_parameters()

    def reset_parameters(self):
        # Gaussian init scaled by 1/sqrt(d), the usual embedding-table scheme.
        self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, input, embs, label):
        """input: (n,) center ids; embs: (n, d) embeddings; label: (n,) context ids.

        Returns the mean negative-sampling loss over the batch.
        """
        batch = input.shape[0]
        # Positive term: log sigmoid(c_j^T v) for each pair, shape (n,).
        pos_term = torch.mul(embs, self.weights[label]).sum(1).sigmoid().log()

        # Draw num_sampled negatives per pair from the frequency-biased
        # distribution, shape (n, num_sampled).
        neg_ids = torch.multinomial(
            self.sample_weights, self.num_sampled * batch, replacement=True
        ).view(batch, self.num_sampled)

        # Negated context vectors -c_k, shape (n, num_sampled, d).
        neg_vectors = torch.neg(self.weights[neg_ids])

        # Negative term: sum_k log sigmoid(-c_k^T v), shape (n,).
        neg_term = (
            torch.bmm(neg_vectors, embs.unsqueeze(2)).sigmoid().log().sum(1).squeeze()
        )

        # Negate the objective and average over the batch.
        return -(pos_term + neg_term).sum() / batch


def train_model(network_data, feature_dic):
    """Train GATNE and evaluate link prediction each epoch.

    network_data: dict edge_type -> training edges (one layer per type).
    feature_dic: optional dict node -> attribute vector. When present the
        inductive variant (GATNE-I) is trained; otherwise GATNE-T.
    Returns the (auc, f1, pr) test scores from the epoch with the best
    validation AUC (early stopping controlled by args.patience).

    NOTE(review): reads module-level globals set in __main__ (args, file_name,
    valid_*/testing_* split dicts) plus helpers imported from utils.
    """
    # Build the random-walk corpus: vocabulary, index->node-name mapping and
    # skip-gram (center, context, edge_type) training pairs.
    vocab, index2word, train_pairs = generate(network_data, args.num_walks, args.walk_length, args.schema, file_name,
                                              args.window_size, args.num_workers, args.walk_file)

    edge_types = list(network_data.keys())
    num_nodes = len(index2word)
    edge_type_count = len(edge_types)
    epochs = args.epoch
    batch_size = args.batch_size
    embedding_size = args.dimensions  # base (and final) embedding dimension
    embedding_u_size = args.edge_dim  # per-edge-type edge-embedding dimension
    num_sampled = args.negative_samples  # negatives per positive pair
    dim_a = args.att_dim  # attention hidden dimension
    neighbor_samples = args.neighbor_samples  # neighbors aggregated per type

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Fixed-size neighbor table: neighbors[node][type] holds sampled neighbor ids.
    neighbors = generate_neighbors(network_data, vocab, num_nodes, edge_types, neighbor_samples)

    features = None
    if feature_dic is not None:  # GATNE-I: assemble the dense feature matrix
        feature_dim = len(list(feature_dic.values())[0])
        print('feature dimension: ' + str(feature_dim))
        features = np.zeros((num_nodes, feature_dim), dtype=np.float32)
        for key, value in feature_dic.items():
            if key in vocab:
                features[vocab[key].index, :] = np.array(value)
        features = torch.FloatTensor(features).to(device)

    # Encoder and negative-sampling loss (the loss holds the context vectors).
    model = GATNEModel(
        num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features
    )
    nsloss = NSLoss(num_nodes, num_sampled, embedding_size)

    model.to(device)
    nsloss.to(device)
    # One optimizer over both the encoder's and the loss's parameters.
    optimizer = torch.optim.Adam(
        [{"params": model.parameters()}, {"params": nsloss.parameters()}], lr=1e-4
    )

    best_score = 0
    test_score = (0.0, 0.0, 0.0)
    patience = 0
    for epoch in range(epochs):
        random.shuffle(train_pairs)
        # Lazy generator; batches are produced while iterating below.
        batches = get_batches(train_pairs, neighbors, batch_size)

        data_iter = tqdm(
            batches,
            desc="epoch %d" % (epoch),
            total=(len(train_pairs) + (batch_size - 1)) // batch_size,
            bar_format="{l_bar}{r_bar}",
        )
        avg_loss = 0.0

        for i, data in enumerate(data_iter):
            optimizer.zero_grad()
            # data = (src, dst, edge_type, src_neighbors); the encoder needs
            # src ids, pair edge types and src neighbors.
            embs = model(data[0].to(device), data[2].to(device), data[3].to(device), )  # (batch, embedding_size)

            # Negative-sampling loss between src embeddings and dst contexts.
            loss = nsloss(data[0].to(device), embs, data[1].to(device))
            loss.backward()
            optimizer.step()

            avg_loss += loss.item()

            if i % 5000 == 0:
                post_fix = {
                    "epoch": epoch,
                    "iter": i,
                    "avg_loss": avg_loss / (i + 1),
                    "loss": loss.item(),
                }
                data_iter.write(str(post_fix))

        # Export: for every edge type, each node's embedding under that type.
        final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
        for i in range(num_nodes):
            # Run node i once per edge type: same node id repeated, ...
            train_inputs = torch.tensor([i for _ in range(edge_type_count)]).to(device)
            # ... paired with each edge-type id, ...
            train_types = torch.tensor(list(range(edge_type_count))).to(device)
            # ... and its neighbor table duplicated per type.
            node_neigh = torch.tensor(
                [neighbors[i] for _ in range(edge_type_count)]
            ).to(device)

            # Cast ids to int64 so they are valid embedding indices.
            node_emb = model(train_inputs, train_types, node_neigh.type(torch.int64))
            for j in range(edge_type_count):
                final_model[edge_types[j]][index2word[i]] = (
                    node_emb[j].cpu().detach().numpy()
                )

        # Link-prediction evaluation on the validation and test splits.
        valid_aucs, valid_f1s, valid_prs = [], [], []
        test_aucs, test_f1s, test_prs = [], [], []
        for i in range(edge_type_count):
            if args.eval_type == "all" or edge_types[i] in args.eval_type.split(","):
                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    valid_true_data_by_edge[edge_types[i]],
                    valid_false_data_by_edge[edge_types[i]],
                )

                valid_aucs.append(tmp_auc)
                valid_f1s.append(tmp_f1)
                valid_prs.append(tmp_pr)

                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    testing_true_data_by_edge[edge_types[i]],
                    testing_false_data_by_edge[edge_types[i]],
                )
                test_aucs.append(tmp_auc)
                test_f1s.append(tmp_f1)
                test_prs.append(tmp_pr)
        print("valid auc:", np.mean(valid_aucs))
        print("valid pr:", np.mean(valid_prs))
        print("valid f1:", np.mean(valid_f1s))

        average_auc = np.mean(test_aucs)
        average_f1 = np.mean(test_f1s)
        average_pr = np.mean(test_prs)

        # Early stopping on validation AUC; keep test scores from best epoch.
        cur_score = np.mean(valid_aucs)
        if cur_score > best_score:
            best_score = cur_score
            test_score = (average_auc, average_f1, average_pr)
            patience = 0
        else:
            patience += 1
            if patience > args.patience:
                print("Early Stopping")
                break
    return test_score


if __name__ == "__main__":
    args = parse_args()
    # NOTE(review): the CLI values below are overridden with hard-coded local
    # Windows paths — debugging leftovers; remove for general use.
    # args.input = r'F:\Desktop\论文学习笔记\代码\GATNE-master\data\amazon'
    args.input = r'F:\Desktop\论文学习笔记\代码\GATNE-master\data\example'
    # Node attribute file: when set, the inductive GATNE-I variant is trained.
    args.features = r'F:\Desktop\论文学习笔记\代码\GATNE-master\data\example\feature.txt'
    file_name = args.input
    print(args)
    if args.features is not None:  # no feature file would select GATNE-T instead
        feature_dic = load_feature_data(args.features)
    else:
        feature_dic = None

    # Original POSIX-style paths kept for reference:
    # training_data_by_type = load_training_data(file_name + "/train.txt")
    # valid_true_data_by_edge, valid_false_data_by_edge = load_testing_data(
    #     file_name + "/valid.txt"
    # )
    # testing_true_data_by_edge, testing_false_data_by_edge = load_testing_data(
    #     file_name + "/test.txt"
    # )

    # Windows-style separators to match the hard-coded input paths above.
    training_data_by_type = load_training_data(file_name + r"\train.txt")
    valid_true_data_by_edge, valid_false_data_by_edge = load_testing_data(
        file_name + r"\valid.txt"
    )
    testing_true_data_by_edge, testing_false_data_by_edge = load_testing_data(
        file_name + r"\test.txt"
    )

    # Train and report the test scores from the best validation epoch.
    average_auc, average_f1, average_pr = train_model(training_data_by_type, feature_dic)

    print("Overall ROC-AUC:", average_auc)
    print("Overall PR-AUC", average_pr)
    print("Overall F1:", average_f1)
