import math
# 3.5
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy import random
from torch.nn.parameter import Parameter
import gc
from utils import *

def get_paths(x, paths, seq_length):
    """Look up the stored path for each node id in ``x`` and pad/truncate to a
    fixed length.

    Args:
        x: iterable of 0-dim int tensors (e.g. a 1-D LongTensor of node ids).
        paths: dict mapping node id -> list of node ids (the node's path).
        seq_length: target sequence length; every returned row has exactly
            this many entries, padded with -1.

    Returns:
        (path_tensor, seq_lengths_tensor): both int64 tensors on CUDA if
        available, else CPU. ``path_tensor`` is (len(x), seq_length);
        ``seq_lengths_tensor`` is (len(x),) filled with ``seq_length``.

    Fixes vs. the original:
      * the dict was indexed BEFORE the membership check, so a missing node id
        raised KeyError instead of falling back to the default row;
      * padding appended -1 to the list stored inside ``paths`` itself,
        corrupting the shared dict across calls — we now pad a copy;
      * the global ``args.sequenceLength`` is no longer read; the
        ``seq_length`` parameter (which callers set to the same value) is used
        throughout.
    """
    default_row = [-1] * seq_length
    path_data = []
    seq_lengths = []
    for ele in x:
        node_id = int(ele)
        if node_id in paths:
            path = paths[node_id]
            path_len = len(path)
            if path_len >= seq_length:
                # Truncate (slicing also covers the exact-length case).
                path_data.append(path[:seq_length])
            elif path_len == 0:
                path_data.append(default_row)
            else:
                # Pad a fresh list; never mutate the dict's stored value.
                path_data.append(path + [-1] * (seq_length - path_len))
        else:
            path_data.append(default_row)
        seq_lengths.append(seq_length)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    path_tensor = torch.tensor(path_data, device=device, dtype=torch.int64)
    seq_lengths_tensor = torch.tensor(seq_lengths, device=device, dtype=torch.int64)
    return path_tensor, seq_lengths_tensor

def get_batches(pairs, neighbors, batch_size):
    """Yield mini-batches of training pairs as int64 tensors.

    Args:
        pairs: sequence of (src, dst, edge_type) triples.
        neighbors: indexable mapping src node id -> neighbor id list; rows
            must be equal-length so they stack into a rectangular tensor.
        batch_size: number of pairs per batch (last batch may be smaller).

    Yields:
        (x, y, t, neigh) int64 tensors on CUDA if available, else CPU:
        x/y/t are (B,), neigh is (B, num_neighbors_per_node).

    Improvements vs. the original: drops the dead ``path`` list, hoists the
    device lookup out of the loop, removes the per-batch
    ``torch.cuda.empty_cache()`` / ``gc.collect()`` churn (costly no-ops in a
    hot loop), and builds tensors directly instead of round-tripping through
    numpy arrays and ``tolist()``.
    """
    batch_size = int(batch_size)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    n_batches = (len(pairs) + batch_size - 1) // batch_size
    for idx in range(n_batches):
        x, y, t, neigh = [], [], [], []
        start = idx * batch_size
        for index in range(start, min(start + batch_size, len(pairs))):
            src = pairs[index][0]
            x.append(src)
            y.append(pairs[index][1])
            t.append(pairs[index][2])
            neigh.append(neighbors[src])
        yield (
            torch.tensor(x, device=device, dtype=torch.int64),
            torch.tensor(y, device=device, dtype=torch.int64),
            torch.tensor(t, device=device, dtype=torch.int64),
            torch.tensor(neigh, device=device, dtype=torch.int64),
        )


class GATNEModel(nn.Module):
    """GATNE-style multiplex-network embedding model extended with a GRU that
    encodes a per-node path sequence.

    Two parameterizations, chosen in ``__init__``:
      * ``features is not None`` — node features are projected by learned
        matrices (``embed_trans``, ``u_embed_trans``); ``forward`` requires
        this mode (it indexes ``self.features`` unconditionally).
      * ``features is None`` — free embedding tables per node / edge type.
        NOTE(review): forward() would fail in this mode at the path-encoding
        step (``self.features[paths]``) — confirm against intended usage.

    NOTE(review): ``output_linear`` is constructed but never applied in
    ``forward``; the final addition therefore only works when
    ``gru_hidden_size == embedding_size`` — confirm the configured sizes.
    """

    def __init__(
            self, num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features,
            gru_hidden_size, gru_layer_num, sequence_length, batch_size
    ):
        super(GATNEModel, self).__init__()
        self.num_nodes = num_nodes
        self.embedding_size = embedding_size
        self.embedding_u_size = embedding_u_size
        self.edge_type_count = edge_type_count
        self.dim_a = dim_a
        self.sequence_length = sequence_length
        self.batch_size = batch_size
        # True: take the GRU output at the last valid timestep;
        # False: mean-pool over all timesteps.
        self.use_last = True

        self.features = None
        if features is not None:
            self.features = features
            feature_dim = self.features.shape[1]
            # Project raw features into the base and edge-type-specific spaces.
            self.embed_trans = Parameter(torch.FloatTensor(feature_dim, embedding_size))
            self.u_embed_trans = Parameter(torch.FloatTensor(edge_type_count, feature_dim, embedding_u_size))
        else:
            self.node_embeddings = Parameter(torch.FloatTensor(num_nodes, embedding_size))
            self.node_type_embeddings = Parameter(
                torch.FloatTensor(num_nodes, edge_type_count, embedding_u_size)
            )
        # Per-edge-type transforms for the attention over type embeddings.
        self.trans_weights = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, embedding_size)
        )
        self.trans_weights_s1 = Parameter(
            torch.FloatTensor(edge_type_count, embedding_u_size, dim_a)
        )
        self.trans_weights_s2 = Parameter(torch.FloatTensor(edge_type_count, dim_a, 1))

        # GRU path encoder.
        self.drop_en = nn.Dropout(p=0.3)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # NOTE: dropout only takes effect when gru_layer_num > 1 (PyTorch warns otherwise).
        self.GRU_layer = nn.GRU(input_size=embedding_size, hidden_size=gru_hidden_size, num_layers=gru_layer_num, dropout=0.5,
                              batch_first=True, bidirectional=False)
        # BUG FIX: the original chained .cuda() here unconditionally, which
        # crashes on CPU-only machines; .to(device) below covers both cases.
        self.output_linear = nn.Linear(gru_hidden_size, embedding_size)
        self.GRU_layer.to(device)
        self.output_linear.to(device)
        self.hidden = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize parameters: normal(0, 1/sqrt(embedding_size)) for the
        projection/attention matrices, uniform(-1, 1) for raw embedding tables."""
        if self.features is not None:
            self.embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
            self.u_embed_trans.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        else:
            self.node_embeddings.data.uniform_(-1.0, 1.0)
            self.node_type_embeddings.data.uniform_(-1.0, 1.0)
        self.trans_weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s1.data.normal_(std=1.0 / math.sqrt(self.embedding_size))
        self.trans_weights_s2.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, train_inputs, train_types, node_neigh, paths, lengths):
        """Compute embeddings for a batch of (node, edge-type) queries.

        Args:
            train_inputs: (B,) int64 node ids.
            train_types: (B,) int64 edge-type ids.
            node_neigh: (B, edge_type_count, num_neighbors) int64 neighbor ids.
            paths: (B, seq_len) int64 path node ids, -1-padded (negative
                indices wrap when indexing ``self.features`` — presumably the
                feature matrix has a sentinel last row; TODO confirm).
            lengths: (B,) int64 valid lengths per path row.

        Returns:
            (B, embedding_size) tensor: normalized base embedding plus the
            normalized GRU path summary.
        """
        if self.features is None:
            node_embed = self.node_embeddings[train_inputs]
            node_embed_neighbors = self.node_type_embeddings[node_neigh]
        else:
            node_embed = torch.mm(self.features[train_inputs], self.embed_trans)
            # (B, type, neigh, feat) x (type, feat, u) -> (B, type, neigh, type, u)
            node_embed_neighbors = torch.einsum('bijk,akm->bijam', self.features[node_neigh], self.u_embed_trans)
        # Keep only the diagonal (type i projected by transform i).
        node_embed_tmp = torch.cat(
            [
                node_embed_neighbors[:, i, :, i, :].unsqueeze(1)
                for i in range(self.edge_type_count)
            ],
            dim=1,
        )
        # Aggregate neighbors by sum -> (B, edge_type_count, embedding_u_size).
        node_type_embed = torch.sum(node_embed_tmp, dim=2)

        trans_w = self.trans_weights[train_types]
        trans_w_s1 = self.trans_weights_s1[train_types]
        trans_w_s2 = self.trans_weights_s2[train_types]

        # Additive attention over edge types, per query.
        attention = F.softmax(
            torch.matmul(
                torch.tanh(torch.matmul(node_type_embed, trans_w_s1)), trans_w_s2
            ).squeeze(2),
            dim=1,
        ).unsqueeze(1)
        node_type_embed = torch.matmul(attention, node_type_embed)
        node_embed = node_embed + torch.matmul(node_type_embed, trans_w).squeeze(1)

        # Encode the path sequence with the GRU.
        sequence = self.features[paths]
        sequence_embed = torch.einsum("abc,cd->abd", sequence, self.embed_trans)
        out_rnn, _ = self.GRU_layer(sequence_embed, None)

        row_indices = torch.arange(0, paths.size(0)).long()
        col_indices = lengths - 1
        if next(self.parameters()).is_cuda and isinstance(col_indices, int) is False:
            row_indices = row_indices.cuda()
            col_indices = col_indices.cuda()

        # NOTE(review): the result of this dropout is discarded, so it only
        # advances the RNG stream; it was presumably meant to be assigned to
        # out_rnn. Kept as-is to preserve the original training behavior.
        self.drop_en(out_rnn)
        if self.use_last:
            # Output at the last valid timestep of each sequence.
            last_tensor = out_rnn[row_indices, col_indices, :]
        else:
            # Mean over all timesteps.
            last_tensor = out_rnn[row_indices, :, :]
            last_tensor = torch.mean(last_tensor, dim=1)
        last_tensor = F.normalize(last_tensor, dim=1)
        node_embed = self.drop_en(node_embed)
        last_node_embed = F.normalize(node_embed, dim=1)
        last_node_embed = last_node_embed + last_tensor
        return last_node_embed


class NSLoss(nn.Module):
    """Negative-sampling loss with a log-uniform (Zipfian) noise distribution.

    Maintains an output weight matrix of shape (num_nodes, embedding_size);
    for each example the loss combines log sigmoid(<emb, w_label>) with
    log sigmoid(-<emb, w_neg>) over ``num_sampled`` sampled negatives, and
    returns the negated mean over the batch.
    """

    def __init__(self, num_nodes, num_sampled, embedding_size):
        super(NSLoss, self).__init__()
        self.num_nodes = num_nodes
        self.num_sampled = num_sampled
        self.embedding_size = embedding_size
        self.weights = Parameter(torch.FloatTensor(num_nodes, embedding_size))
        # Zipfian proposal over node ranks: P(k) ∝ log((k+2)/(k+1)).
        rank_probs = [
            (math.log(k + 2) - math.log(k + 1)) / math.log(num_nodes + 1)
            for k in range(num_nodes)
        ]
        self.sample_weights = F.normalize(torch.Tensor(rank_probs), dim=0)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize output weights ~ normal(0, 1/sqrt(embedding_size))."""
        self.weights.data.normal_(std=1.0 / math.sqrt(self.embedding_size))

    def forward(self, input, embs, label):
        """Return the scalar loss for a batch.

        Args:
            input: (B,) tensor — only its batch dimension is used.
            embs: (B, embedding_size) query embeddings.
            label: (B,) target node ids.
        """
        batch = input.shape[0]
        # Positive term: log sigmoid of the dot product with the target row.
        pos_weights = self.weights[label.long()]
        log_target = torch.log(torch.sigmoid(torch.sum(embs * pos_weights, 1)))
        # Draw num_sampled negatives per example from the Zipfian proposal.
        negs = torch.multinomial(
            self.sample_weights, self.num_sampled * batch, replacement=True
        ).view(batch, self.num_sampled)
        # Negate the sampled rows so sigmoid sees -<emb, w_neg>.
        noise = -self.weights[negs.long()]
        sum_log_sampled = torch.squeeze(
            torch.sum(torch.log(torch.sigmoid(torch.bmm(noise, embs.unsqueeze(2)))), 1)
        )
        total = log_target + sum_log_sampled
        return -total.sum() / batch


def train_model(network_data, feature_map, raw_paths):
    """Train the GATNE+GRU model and return the test scores at the best
    validation epoch.

    Args:
        network_data: dict mapping edge type -> training edges (consumed by
            ``generate`` / ``generate_neighbors`` from utils).
        feature_map: dict mapping node key -> feature vector, or None.
        raw_paths: raw path data forwarded to ``generate``.

    Returns:
        (auc, f1, pr) on the test split at the epoch with the best validation
        AUC.

    NOTE(review): this function reads many module-level globals bound in the
    ``__main__`` block: ``args``, ``file_name``, ``feature_dic``, ``tqdm``,
    ``valid_true_data_by_edge``, ``valid_false_data_by_edge``,
    ``testing_true_data_by_edge``, ``testing_false_data_by_edge`` — it is not
    callable before that setup has run.
    """
    vocab, index2word, train_pairs, neo_paths = generate(network_data, args.num_walks, args.walk_length, args.schema, file_name, args.window_size, args.num_workers, args.walk_file, raw_paths)

    # print("generated paths", neo_paths)
    edge_types = list(network_data.keys())
    num_nodes = len(index2word)
    edge_type_count = len(edge_types)
    epochs = args.epoch
    batch_size = args.batch_size
    embedding_size = args.dimensions
    embedding_u_size = args.edge_dim
    u_num = edge_type_count
    num_sampled = args.negative_samples
    dim_a = args.att_dim
    att_head = 1
    neighbor_samples = args.neighbor_samples
    gru_layer_num = args.gruHiddenLayerNum
    gru_hidden_size = args.gruHiddenSize
    sequence_length = args.sequenceLength

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    neighbors = generate_neighbors(network_data, vocab, num_nodes, edge_types, neighbor_samples)

    features = None
    # NOTE(review): the guard checks the global ``feature_dic`` while the loop
    # below iterates the ``feature_map`` parameter; the caller passes the same
    # object for both, but the mixed usage is fragile — confirm and unify.
    if feature_dic is not None:
        feature_dim = len(list(feature_dic.values())[0])
        # print('feature dimension: ' + str(feature_dim))
        features = np.zeros((num_nodes, feature_dim), dtype=np.float64)
        # print("feature map", feature_map)
        for key, value in feature_map.items():
            if key in vocab:
                # Nodes absent from feature_map keep an all-zero feature row.
                features[vocab[key].index, :] = np.array(value)
        features = torch.FloatTensor(features).to(device)

    model = GATNEModel(
        num_nodes, embedding_size, embedding_u_size, edge_type_count, dim_a, features,
        gru_hidden_size, gru_layer_num, sequence_length, batch_size
    )
    nsloss = NSLoss(num_nodes, num_sampled, embedding_size)
    model.to(device)
    nsloss.to(device)

    # Single optimizer over both the model and the loss's output weights.
    optimizer = torch.optim.Adam(
        [{"params": model.parameters()}, {"params": nsloss.parameters()}], lr=1e-4
    )

    best_score = 0
    test_score = (0.0, 0.0, 0.0)
    patience = 0
    for epoch in range(epochs):
        random.shuffle(train_pairs)
        batches = get_batches(train_pairs, neighbors, batch_size)

        data_iter = tqdm(
            batches,
            desc="epoch %d" % (epoch),
            total=(len(train_pairs) + (batch_size - 1)) // batch_size,
            bar_format="{l_bar}{r_bar}",
        )
        avg_loss = 0.0

        for i, data in enumerate(data_iter):
            optimizer.zero_grad()
            # data = (x, y, t, neigh); paths are looked up per batch.
            input_paths, lengths = get_paths(data[0], neo_paths, sequence_length)
            embs = model(data[0].to(device), data[2].to(device), data[3].to(device), input_paths, lengths)
            loss = nsloss(data[0].to(device), embs, data[1].to(device))
            loss.backward()
            optimizer.step()

            avg_loss += loss.item()

            # Log a progress snapshot every 10 batches (file + progress bar).
            if i % 10 == 0:
                post_fix = {
                    "epoch": epoch,
                    "iter": i,
                    "avg_loss": avg_loss / (i + 1),
                    "loss": loss.item(),
                }
                fo = open("dim.txt", 'a+')
                fo.writelines(str(post_fix) + "\n")
                fo.close()
                data_iter.write(str(post_fix))

        # --- end-of-epoch evaluation: extract per-(edge-type, node) embeddings.
        # NOTE(review): the model stays in train mode here (dropout active) and
        # no torch.no_grad() is used — embeddings are noisy and gradients are
        # tracked; confirm whether this is intentional.
        final_model = dict(zip(edge_types, [dict() for _ in range(edge_type_count)]))
        for i in range(num_nodes):
            train_inputs = torch.tensor([i for _ in range(edge_type_count)], device=device)
            train_types = torch.tensor(list(range(edge_type_count)), device=device)
            node_neigh = torch.tensor([neighbors[i] for _ in range(edge_type_count)], device=device)
            train_paths, train_lengths = get_paths(train_inputs, neo_paths, sequence_length)
            node_emb = model(train_inputs, train_types, node_neigh, train_paths,train_lengths)
            # print("embed", node_emb)
            for j in range(edge_type_count):
                final_model[edge_types[j]][index2word[i]] = (
                    node_emb[j].cpu().detach().numpy()
                )

        valid_aucs, valid_f1s, valid_prs = [], [], []
        test_aucs, test_f1s, test_prs = [], [], []
        for i in range(edge_type_count):
            if args.eval_type == "all" or edge_types[i] in args.eval_type.split(","):
                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    valid_true_data_by_edge[edge_types[i]],
                    valid_false_data_by_edge[edge_types[i]],
                )
                valid_aucs.append(tmp_auc)
                valid_f1s.append(tmp_f1)
                valid_prs.append(tmp_pr)

                tmp_auc, tmp_f1, tmp_pr = evaluate(
                    final_model[edge_types[i]],
                    testing_true_data_by_edge[edge_types[i]],
                    testing_false_data_by_edge[edge_types[i]],
                )
                test_aucs.append(tmp_auc)
                test_f1s.append(tmp_f1)
                test_prs.append(tmp_pr)
        fo = open("valid.txt", 'a+')
        fo.writelines("valid auc:" + str(np.mean(valid_aucs)) + "\n")
        fo.writelines("valid pr:" + str(np.mean(valid_prs)) + "\n")
        fo.writelines("valid f1:" + str(np.mean(valid_f1s)) + "\n")
        fo.close()
        print("valid auc:", np.mean(valid_aucs))
        print("valid pr:", np.mean(valid_prs))
        print("valid f1:", np.mean(valid_f1s))

        average_auc = np.mean(test_aucs)
        average_f1 = np.mean(test_f1s)
        average_pr = np.mean(test_prs)

        # Early stopping on validation AUC; test_score is frozen at the best
        # validation epoch.
        cur_score = np.mean(valid_aucs)
        if cur_score > best_score:
            best_score = cur_score
            test_score = (average_auc, average_f1, average_pr)
            patience = 0
        else:
            patience += 1
            if patience > args.patience:
                print("Early Stopping")
                break
        # NOTE(review): these dels reference loop-body variables; if
        # train_pairs were empty, ``input_paths`` would be unbound here and
        # this would raise NameError — confirm train_pairs is always non-empty.
        del train_inputs
        del input_paths
        del train_types
        del node_neigh
        del node_emb
        torch.cuda.empty_cache()
        gc.collect()
    return test_score


if __name__ == "__main__":
    # Parse CLI options. NOTE: ``args``, ``file_name``, ``feature_dic`` and
    # the four *_data_by_edge dicts bound below are read as module-level
    # globals by get_paths() and train_model() — this block must run before
    # either of them is called.
    args = parse_args()
    file_name = args.input
    print(args)
    if args.features is not None:
        feature_dic = load_feature_data(args.features)
    else:
        feature_dic = None

    # Load train/valid/test splits from the input directory.
    training_data_by_type, paths = load_training_data(file_name + "/train.txt", file_name + "/paths.txt")
    valid_true_data_by_edge, valid_false_data_by_edge = load_testing_data(
        file_name + "/valid.txt"
    )
    testing_true_data_by_edge, testing_false_data_by_edge = load_testing_data(
        file_name + "/test.txt"
    )

    average_auc, average_f1, average_pr = train_model(training_data_by_type, feature_dic, paths)

    print("Overall ROC-AUC:", average_auc)
    print("Overall PR-AUC", average_pr)
    print("Overall F1:", average_f1)