# -*- coding: UTF-8 -*-
import os
import time
import torch
import argparse
import numpy as np

import utils
import config
import discriminator
import generator

from BFS_trees import BFS_trees
from evalution import link_prediction as lp
from evalution import node_classification as nc
from manifolds.hyperboloid import Hyperboloid
from manifolds.euclidean import Euclidean
from manifolds.poincare import PoincareBall

torch.backends.cudnn.deterministic = True


class GraphGan(object):
    """Adversarial graph-embedding model (GraphGAN) with manifold support.

    Trains a generator/discriminator pair over BFS-tree random-walk samples
    of a graph.  Supports link prediction and node classification as
    downstream applications, and Euclidean, Poincare-ball, or hyperboloid
    embedding manifolds.
    """

    def __init__(self, args):
        """Set up file paths, manifold, graph, initial embeddings and models.

        Args:
            args: parsed command-line namespace with attributes
                  app, data, keep_prob, manifold, curvature.
        """
        # Basic run configuration.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.app = args.app
        self.data = args.data
        self.keep_prob = args.keep_prob

        if self.data == 'CA-GrQc':
            # The CA-GrQc benchmark ships with fixed paths in config.
            self.train_filename = config.train_filename
            self.test_filename = config.test_filename
            self.result_filename = config.result_filename
            self.emb_filenames = config.emb_filenames
            self.cache_filename = config.cache_filename
        elif self.app == 'link-prediction':
            # Euclidean results live in a separate "<task>2" results directory.
            task_dir = 'LP2' if args.manifold == 'Euclidean' else 'LP'
            self.result_filename = f'../results/{task_dir}/{self.data}/{self.data}_result_{self.keep_prob}.pt'
            self.emb_filenames = [f'../results/{task_dir}/{self.data}/{self.data}_gen_{self.keep_prob}.emb',
                                  f'../results/{task_dir}/{self.data}/{self.data}_dis_{self.keep_prob}.emb']
            self.train_filename = f'../data/LP/{self.data}/{self.data}_train_ei_{self.keep_prob}.pt'
            self.test_filename = f'../data/LP/{self.data}/{self.data}_test_ei_{self.keep_prob}.pt'
            self.label_filename = f'../data/LP/{self.data}/{self.data}_test_label_{self.keep_prob}.pt'
            self.cache_filename = f'../cache/LP/{self.data}/{self.data}_{self.keep_prob}.pt'
        elif self.app == 'node-classification':
            task_dir = 'NC2' if args.manifold == 'Euclidean' else 'NC'
            self.result_filename = f'../results/{task_dir}/{self.data}/{self.data}_result_{self.keep_prob}.pt'
            self.emb_filenames = [f'../results/{task_dir}/{self.data}/{self.data}_gen_{self.keep_prob}.emb',
                                  f'../results/{task_dir}/{self.data}/{self.data}_dis_{self.keep_prob}.emb']
            if self.data in ["cora", "citeseer", "pubmed"]:
                # Planetoid datasets are loaded from the NC directory root.
                self.train_filename = f'../data/NC/'
                self.test_filename = None
                self.label_filename = None
            else:
                self.train_filename = f'../data/NC/{self.data}/{self.data}_edge_index.pt'
                self.test_filename = None
                self.label_filename = f'../data/NC/{self.data}/{self.data}_label.pt'
            self.cache_filename = f'../cache/NC/{self.data}/{self.data}.pt'

        print('Application is ' + str(self.app))
        print('Dataset is ' + str(self.data))
        print('Keep prob is ' + str(self.keep_prob))
        print('Device is ' + str(self.device))
        print('Basic setting initialized.\n')

        # Manifold and curvature of the embedding space.
        if args.manifold == 'Euclidean':
            self.manifold = Euclidean()
        elif args.manifold == 'PoincareBall':
            self.manifold = PoincareBall()
        else:
            # Any other value falls back to the hyperboloid model.
            self.manifold = Hyperboloid()
        self.curvature = args.curvature

        print('Manifold is ' + str(args.manifold))
        print('Curvature is ' + str(self.curvature))
        print('Manifold initialized.\n')

        # Graph loading.
        self.n_emb = config.n_emb
        self.n_node = None
        self.n_class = None
        self.graph = None
        if self.data == 'CA-GrQc':
            self.n_node, self.graph = utils.read_edges(self.train_filename, self.test_filename)
        elif self.app == 'link-prediction':
            self.n_node, self.graph = utils.load_data_lp(self.train_filename, self.test_filename)
        elif self.app == 'node-classification':
            if self.data in ["cora", "citeseer", "pubmed"]:
                self.n_node, self.n_class, self.graph, self.label = utils.load_data_nc(self.train_filename,
                                                                                       self.data)
            else:
                self.n_node, self.n_class, self.graph, self.label = utils.load_data_nc(self.train_filename,
                                                                                       self.data,
                                                                                       self.label_filename)
        self.root_nodes = list(range(self.n_node))

        print('Dataset is ' + str(args.data))
        print('The number of nodes is ' + str(self.n_node))
        if self.app == 'node-classification':
            print('The number of classes is ' + str(self.n_class))
        print('Graph initialized.\n')

        # Initial node embeddings: pretrained for CA-GrQc, Xavier otherwise.
        if self.data == 'CA-GrQc':
            self.node_embed_init_d = utils.read_embeddings(config.pretrain_emb_filename_d, self.n_node, config.n_emb)
            self.node_embed_init_g = utils.read_embeddings(config.pretrain_emb_filename_g, self.n_node, config.n_emb)
        else:
            self.node_embed_init_d = self._xavier_embedding()
            self.node_embed_init_g = self._xavier_embedding()

        print('Node embedding initialized.\n')

        # BFS trees rooted at every node, cached on disk.
        self.BFS_trees = BFS_trees(self.root_nodes, self.graph, self.cache_filename, batch_num=config.cache_batch)

        # Generator/discriminator pair (construction is identical for every dataset).
        self.generator = generator.Generator(self.n_node, self.node_embed_init_g, self.manifold, self.curvature)
        self.discriminator = discriminator.Discriminator(self.n_node, self.node_embed_init_d, self.manifold,
                                                         self.curvature)

        print('Model GraphGAN initialized.\n')

    def _xavier_embedding(self):
        """Return a Xavier-uniform initialized (n_node, n_emb) numpy matrix."""
        embed = torch.Tensor(self.n_node, config.n_emb)
        torch.nn.init.xavier_uniform_(embed, gain=1)
        return embed.numpy()

    def train(self):
        """Run the adversarial training loop for config.n_epochs epochs."""
        self.write_embeddings_to_file()
        self.evaluation(note='\npretrained embedding\n')

        print("start training...")
        for epoch in range(config.n_epochs):
            since = time.time()
            print(f"epoch {epoch} begin")

            # Checkpoint optimizer states and last losses before this epoch.
            torch.save({
                'epoch': epoch,
                'optimizerD_state_dict': self.discriminator.optimizer.state_dict(),
                'optimizerG_state_dict': self.generator.optimizer.state_dict(),
                'lossD': self.discriminator.d_loss,
                'lossG': self.generator.g_loss
            }, config.model_log + 'model.checkpoint_{}'.format(epoch) + '.tar')

            self._train_discriminator()
            self._train_generator()

            # Refresh the generator's cached all_score matrix.
            with torch.no_grad():
                self.generator.forward()

            self.write_embeddings_to_file()
            self.evaluation(note='epoch ' + str(epoch) + '\n')
            time_elapsed = time.time() - since
            print('Epoch ' + str(epoch) + ' complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
        print("training completes")

    def _train_discriminator(self):
        """Train the discriminator for config.n_epochs_dis inner epochs."""
        center_nodes, neighbor_nodes, labels = [], [], []
        for d_epoch in range(config.n_epochs_dis):
            since_d = time.time()
            if torch.cuda.is_available():  # memory_allocated is CUDA-only
                print('discriminator epoch ' + str(d_epoch) + ': memory allocated '
                      + str(torch.cuda.memory_allocated()))
            # Resample training data only every dis_interval inner epochs.
            if d_epoch % config.dis_interval == 0:
                center_nodes, neighbor_nodes, labels = self.prepare_data_for_d()
            print('discriminator epoch ' + str(d_epoch) + ': data prepared ')

            loss = 0.0
            start_list = list(range(0, len(center_nodes), config.batch_size_dis))
            np.random.shuffle(start_list)
            for start in start_list:
                end = start + config.batch_size_dis
                # Accumulate per-batch losses so the mean covers every batch
                # (the original overwrote loss, keeping only the last batch).
                loss += self.discriminator.train(center_nodes[start:end], neighbor_nodes[start:end],
                                                 labels[start:end])
            loss = loss / max(len(start_list), 1)
            print('discriminator epoch ' + str(d_epoch) + ': mean loss ' + str(loss))
            time_elapsed_d = time.time() - since_d
            print('discriminator epoch ' + str(d_epoch) + ': complete in {:.0f}m {:.0f}s'.format(
                time_elapsed_d // 60, time_elapsed_d % 60))

    def _train_generator(self):
        """Train the generator for config.n_epochs_gen inner epochs."""
        node_1, node_2, reward = [], [], []
        for g_epoch in range(config.n_epochs_gen):
            since_g = time.time()
            if torch.cuda.is_available():  # memory_allocated is CUDA-only
                print('generator epoch ' + str(g_epoch) + ': memory allocated '
                      + str(torch.cuda.memory_allocated()))
            # Resample training data only every gen_interval inner epochs.
            if g_epoch % config.gen_interval == 0:
                node_1, node_2, reward = self.prepare_data_for_g()
            print('generator epoch ' + str(g_epoch) + ': data prepared ')

            loss = 0.0
            start_list = list(range(0, len(node_1), config.batch_size_gen))
            np.random.shuffle(start_list)
            for start in start_list:
                end = start + config.batch_size_gen
                loss += self.generator.train(node_1[start:end], node_2[start:end], reward[start:end])
            loss = loss / max(len(start_list), 1)
            print('generator epoch ' + str(g_epoch) + ': mean loss is ' + str(loss))
            time_elapsed_g = time.time() - since_g
            print('generator epoch ' + str(g_epoch) + ': complete in {:.0f}m {:.0f}s'.format(
                time_elapsed_g // 60, time_elapsed_g % 60))

    def prepare_data_for_d(self):
        """Generate positive and negative (center, neighbor, label) triples for the discriminator."""
        print("prepare_data_for_d")
        center_nodes = []
        neighbor_nodes = []
        labels = []
        for i in self.root_nodes:
            # Each root is only resampled with probability update_ratio.
            if np.random.rand() < config.update_ratio:
                pos = self.graph[i]
                neg, _ = self.sample(i, self.BFS_trees.get_tree(i), len(pos), for_d=True)
                if len(pos) != 0 and neg is not None:
                    # Positive samples: the node's true neighbors.
                    center_nodes.extend([i] * len(pos))
                    neighbor_nodes.extend(pos)
                    labels.extend([1] * len(pos))

                    # Negative samples: generator-sampled fake neighbors.
                    center_nodes.extend([i] * len(neg))
                    neighbor_nodes.extend(neg)
                    labels.extend([0] * len(neg))
        return center_nodes, neighbor_nodes, labels

    def prepare_data_for_g(self):
        """Sample node pairs for the generator and score them with the discriminator.

        Returns:
            node_1, node_2: parallel lists of path-window node pairs.
            reward: 1-D tensor of discriminator rewards aligned with the pairs,
                    or None when no pair was sampled.
        """
        print("prepare_data_for_g")
        paths = []
        for i in self.root_nodes:
            if np.random.rand() < config.update_ratio:
                _, paths_from_i = self.sample(i, self.BFS_trees.get_tree(i), config.n_sample_gen, for_d=False)
                if paths_from_i is not None:
                    paths.extend(paths_from_i)

        node_1 = []
        node_2 = []
        for pairs in map(self.get_node_pairs_from_path, paths):
            for left, right in pairs:
                node_1.append(left)
                node_2.append(right)

        # Score the pairs in batches; gradients are not needed for rewards.
        reward = None
        node_size = len(node_1)
        for start in range(0, node_size, config.batch_size_prepare_data_for_g):
            end = min(start + config.batch_size_prepare_data_for_g, node_size)
            with torch.no_grad():
                tmp_reward = self.discriminator.forward(node_1[start:end], node_2[start:end])
            reward = tmp_reward if reward is None else torch.cat((reward, tmp_reward), dim=0)
        print('node_size = ', node_size)
        if reward is not None:  # guard: no pairs sampled leaves reward as None
            print('reward.shape = ', reward.shape)

        return node_1, node_2, reward

    def sample(self, root, tree, sample_num, for_d):
        """Sample nodes by random walks on a BFS-tree, guided by generator scores.

        Args:
            root: int, root node of the walk.
            tree: dict, BFS-tree mapping node -> neighbor list
                  (for non-root entries the first element is the parent).
            sample_num: the number of required samples.
            for_d: bool, True when sampling negatives for the discriminator
                   (1-hop neighbors of the root are then skipped).

        Returns:
            samples: list, the indices of the sampled nodes.
            paths: list, root-to-sample walk for each sample.
            (None, None) when the tree cannot provide samples.
        """
        # all_score is a [n_node, n_node] relevance matrix from the generator.
        all_score = self.generator.get_all_score()

        samples = []
        paths = []
        n = 0

        while len(samples) < sample_num:
            current_node = root
            previous_node = -1
            paths.append([])
            is_root = True
            paths[n].append(current_node)
            while True:
                # Copy the neighbor list: the for_d branch may remove the root
                # below, and the tree is a shared cache that must not be mutated.
                node_neighbor = list(tree[current_node][1:] if is_root else tree[current_node])
                is_root = False
                if len(node_neighbor) == 0:  # the tree only has a root
                    return None, None
                if for_d:  # skip 1-hop nodes (positive samples)
                    if node_neighbor == [root]:
                        # in current version, None is returned for simplicity
                        return None, None
                    if root in node_neighbor:
                        node_neighbor.remove(root)

                relevance_probability = all_score[current_node][node_neighbor]
                relevance_probability = relevance_probability.cpu().detach().numpy()
                relevance_probability = utils.softmax(relevance_probability)
                next_node = np.random.choice(node_neighbor, size=1, p=relevance_probability)[0]  # select next node
                paths[n].append(next_node)
                if next_node == previous_node:  # terminating condition: walk stepped back
                    samples.append(current_node)
                    break
                previous_node = current_node
                current_node = next_node
            n = n + 1  # n equal to sample_num when the loop exits
        return samples, paths  # for each sample, we get one path from root to that sample

    @staticmethod
    def get_node_pairs_from_path(path):
        """
        given a path from root to a sampled node, generate all the node pairs within the given windows size
        e.g., path = [1, 0, 2, 4, 2], window_size = 2 -->
        node pairs= [[1, 0], [1, 2], [0, 1], [0, 2], [0, 4], [2, 1], [2, 0], [2, 4], [4, 0], [4, 2]]
        :param path: a path from root to the sampled node
        :return pairs: a list of node pairs
        """
        # The final node duplicates the sampled node's predecessor; drop it.
        path = path[:-1]
        pairs = []
        for i in range(len(path)):
            center_node = path[i]
            for j in range(max(i - config.window_size, 0), min(i + config.window_size + 1, len(path))):
                if i == j:
                    continue
                pairs.append([center_node, path[j]])
        return pairs

    def write_embeddings_to_file(self):
        """Write embeddings of the generator and the discriminator to their .emb files.

        File format: a "<n_node>\\t<n_emb>" header line, then one
        "<node_id>\\t<v0>\\t<v1>..." line per node.
        """
        for model, filename in zip([self.generator, self.discriminator], self.emb_filenames):
            embedding_matrix = model.embedding_matrix.detach().to('cpu').numpy()
            index = np.arange(self.n_node).reshape(-1, 1)
            embedding_matrix = np.hstack([index, embedding_matrix])
            embedding_str = [str(int(emb[0])) + "\t" + "\t".join([str(x) for x in emb[1:]]) + "\n"
                             for emb in embedding_matrix.tolist()]
            with open(filename, "w+") as f:
                f.writelines([str(self.n_node) + "\t" + str(self.n_emb) + "\n"] + embedding_str)

    def evaluation(self, note=None):
        """Evaluate both embedding files on the configured task and append results.

        NOTE: the original declared this @staticmethod while still taking
        ``self`` and invoked it as ``self.evaluation(self, ...)``; it is now a
        plain instance method with the same (self, note) parameter list.

        Args:
            note: optional header line written before the scores.
        """
        results = []
        if note is not None:
            results.append(note)
        if self.app == "link-prediction":
            for i in range(2):
                if self.data == 'CA-GrQc':
                    lpe = lp.LinkPredictEval(self.emb_filenames[i], config.test_filename,
                                             self.n_node, config.n_emb, config.test_neg_filename)
                else:
                    lpe = lp.LinkPredictEval(self.emb_filenames[i], self.test_filename, self.n_node, self.n_emb,
                                             label_filename=self.label_filename)
                accuracy, auc_score = lpe.eval_link_prediction()
                results.append(
                    config.modes[i] + ": accuracy is " + str(accuracy) + ", auc_score is " + str(auc_score) + "\n")
        elif self.app == 'node-classification':
            for i in range(2):
                # Evaluate over train/test splits from 10% to 90%.
                for j in np.arange(0.1, 1, 0.1):
                    nce = nc.NodeClassificationEval(self.emb_filenames[i], self.label, self.n_node, self.n_emb,
                                                    self.n_class, split=j, time=10)
                    accuracy, micro_score, macro_score = nce.eval_node_classification()
                    results.append(config.modes[i] + "(split={" + str(j) + "}): accuracy is " + str(
                        accuracy) + ", micro_score is " + str(micro_score) + ", macro_score is " + str(
                        macro_score) + "\n")

        with open(self.result_filename, mode="a+") as f:
            f.writelines(results)


if __name__ == "__main__":
    # Command-line entry point: parse options, build the model, and train.
    arg_parser = argparse.ArgumentParser(description='GraphGAN')

    arg_parser.add_argument('--app', type=str, default='link-prediction', help='choose application for GraphGAN.')
    arg_parser.add_argument('--data', type=str, default='CA-GrQc', help='choose dataset for GraphGAN.')
    arg_parser.add_argument("--keep_prob", type=float, choices=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
                            default=0.2)
    arg_parser.add_argument('--manifold', type=str, default='Hyperboloid', help='choose manifold for GraphGAN.')
    arg_parser.add_argument('--curvature', type=float, default=1.0, help='curvature for manifold.')

    # Construct and run in one step; GraphGan reads all settings from args.
    GraphGan(arg_parser.parse_args()).train()