from __future__ import division
from __future__ import print_function

import argparse
import random
import time
from collections import defaultdict
import numpy as np
import csv
import scipy.sparse as sp
import torch
import dgl
from torch import optim

from model import NewGCN
from optimizer import loss_function_relation_semi
from utils import pro_adj, pos_graph_to_edge, neg_graph_to_edge, get_adj_train, get_roc_score
from buildGraph import get_graph, read_src_dst_weight

# Seed every RNG backend up front so repeated runs are reproducible.
for _seed_rng in (torch.manual_seed, random.seed, np.random.seed):
    _seed_rng(0)

# Root folder holding the edge CSVs consumed by gae_for().
dataset_folder = '../data/'

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('--dropout', type=float, default=0.1, help='Dropout rate (1 - keep probability).')

args = parser.parse_args()

# Training is pinned to CPU; switch to the commented expression for CUDA.
# device = ("cuda:0" if torch.cuda.is_available() else "cpu")
device = 'cpu'
print('device: ', device)


def gae_for(args, iter):
    """Run one train/evaluate cycle of the step-ordering link predictor.

    Builds the heterogeneous recipe graph, splits the 's-s' (step->step)
    edges into train/val/test at a 5:2:3 ratio, trains NewGCN with
    negative-sampled mini-batches, and evaluates on the held-out test edges.

    Args:
        args: parsed CLI namespace; ``lr`` and ``dropout`` are used here.
        iter: run identifier (e.g. '0.txt'); currently unused in the body.

    Returns:
        tuple: (test ROC-AUC, test MAP).
    """
    # Load the full heterogeneous graph (recipe / step / ingredient nodes).
    graph = get_graph()
    print('graph: ', graph)

    # Read the step->step edge list that will be split into train/val/test.
    src3, dst3, s2s_weight = read_src_dst_weight(dataset_folder + "edge/s2s/src3_dst3_s2s_weight_0.csv", 's-s')

    # NOTE(review): dict(zip(...)) keeps only the LAST dst for a duplicated
    # src; this assumes each src appears at most once in the CSV — confirm.
    src_dst_dic = dict(zip(src3, dst3))
    print(src_dst_dic)

    # The module-level random.seed makes this shuffle reproducible.
    random.shuffle(src3)
    ratio1 = 0.5
    ratio2 = 0.4
    offset1 = int(len(src3) * ratio1)
    offset2 = int(len(src3) * ratio2)
    train_src = src3[:offset1]
    val_test_src = src3[offset1:]
    val_src = val_test_src[:offset2]
    test_src = val_test_src[offset2:]

    # Pair every source with its destination via the lookup table.
    train_dst = [src_dst_dic[s] for s in train_src]
    val_dst = [src_dst_dic[s] for s in val_src]
    test_dst = [src_dst_dic[s] for s in test_src]
    print("train_src", train_src)
    print("train_dst", train_dst)
    print("val_src", val_src)
    print("val_dst", val_dst)
    print("test_src", test_src)
    print("test_dst", test_dst)

    train_eids = graph.edge_ids(train_src, train_dst, etype='s-s')
    val_eids = graph.edge_ids(val_src, val_dst, etype='s-s')
    test_eids = graph.edge_ids(test_src, test_dst, etype='s-s')

    print('train_eids: ', train_eids)
    print('val_eids: ', val_eids)
    print('test_eids: ', test_eids)

    # Message-passing graphs must not leak held-out edges: the train graph
    # drops val+test edges; the val graph drops test edges. Edge IDs are
    # renumbered in the cloned graphs after removal.
    train_graph = graph.clone()
    train_graph.remove_edges(torch.cat([val_eids, test_eids]), etype='s-s')

    val_graph = graph.clone()
    val_graph.remove_edges(test_eids, etype='s-s')

    neg_num = 1
    # NOTE(review): this evaluates to 2.0 (1/neg_num + 1). If the intent was
    # 1 / (neg_num + 1), parentheses are missing — confirm against the loss.
    pos_weight = 1 / neg_num + 1

    # Two-layer neighbor sampling, 20 neighbors per hop, plus one uniformly
    # drawn negative edge per positive edge.
    sampler = dgl.dataloading.MultiLayerNeighborSampler([20, 20])
    neg_sampler = dgl.dataloading.negative_sampler.Uniform(neg_num)

    # EdgeCollator batches 's-s' edges together with their sampled
    # computation dependencies for mini-batch link prediction.
    train_collator = dgl.dataloading.EdgeCollator(
        train_graph, {'s-s': train_graph.edge_ids(train_src, train_dst, etype='s-s')}, sampler,
        negative_sampler=neg_sampler)

    val_collator = dgl.dataloading.EdgeCollator(
        val_graph, {'s-s': val_graph.edge_ids(val_src, val_dst, etype='s-s')}, sampler,
        negative_sampler=neg_sampler)
    test_collator = dgl.dataloading.EdgeCollator(
        graph, {('step', 's-s', 'step'): test_eids}, sampler,
        negative_sampler=neg_sampler)

    train_edgeloader = torch.utils.data.DataLoader(
        train_collator.dataset, collate_fn=train_collator.collate,
        batch_size=4, shuffle=True, drop_last=False, num_workers=0)
    val_edgeloader = torch.utils.data.DataLoader(
        val_collator.dataset, collate_fn=val_collator.collate,
        batch_size=4, shuffle=False, drop_last=False, num_workers=0)
    test_edgeloader = torch.utils.data.DataLoader(
        test_collator.dataset, collate_fn=test_collator.collate,
        batch_size=4, shuffle=False, drop_last=False, num_workers=0)

    # Grab the first batch of each loader; the val/test pair graphs captured
    # here are reused for evaluation below (assumes one batch is enough —
    # with batch_size=4 and more than 4 val/test edges, later batches are
    # silently ignored; TODO confirm the splits fit in one batch).
    for input_nodes, pos_pair_graph, neg_pair_graph, blocks in train_edgeloader:
        break
    for test_input_nodes, test_pos_pair_graph, test_neg_pair_graph, test_blocks in test_edgeloader:
        break
    for val_input_nodes, val_positive_graph, val_negative_graph, val_blocks in val_edgeloader:
        break

    model = NewGCN(args.dropout, graph).to(device)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    print('Now start training...')
    hidden_emb = None
    for epoch in range(50):
        train_start = time.time()

        for input_nodes, positive_graph, negative_graph, blocks in train_edgeloader:

            model.train()
            blocks = [b.to(device) for b in blocks]

            input_recipe = blocks[0].srcdata['features']['recipe']
            input_instr = blocks[0].srcdata['features']['step']
            input_ingredient = blocks[0].srcdata['features']['ingredient']

            step_num = len(graph.ndata['features']['step'])
            print("train step_num:", step_num)
            index = list(range(step_num))

            # Map original step node IDs to row positions: sampled block
            # nodes first (in block order), then any step the block missed.
            node_list = blocks[0].ndata[dgl.NID]['step'].tolist()
            block_node_num = len(node_list)
            print("train block_node_num:", block_node_num)

            for i in range(step_num):
                if i not in node_list:
                    node_list.append(i)
                    print("train node_append:", i)
            node_dic = dict(zip(node_list, index))

            train_edge_list = train_eids.tolist()

            # tag_nodes marks known precedence relations over all steps;
            # adj_train is the train adjacency in node_dic's row order.
            tag_nodes = pro_adj(src_dst_dic, node_dic)
            adj_train = get_adj_train(node_dic, train_edge_list)

            adj = adj_train
            adj_label = adj_train + sp.eye(adj_train.shape[0])
            adj_norm = adj.shape[0] * adj.shape[0] / float(adj.shape[0] * adj.shape[0] - adj.sum())

            # Validation edges expressed in node_dic's row space.
            val_edge_pos, edge_num = pos_graph_to_edge(node_dic, val_positive_graph)
            val_edge_neg = neg_graph_to_edge(node_dic, val_negative_graph, edge_num)

            input_features = [input_recipe, input_instr, input_ingredient]

            # NOTE(review): the model instance is also passed as the third
            # argument; presumably consumed inside NewGCN.forward — verify.
            recovered, mu, logvar = model(blocks, input_features, model)

            # Copy the decoded adjacency for the sampled block into a full
            # step_num x step_num prediction matrix; the rest stays zero.
            pred = np.zeros((step_num, step_num), dtype=np.float64)
            bound = min(step_num, block_node_num)
            for i in range(bound):
                for j in range(bound):
                    pred[i][j] = recovered[i][j]

            # Pad the embedding matrix with zero rows for steps outside the
            # sampled block so hidden_emb covers every step.
            mu_list = mu.tolist()
            mu_list.extend([0.0] * mu.shape[1] for _ in range(step_num - block_node_num))
            hidden_emb = np.array(mu_list)

            # Debug dump of the non-zero decoder outputs.
            # NOTE(review): hard-coded Windows path, rewritten every batch —
            # consider gating this behind a CLI flag.
            with open('F:\\recovered.csv', 'w', encoding='utf-8', newline='') as f:
                csv_writer = csv.writer(f)
                (m, n) = recovered.shape
                print(recovered.shape)
                weight_total = 0  # renamed from `sum` (shadowed the builtin)
                for i in range(m):
                    for j in range(n):
                        if recovered[i][j] != 0:
                            weight_total = weight_total + recovered[i][j]
                            csv_writer.writerow([i, j, recovered[i][j]])

            # adj_label is the original 0/1 train adjacency plus self-loops;
            # tag_nodes is the square matrix marking known precedences.
            loss = loss_function_relation_semi(preds=pred, labels=adj_label,
                                               mu=torch.from_numpy(mu), logvar=torch.from_numpy(logvar),
                                               n_nodes=recovered.shape[0], norm=adj_norm, pos_weight=pos_weight,
                                               tags_nodes=tag_nodes)

            # Bug fix: clear accumulated gradients before this step's
            # backward pass (previously gradients accumulated across batches).
            optimizer.zero_grad()
            # NOTE(review): the loss is rebuilt from numpy inputs, so the
            # autograd graph is detached and requires_grad_ is forced here;
            # gradients may not actually reach the encoder — verify.
            loss.requires_grad_(True)
            loss.backward()
            cur_loss = loss.item()
            optimizer.step()

            acc_score, p, r, f1, roc_curr, map_curr = get_roc_score(hidden_emb, tag_nodes, val_edge_pos, val_edge_neg)

            print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(cur_loss),
                  "roc_curr=", "{:.5f}".format(roc_curr),
                  "val_ap=", "{:.5f}".format(map_curr),
                  "acc_score=", "{:.5f}".format(acc_score),
                  "f1=", "{:.5f}".format(f1),
                  "time=", "{:.5f}".format(time.time() - train_start)
                  )

    # Final evaluation on the held-out test edges.
    test_edge_pos, test_edge_num = pos_graph_to_edge(node_dic, test_pos_pair_graph)
    test_edge_neg = neg_graph_to_edge(node_dic, test_neg_pair_graph, test_edge_num)
    acc_score, p, r, f1, roc_curr, map_score = get_roc_score(hidden_emb, tag_nodes, test_edge_pos, test_edge_neg)

    # Bug fix: report and return the TEST MAP (map_score). The original
    # printed/returned map_curr, the stale validation MAP of the last batch.
    print("result:",
          "roc_curr=", "{:.5f}".format(roc_curr),
          "test_ap=", "{:.5f}".format(map_score),
          "acc_score=", "{:.5f}".format(acc_score),
          "f1=", "{:.5f}".format(f1)
          )

    return roc_curr, map_score


if __name__ == '__main__':
    # Run the experiment once per seed and average the link-prediction metrics.
    seeds = [5, 25, 200]
    iterations = [str(x) + '.txt' for x in range(3)]

    results = defaultdict(list)
    for iteration, seed in zip(iterations, seeds):
        # Re-seed torch/CUDA per run and force deterministic cuDNN so each
        # iteration is reproducible when a GPU is used.
        # NOTE(review): random.seed / np.random.seed are NOT reset here, so
        # the edge shuffle inside gae_for drifts across runs — confirm intent.
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.enabled = False
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

        roc_score, ap_score = gae_for(args, iteration)

        results['roc_score'].append(roc_score)
        results['ap_score'].append(ap_score)

    # Average each metric over the runs that actually completed.
    output = []
    for k, v in results.items():
        # Bug fix: divide by the number of recorded runs instead of a
        # hard-coded 3, so changing `seeds` keeps the average correct.
        avg = float("{0:.5f}".format(sum(v) / len(v)))
        output.append(str(avg))
        print(k, avg)

    print(','.join(output))