import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn

from DWGI.models import DWGI, LogReg
from DWGI.utils import process, generate_features


def train(save_path=r'F:\mypython\final_subject\DWGI\best_dgi.pkl'):
    """Train the DWGI model with a DGI-style contrastive objective.

    Each epoch samples one node at random. If it is a word node, the positive
    sample is the embedding of the subgraph containing it and the negative
    sample is the mean of all other subgraph embeddings; if it is a document
    node, both samples come from ``process.get_positive_graph_emb_by_doc``.
    A BCE-with-logits discriminator loss is minimised with Adam, the best
    (lowest-loss) model state_dict is checkpointed to ``save_path``, and
    training stops early after ``patience`` epochs without improvement.

    Args:
        save_path: File the best model ``state_dict`` is written to.
            Defaults to the original hard-coded location for backward
            compatibility; pass a relative path (e.g. ``'best_dgi.pkl'``)
            for portable runs.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # training params
    batch_size = 1
    nb_epochs = 100000
    patience = 200          # early-stopping window (epochs without a new best loss)
    lr = 0.001
    l2_coef = 0.0           # Adam weight decay
    hid_units = 200
    sparse = False
    nonlinearity = 'prelu'  # special name to separate parameters

    generate_features.generate()
    adj, features, index, node_subgraph, subgraph_node, doc_word_edge = process.load_data()

    # Index ranges for word nodes, document nodes and subgraph ids.
    word_index_min, word_index_max, doc_index_min, doc_index_max, graph_min, graph_max = process.get_doc_word_index_range(node_subgraph)

    features = process.preprocess_features(features)

    # Precompute one embedding per subgraph, e.g. shape (num_subgraphs, hid_units).
    sub_graph_embs = process.get_sub_graph_embs(graph_min, graph_max, subgraph_node, features)
    # Sum over all subgraphs -> shape (1, hid_units); used to form the
    # "all other subgraphs" negative sample without re-summing every epoch.
    sub_graph_embs_sum = sub_graph_embs.sum(dim=0).unsqueeze(0)
    sub_graph_embs_sum = sub_graph_embs_sum.to(device)
    sub_graph_embs = sub_graph_embs.to(device)

    ft_size = features.shape[1]

    adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))

    if sparse:
        sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
    else:
        # NOTE(review): self-loops are added a second time here, AFTER
        # normalization — this mirrors the original DGI reference code but
        # looks unintentional; confirm whether a single sp.eye is meant.
        adj = (adj + sp.eye(adj.shape[0])).todense()

    features = torch.FloatTensor(features[np.newaxis])
    if not sparse:
        adj = torch.FloatTensor(adj[np.newaxis])

    model = DWGI(ft_size, hid_units, nonlinearity)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    model.to(device)
    features = features.to(device)
    if sparse:
        sp_adj = sp_adj.to(device)
    else:
        adj = adj.to(device)

    b_xent = nn.BCEWithLogitsLoss()
    cnt_wait = 0
    best = 1e9
    best_t = 0

    for epoch in range(nb_epochs):
        model.train()
        optimiser.zero_grad()

        # Sample one node uniformly from [doc_index_min, word_index_max];
        # its own subgraph is the positive sample, others are negatives.
        node_index = np.random.randint(doc_index_min, word_index_max + 1)
        node_emb = features[:, node_index, :]  # (1, ft_size)
        if node_index >= word_index_min:  # sampled a word node
            # Subgraph id the word belongs to.
            positive_graph_index = node_subgraph[node_index]
            # Positive sample: that subgraph's embedding, shape (1, hid_units).
            positive_graph_emb = sub_graph_embs[positive_graph_index].unsqueeze(dim=0)

            # Negative sample: mean of all OTHER subgraph embeddings
            # (graph_max - graph_min of them, since ids are inclusive).
            negative_graph_emb = (sub_graph_embs_sum - sub_graph_embs[positive_graph_index]) / (graph_max - graph_min)
        else:  # sampled a document node
            positive_graph_emb, negative_graph_emb = process.get_positive_graph_emb_by_doc(node_index, node_subgraph, doc_word_edge, sub_graph_embs, sub_graph_embs_sum, device)

        emb_1 = positive_graph_emb.unsqueeze(dim=0)  # (1, 1, hid_units)
        emb_2 = negative_graph_emb.unsqueeze(dim=0)

        # Labels: 1s for the positive half, 0s for the negative half.
        lbl_1 = torch.ones(batch_size, emb_1.shape[2])   # (1, hid_units)
        lbl_2 = torch.zeros(batch_size, emb_2.shape[2])
        lbl = torch.cat((lbl_1, lbl_2), 1)               # (1, 2*hid_units)
        lbl = lbl.to(device)

        logits = model(features, emb_1, emb_2, node_emb, adj, sparse, None, None, None)

        loss = b_xent(logits, lbl)

        # .item() detaches the scalar so the best-loss bookkeeping does not
        # hold on to the autograd graph of a past epoch.
        loss_val = loss.item()
        print('epoch', epoch, 'Loss:', loss_val)

        if loss_val < best:
            best = loss_val
            best_t = epoch
            cnt_wait = 0
            torch.save(model.state_dict(), save_path)
        else:
            cnt_wait += 1

        if cnt_wait == patience:
            print('Early stopping!')
            break

        loss.backward()
        optimiser.step()

    print('Loading {}th epoch'.format(best_t))
    # model.load_state_dict(torch.load(save_path))


# Script entry point: run one full training session when executed directly.
if __name__ == '__main__':
    train()