# main.py
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from unittest import loader
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import f1_score
import random
from MDGPT.models import LogReg
from MDGPT.model_reg_smooth import PrePrompt,pca_compression
from MDGPT.model_reg_smooth import PrePrompt as preprompt
from MDGPT.utils import process
import pdb
import tqdm
import argparse
from MDGPT.model_reg_smooth import *
import csv
from tqdm import tqdm
parser = argparse.ArgumentParser("MDGPT")
import torch.nn.functional as F
from MDGPT.config import get_args
from MDGPT.utils.data_util import get_loader_pretrain_data, get_loader_down_data
from torch_geometric.datasets import TUDataset,Planetoid,Amazon,Coauthor,Reddit
from torch_geometric.loader import DataLoader
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
import torch
import torch.nn as nn
import wandb
from sklearn.decomposition import TruncatedSVD
from torch_geometric.utils import to_scipy_sparse_matrix

def set_seed(seed):
    """Seed every RNG used in this project for reproducibility.

    Covers Python's ``random``, NumPy, and PyTorch on CPU and on every
    CUDA device. The CUDA calls are safe no-ops when no GPU is present.

    Args:
        seed (int): the seed value applied to all generators.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # BUGFIX: also seed all CUDA devices, not just the current one, so
    # multi-GPU runs are reproducible as well.
    torch.cuda.manual_seed_all(seed)

def get_laplacian_evd(adj, n_components=100, device='cuda'):
    """Approximate the spectrum of the graph Laplacian L = D - A.

    Args:
        adj: scipy sparse adjacency matrix (any format; converted to CSR).
            Assumed to have a zero diagonal — TODO confirm with callers.
        n_components (int): number of components for the truncated SVD
            (default 100, the original hard-coded value).
        device: torch device for the returned tensors (default 'cuda',
            the original hard-coded target).

    Returns:
        (eival, eivec): square roots of the SVD explained variances (used
        as a proxy for eigenvalues) and the component vectors.

    NOTE(review): TruncatedSVD yields singular components of L, not a true
    eigendecomposition; kept as in the original implementation.
    """
    # BUGFIX: convert to CSR — setdiag is not supported on COO matrices
    # (the format produced by to_scipy_sparse_matrix); copy to keep the
    # caller's matrix intact.
    lap = sp.csr_matrix(adj, copy=True)
    # BUGFIX: .sum(axis=1) returns an (n, 1) np.matrix; setdiag expects a
    # 1-D sequence of diagonal values, so flatten it first.
    degrees = np.ravel(lap.sum(axis=1))
    lap.setdiag(-degrees)
    lap = -lap  # now lap == D - A

    svd = TruncatedSVD(n_components=n_components, n_iter=20, random_state=42)
    svd.fit(lap)

    eival = torch.tensor(svd.explained_variance_ ** 0.5, dtype=torch.float32).to(device)
    eivec = torch.tensor(svd.components_, dtype=torch.float32).to(device)
    return eival, eivec

def train_model(unify_dim, reg_weight, reg_thres, is_Reddit, sparse, num_tokens, hid_units, nonlinearity, lr, l2_coef, nb_epochs, patience, LP, lambda_entropy, n_samples=3, variance_weight=0.1, downstreamlr=0.001):
    """Pre-train a ``PrePrompt`` model jointly over several source graphs.

    Loads 5 (Reddit) or 9 (otherwise) pre-training graph loaders, PCA-aligns
    every feature matrix to ``unify_dim``, samples one positive plus 50
    negative nodes per node on the combined graph, then optimises the model
    with early stopping on the training loss. The best checkpoint is written
    to ``args.save_name`` and losses are logged to wandb.

    NOTE(review): reads the module-level globals ``args`` and ``seed`` set in
    ``__main__``. ``reg_weight``, ``reg_thres``, ``LP``, ``lambda_entropy``
    and ``downstreamlr`` are accepted but currently unused in this function.
    """
    # ---- load the pre-training graphs -------------------------------------
    loaders = get_loader_pretrain_data(args.dataset)  # tuple of 5 or 9 DataLoaders

    features_list = []
    raw_adjs = []
    negetive_sample = None
    for batch in zip(*loaders):
        # Extract the feature matrix and adjacency matrix of each graph.
        if is_Reddit:
            # TU-style data: process_tu yields (features, scipy adjacency).
            # BUGFIX: the original called process_tu on data1 twice.
            processed = [process.process_tu(d, d.x.shape[1]) for d in batch]
            raw_feats = [feat for feat, _ in processed]
            raw_adjs = [a for _, a in processed]
        else:
            raw_feats = [d.x for d in batch]
            raw_adjs = [to_scipy_sparse_matrix(d.edge_index) for d in batch]

        # PCA-compress every feature matrix to the shared dimensionality.
        features_list = [
            torch.FloatTensor(pca_compression(f, k=unify_dim)).cuda()
            for f in raw_feats
        ]

        # Sample one positive + 50 negatives per node on the combined graph.
        combined = process.combine_dataset(*raw_adjs)
        negetive_sample = prompt_pretrain_sample(combined, 50)

    # ---- degree-normalise the adjacencies (A + I) --------------------------
    # BUGFIX: the original normalised adj6..adj9 unconditionally, raising a
    # NameError in the 5-graph Reddit setting, and normalised adj5 twice
    # when is_Reddit was set.
    norm_adjs = [process.normalize_adj(a + sp.eye(a.shape[0])) for a in raw_adjs]

    # Convert to torch sparse tensors if requested.
    if sparse:
        norm_adjs = [process.sparse_mx_to_torch_sparse_tensor(a) for a in norm_adjs]

    model = PrePrompt(unify_dim, hid_units, nonlinearity, negetive_sample, 3, 0.1,
                      args.combinetype, variance_weight, num_tokens, n_samples)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    if torch.cuda.is_available():
        print('Using CUDA')
        model = model.cuda()
        features_list = [f.cuda() for f in features_list]
        if sparse:
            norm_adjs = [a.cuda() for a in norm_adjs]

    # ---- training loop with early stopping ---------------------------------
    best = 1e9       # smallest loss seen so far (kept as a plain float)
    best_t = 0       # epoch index of the best loss
    cnt_wait = 0     # epochs since the last improvement
    for epoch in range(nb_epochs):
        # Re-seed each epoch so sampling inside the model stays deterministic.
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        model.train()
        optimiser.zero_grad()
        # BUGFIX: the model inputs previously duplicated the fifth graph in
        # the Reddit setting (and referenced undefined sp_adj6..sp_adj9).
        loss = model(features_list, norm_adjs, sparse, None, None, None)
        loss.backward()
        optimiser.step()

        # Detach to a float so `best` does not keep the autograd graph alive.
        loss_val = loss.item()
        print('Loss:[{:.4f}]'.format(loss_val))
        wandb.log({
            "pretrain_loss": loss_val  # per-epoch pre-training loss
        })

        if loss_val < best:
            best = loss_val
            best_t = epoch
            cnt_wait = 0
            torch.save(model.state_dict(), args.save_name)
        else:
            cnt_wait += 1
        if cnt_wait == patience:
            print('Early stopping!')
            break
        print('Loading {}th epoch'.format(best_t))

    # Record the overall minimum loss observed during pre-training.
    wandb.log({
        "final_best_loss": best
    })






if __name__ == "__main__":
    # Parse CLI arguments and echo the full configuration.
    # NOTE: `args` and `seed` stay module-level — train_model reads them.
    args = get_args()
    divider = '-' * 100
    print(divider)
    print(args)
    print(divider)

    # Fix all RNG seeds before any model or data work happens.
    seed = args.seed
    set_seed(seed)

    device = torch.device("cuda")
    print(device)

    # Forward every hyper-parameter straight from the parsed arguments.
    train_model(
        args.unify_dim,
        args.reg_weight,
        args.reg_thres,
        args.is_Reddit,
        args.sparse,
        args.num_tokens,
        args.hid_units,
        args.nonlinearity,
        args.lr,
        args.l2_coef,
        args.nb_epochs,
        args.patience,
        args.LP,
        args.lambda_entropy,
        args.n_samples,
        args.variance_weight,
        args.downstreamlr,
    )










