# main.py
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from unittest import loader
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import f1_score
import random
from MDGPT.models import LogReg
from MDGPT.model_reg_smooth import PrePrompt,pca_compression
from MDGPT.model_reg_smooth import PrePrompt as preprompt
from MDGPT.utils import process
import pdb
import tqdm
import argparse
from MDGPT.model_reg_smooth import *
import csv
from tqdm import tqdm
parser = argparse.ArgumentParser("MDGPT")
import torch.nn.functional as F
from MDGPT.config import get_args
from MDGPT.utils.data_util_temp import get_loader_pretrain_data, get_loader_down_data
from torch_geometric.datasets import TUDataset,Planetoid,Amazon,Coauthor,Reddit
from torch_geometric.loader import DataLoader
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
import torch
import torch.nn as nn
import wandb
from sklearn.decomposition import TruncatedSVD

def set_seed(seed):
    """Seed every RNG the script uses (Python, NumPy, torch CPU and all GPUs)
    so runs are reproducible.

    Args:
        seed: integer seed applied to each library's global RNG.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # manual_seed_all covers every visible GPU, not just the current device;
    # it is a safe no-op on CPU-only machines (CUDA seeding is lazily queued).
    torch.cuda.manual_seed_all(seed)

def get_laplacian_evd(adj, n_components=100, device='cuda'):
    """Approximate the spectrum of the graph Laplacian L = D - A via truncated SVD.

    Args:
        adj: scipy sparse adjacency matrix (diagonal assumed zero).
        n_components: number of leading components to keep (default 100,
            matching the original hard-coded value).
        device: torch device for the returned tensors (default 'cuda',
            matching the original hard-coded placement).

    Returns:
        (eival, eivec): float32 tensors on `device` — sqrt of the SVD
        explained variance, and the SVD component matrix.
    """
    # Build L = D - A without mutating the caller's matrix.
    lap = adj.copy()
    lap.setdiag(-lap.sum(axis=1))  # diagonal becomes -degree
    lap = -lap                     # negate: off-diagonal -A, diagonal +D

    svd = TruncatedSVD(n_components=n_components, n_iter=20, random_state=42)
    svd.fit(lap)

    # NOTE(review): sqrt(explained_variance_) is an SVD-based surrogate for the
    # Laplacian eigenvalues, not an exact EVD — confirm this matches downstream use.
    eival = torch.tensor(svd.explained_variance_ ** 0.5, dtype=torch.float32).to(device)
    eivec = torch.tensor(svd.components_, dtype=torch.float32).to(device)
    return eival, eivec

def train_model(unify_dim, reg_weight, reg_thres, is_Reddit, sparse, num_tokens, hid_units, nonlinearity, lr, l2_coef, nb_epochs, patience, LP, lambda_entropy, n_samples=3, variance_weight=0.1, downstreamlr=0.001):
    """Pretraining driver: load the pretraining graphs, unify feature
    dimensions with PCA and (in the Reddit setting) build the combined
    adjacency plus the positive/negative sample table.

    NOTE(review): the actual model construction and training loop below is
    commented out — as written this function only performs data loading and
    preprocessing. It also reads the module-level ``args`` set in __main__
    (``args.dataset``) — presumably intentional for this script; verify.

    Args:
        unify_dim: target feature dimension after PCA compression.
        is_Reddit: whether a fifth (Reddit) loader is present.
        (remaining arguments are forwarded hyper-parameters, currently only
        used by the commented-out training code.)
    """
    if is_Reddit:
        loader1, loader2, loader3, loader4, loader5 = get_loader_pretrain_data(args.dataset)
    else:
        loader1, loader2, loader3, loader4 = get_loader_pretrain_data(args.dataset)

    if is_Reddit:
        for step, (data1, data2, data3, data4, data5) in enumerate(zip(loader1, loader2, loader3, loader4, loader5)):
            # Extract feature matrix and adjacency matrix for each dataset.
            # (The original code ran process_tu on data1 twice; the redundant
            # call was removed.)
            features11, adj1 = process.process_tu(data1, data1.x.shape[1])
            features22, adj2 = process.process_tu(data2, data2.x.shape[1])
            features33, adj3 = process.process_tu(data3, data3.x.shape[1])
            features44, adj4 = process.process_tu(data4, data4.x.shape[1])
            features55, adj5 = process.process_tu(data5, data5.x.shape[1])

            # Use PCA to align every feature matrix to the same dimension.
            features1 = pca_compression(features11, k=unify_dim)
            features2 = pca_compression(features22, k=unify_dim)
            features3 = pca_compression(features33, k=unify_dim)
            features4 = pca_compression(features44, k=unify_dim)
            features5 = pca_compression(features55, k=unify_dim)

            features1 = torch.FloatTensor(features1).cuda()
            features2 = torch.FloatTensor(features2).cuda()
            features3 = torch.FloatTensor(features3).cuda()
            features4 = torch.FloatTensor(features4).cuda()
            features5 = torch.FloatTensor(features5).cuda()

            # Sample one positive plus 50 negatives per node.
            adj = process.combine_dataset(adj1, adj2, adj3, adj4, adj5)  # (47154, 47154)
            negative_sample = prompt_pretrain_sample(adj, 50)  # (47154, 51)

    else:
        for step, (data1, data2, data3, data4) in enumerate(zip(loader1, loader2, loader3, loader4)):
            # Extract feature matrix and adjacency matrix for each dataset.
            features11, adj1 = process.process_tu(data1, data1.x.shape[1])
            features22, adj2 = process.process_tu(data2, data2.x.shape[1])
            features33, adj3 = process.process_tu(data3, data3.x.shape[1])
            features44, adj4 = process.process_tu(data4, data4.x.shape[1])
            print(type(features11))
            print(type(adj1))

    #         # Use PCA to align every feature matrix to the same dimension.
    #         features1 = pca_compression(features11, k=unify_dim)
    #         features2 = pca_compression(features22, k=unify_dim)
    #         features3 = pca_compression(features33, k=unify_dim)
    #         features4 = pca_compression(features44, k=unify_dim)

    #         features1 = torch.FloatTensor(features1).cuda()
    #         features2 = torch.FloatTensor(features2).cuda()
    #         features3 = torch.FloatTensor(features3).cuda()
    #         features4 = torch.FloatTensor(features4).cuda()

    #         # Sample one positive plus 50 negatives per node.
    #         adj = process.combine_dataset(adj1, adj2, adj3, adj4)
    #         negative_sample = prompt_pretrain_sample(adj, 50)
    #         # Degree-based normalization.
    # adj1 = process.normalize_adj(adj1 + sp.eye(adj1.shape[0]))
    # adj2 = process.normalize_adj(adj2 + sp.eye(adj2.shape[0]))
    # adj3 = process.normalize_adj(adj3 + sp.eye(adj3.shape[0]))
    # adj4 = process.normalize_adj(adj4 + sp.eye(adj4.shape[0]))
    # if args.is_Reddit:
    #     adj5 = process.normalize_adj(adj5 + sp.eye(adj5.shape[0]))

    # # Convert to torch sparse tensors.
    # if sparse:
    #     sp_adj1 = process.sparse_mx_to_torch_sparse_tensor(adj1) # torch.Size([2708, 2708])
    #     sp_adj2 = process.sparse_mx_to_torch_sparse_tensor(adj2)
    #     sp_adj3 = process.sparse_mx_to_torch_sparse_tensor(adj3)
    #     sp_adj4 = process.sparse_mx_to_torch_sparse_tensor(adj4)
    #     if is_Reddit:
    #         sp_adj5 = process.sparse_mx_to_torch_sparse_tensor(adj5)

    # model = PrePrompt(unify_dim, hid_units, nonlinearity, negative_sample,3,0.1,args.combinetype, variance_weight, num_tokens, n_samples)

    # optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    # if torch.cuda.is_available():
    #     print('Using CUDA')
    #     model = model.cuda()
    #     features1 = features1.cuda()
    #     features2 = features2.cuda()
    #     features3 = features3.cuda()
    #     features4 = features4.cuda()
    #     if is_Reddit:
    #         features5 = features5.cuda()

    #     if sparse:
    #         sp_adj1 = sp_adj1.cuda()
    #         sp_adj2 = sp_adj2.cuda()
    #         sp_adj3 = sp_adj3.cuda()
    #         sp_adj4 = sp_adj4.cuda()
    #         if is_Reddit:
    #             sp_adj5 = sp_adj5.cuda()

    # best = 1e9
    # firstbest = 0

    # for epoch in range(args.nb_epochs):
    #     np.random.seed(seed)
    #     torch.manual_seed(seed)
    #     torch.cuda.manual_seed(seed)
    #     loss = 0
    #     regloss = 0
    #     model.train()
    #     optimiser.zero_grad()
    #     features_list = [features1, features2, features3, features4]
    #     adj_list = [sp_adj1 if sparse else adj1, sp_adj2 if sparse else adj2, sp_adj3 if sparse else adj3, sp_adj4 if sparse else adj4]
    #     if is_Reddit:
    #         features_list.append(features5)
    #         adj_list.append(sp_adj5 if sparse else adj5)
    #     loss = model(features_list, adj_list, sparse, None, None, None)
    #     loss.backward()
    #     optimiser.step()
    #     print('Loss:[{:.4f}]'.format(loss.item()))
    #     wandb.log({
    #         "pretrain_loss": loss.item()  # log this epoch's pretraining loss
    #     })
    #     if loss < best:
    #         firstbest = 1
    #         best = loss
    #         best_t = epoch
    #         cnt_wait = 0
    #         torch.save(model.state_dict(), args.save_name)
    #     else:
    #         cnt_wait += 1
    #     if cnt_wait == patience:
    #         print('Early stopping!')
    #         break
    #     print('Loading {}th epoch'.format(best_t))
    # # After the training loop, record the overall minimum loss.
    # final_best_loss = best  # smallest loss observed over the whole run
    # wandb.log({
    #     "final_best_loss": final_best_loss.item()  # record the final best loss in wandb
    # })






if __name__ == "__main__":
    # Parse CLI hyper-parameters, seed all RNGs, then launch pretraining.
    args = get_args()
    separator = '-' * 100
    print(separator)
    print(args)
    print(separator)
    seed = args.seed  # kept as a module global: the (commented) training loop reads it
    set_seed(seed)
    device = torch.device("cuda")
    print(device)
    # Forward every hyper-parameter straight from the parsed arguments.
    train_model(
        args.unify_dim,
        args.reg_weight,
        args.reg_thres,
        args.is_Reddit,
        args.sparse,
        args.num_tokens,
        args.hid_units,
        args.nonlinearity,
        args.lr,
        args.l2_coef,
        args.nb_epochs,
        args.patience,
        args.LP,
        args.lambda_entropy,
        args.n_samples,
        args.variance_weight,
        args.downstreamlr,
    )










