# main.py
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from unittest import loader
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import f1_score
import random
from MDGPT.models import LogReg
from MDGPT.model_reg_smooth import PrePrompt,pca_compression
from MDGPT.model_reg_smooth import PrePrompt as preprompt
from MDGPT.utils import process
import pdb
import tqdm
import argparse
from MDGPT.model_reg_smooth import *
import csv
from tqdm import tqdm
parser = argparse.ArgumentParser("MDGPT")
import torch.nn.functional as F
from MDGPT.config import get_args
from MDGPT.utils.data_util_temp import get_loader_pretrain_data, get_loader_down_data
from torch_geometric.datasets import TUDataset,Planetoid,Amazon,Coauthor,Reddit
from torch_geometric.loader import DataLoader
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
import torch
import torch.nn as nn
import wandb
from sklearn.decomposition import TruncatedSVD

def set_seed(seed):
    """Seed every RNG source (python, numpy, torch CPU and CUDA) for reproducibility.

    Args:
        seed: integer seed applied to all generators.

    Note: full CUDA determinism would additionally require
    ``torch.backends.cudnn.deterministic = True``; that is intentionally
    not forced here to avoid a performance penalty.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Seed every visible GPU, not only the current device (safe no-op
    # on CPU-only machines).
    torch.cuda.manual_seed_all(seed)

def get_laplacian_evd(adj):
    """Approximate the leading spectral components of the graph Laplacian.

    Builds L = D - A on a copy of the (scipy sparse) adjacency matrix,
    then fits a 100-component truncated SVD on it.

    Args:
        adj: scipy sparse adjacency matrix (assumed zero diagonal -- the
            setdiag trick below relies on that; TODO confirm against callers).

    Returns:
        (eival, eivec): float32 CUDA tensors -- sqrt of the SVD explained
        variances, and the 100 component row vectors.
    """
    # calculate laplacian matrix
    adj = adj.copy()  # avoid mutating the caller's matrix
    # Put -degree on the diagonal, then negate the whole matrix:
    # diagonal becomes +degree, off-diagonals become -a_ij,
    # i.e. the combinatorial Laplacian D - A.
    adj.setdiag(-adj.sum(axis=1))
    adj = -adj

    svd = TruncatedSVD(n_components=100, n_iter=20, random_state=42)
    svd.fit(adj)

    # NOTE(review): `explained_variance_` is the variance of the projected
    # training data, not the singular values of the matrix; for actual
    # Laplacian eigenvalues `svd.singular_values_` looks intended -- confirm
    # before changing.
    # NOTE(review): device is hard-coded to 'cuda'; this fails on CPU-only
    # hosts (matches the rest of the file, which also assumes CUDA).
    eival = torch.tensor( svd.explained_variance_ ** 0.5, dtype=torch.float32 ).to('cuda')
    eivec = torch.tensor( svd.components_, dtype=torch.float32 ).to('cuda')
    return eival, eivec

def train_model(unify_dim, reg_weight, reg_thres, is_Reddit, sparse, num_tokens, hid_units, nonlinearity, lr, l2_coef, nb_epochs, patience, LP, lambda_entropy, n_samples=3, variance_weight=0.1, downstreamlr=0.001):
    """Pretrain a PrePrompt model on several source graphs, then evaluate it
    on a downstream dataset in a few-shot node-classification setting.

    Phase 1 (pretraining): loads 4 (or 5 when is_Reddit) source graphs,
    PCA-compresses their features to `unify_dim`, samples positives/negatives,
    and trains PrePrompt with early stopping, checkpointing to args.save_name.
    Phase 2 (downstream): reloads the best checkpoint, embeds the downstream
    graph, and for each few-shot split trains a `downprompt` head, logging
    mean/std accuracy to wandb and a CSV file.

    NOTE(review): relies on module-level globals `args` and `seed` set in
    __main__; also on `prompt_pretrain_sample` and `downprompt` pulled in by
    the star import of MDGPT.model_reg_smooth. The `nb_epochs` parameter is
    shadowed by `args.nb_epochs` in the epoch loop below -- confirm which one
    is authoritative.
    """
    if is_Reddit:
        loader1, loader2, loader3, loader4, loader5 = get_loader_pretrain_data(args.dataset)
    else:
        loader1, loader2, loader3, loader4 = get_loader_pretrain_data(args.dataset)

    if is_Reddit:
        # NOTE(review): variables assigned in this loop (features*, adj*,
        # negetive_sample) intentionally leak out of the loop and are used
        # below; the loaders presumably yield a single full-graph batch.
        for step, (data1, data2, data3, data4, data5) in enumerate(zip(loader1, loader2, loader3, loader4 ,loader5)):
            # Extract feature matrix and adjacency matrix for each graph.
            features11, adj1 = process.process_tu(data1,data1.x.shape[1])
            features22, adj2 = process.process_tu(data2,data2.x.shape[1])
            features33, adj3 = process.process_tu(data3,data3.x.shape[1])
            features44, adj4 = process.process_tu(data4,data4.x.shape[1])
            features55, adj5 = process.process_tu(data5,data5.x.shape[1])

            # Align the feature dimensions of all graphs via PCA compression.
            features1 = pca_compression(features11, k=unify_dim)
            features2 = pca_compression(features22, k=unify_dim)
            features3 = pca_compression(features33, k=unify_dim)
            features4 = pca_compression(features44, k=unify_dim)
            features5 = pca_compression(features55, k=unify_dim)

            features1 = torch.FloatTensor(features1).cuda()
            features2 = torch.FloatTensor(features2).cuda()
            features3 = torch.FloatTensor(features3).cuda()
            features4 = torch.FloatTensor(features4).cuda()
            features5 = torch.FloatTensor(features5).cuda()

            # Sample one positive plus 50 negative examples per node.
            adj = process.combine_dataset(adj1, adj2, adj3, adj4, adj5) # (47154, 47154)
            negetive_sample = prompt_pretrain_sample(adj, 50) # (47154, 51)

    else:
        for step, (data1, data2, data3, data4) in enumerate(zip(loader1, loader2, loader3, loader4)):
            # Extract feature matrix and adjacency matrix for each graph.
            features11, adj1 = process.process_tu(data1,data1.x.shape[1])
            features22, adj2 = process.process_tu(data2,data2.x.shape[1])
            features33, adj3 = process.process_tu(data3,data3.x.shape[1])
            features44, adj4 = process.process_tu(data4,data4.x.shape[1])

            # Align the feature dimensions of all graphs via PCA compression.
            features1 = pca_compression(features11, k=unify_dim)
            features2 = pca_compression(features22, k=unify_dim)
            features3 = pca_compression(features33, k=unify_dim)
            features4 = pca_compression(features44, k=unify_dim)

            features1 = torch.FloatTensor(features1).cuda()
            features2 = torch.FloatTensor(features2).cuda()
            features3 = torch.FloatTensor(features3).cuda()
            features4 = torch.FloatTensor(features4).cuda()

            # Sample one positive plus 50 negative examples per node.
            adj = process.combine_dataset(adj1, adj2, adj3, adj4)
            negetive_sample = prompt_pretrain_sample(adj, 50)
            # Normalize by node degree (with self-loops) below.
    adj1 = process.normalize_adj(adj1 + sp.eye(adj1.shape[0]))
    adj2 = process.normalize_adj(adj2 + sp.eye(adj2.shape[0]))
    adj3 = process.normalize_adj(adj3 + sp.eye(adj3.shape[0]))
    adj4 = process.normalize_adj(adj4 + sp.eye(adj4.shape[0]))
    if args.is_Reddit:
        adj5 = process.normalize_adj(adj5 + sp.eye(adj5.shape[0]))

    # Convert scipy matrices to torch sparse tensors.
    if sparse:
        sp_adj1 = process.sparse_mx_to_torch_sparse_tensor(adj1) # torch.Size([2708, 2708])
        sp_adj2 = process.sparse_mx_to_torch_sparse_tensor(adj2)
        sp_adj3 = process.sparse_mx_to_torch_sparse_tensor(adj3)
        sp_adj4 = process.sparse_mx_to_torch_sparse_tensor(adj4)
        if is_Reddit:
            sp_adj5 = process.sparse_mx_to_torch_sparse_tensor(adj5)

    model = PrePrompt(unify_dim, hid_units, nonlinearity,negetive_sample,3,0.1,args.combinetype, variance_weight, num_tokens, n_samples)

    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    if torch.cuda.is_available():
        print('Using CUDA')
        model = model.cuda()
        features1 = features1.cuda()
        features2 = features2.cuda()
        features3 = features3.cuda()
        features4 = features4.cuda()
        if is_Reddit:
            features5 = features5.cuda()
        
        if sparse:
            sp_adj1 = sp_adj1.cuda()
            sp_adj2 = sp_adj2.cuda()
            sp_adj3 = sp_adj3.cuda()
            sp_adj4 = sp_adj4.cuda()
            if is_Reddit:
                sp_adj5 = sp_adj5.cuda()

    best = 1e9
    firstbest = 0

    # ---- Pretraining loop with loss-based early stopping ----
    # NOTE(review): iterates args.nb_epochs, shadowing the `nb_epochs`
    # parameter of this function.
    for epoch in range(args.nb_epochs):
        # Re-seed every epoch (uses the module-level global `seed`) --
        # presumably to keep negative sampling deterministic per epoch.
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        loss = 0
        regloss = 0
        model.train()
        optimiser.zero_grad()
        features_list = [features1, features2, features3, features4]
        adj_list = [sp_adj1 if sparse else adj1, sp_adj2 if sparse else adj2, sp_adj3 if sparse else adj3, sp_adj4 if sparse else adj4]
        if is_Reddit:
            features_list.append(features5)
            adj_list.append(sp_adj5 if sparse else adj5)
        loss = model(features_list, adj_list, sparse, None, None, None)
        loss.backward()
        optimiser.step()
        print('Loss:[{:.4f}]'.format(loss.item()))
        wandb.log({
            "pretrain_loss": loss.item()  # record the pretraining loss of the current epoch
        })
        # Checkpoint whenever the loss improves; stop after `patience`
        # consecutive non-improving epochs.
        if loss < best:
            firstbest = 1
            best = loss
            best_t = epoch
            cnt_wait = 0
            torch.save(model.state_dict(), args.save_name)
        else:
            cnt_wait += 1
        if cnt_wait == patience:
            print('Early stopping!')
            break
        print('Loading {}th epoch'.format(best_t))
    # After the training loop, record the overall minimum loss value.
    final_best_loss = best  # tracks the smallest loss seen during training
    wandb.log({
        "final_best_loss": final_best_loss.item()  # log the final best loss to wandb
    })


    # ---- Downstream few-shot evaluation ----
    # Fresh model instance; weights are loaded from the checkpoint below.
    model = PrePrompt(unify_dim, hid_units, nonlinearity,1,3,0.1,args.combinetype, variance_weight, num_tokens, n_samples)
    print('#'*50)
    print('Downastream dataset is ',args.dataset)
    loader = get_loader_down_data(args.dataset)
    eival = 0
    eivec = 0
    for data in loader:
        print(data)
        features,adj= process.process_tu(data,data.x.shape[1])
        eival, eivec = get_laplacian_evd(adj)
        print('process done')
        features = pca_compression(features,k=unify_dim)
        print('pca')
        adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
        print("adj")
        sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
        sp_adj = sp_adj.cuda()
        features = torch.FloatTensor(features).cuda()
        print(features.shape)
        # Last 1000 nodes are held out as the test set.
        idx_test = range(data.y.shape[0]-1000,data.y.shape[0])
        labels = data.y
        # NOTE(review): `data` is rebound to a numpy label array here, so the
        # PyG data object is no longer accessible after this point.
        data=np.array(data.y)

        np.unique(data)

        nb_classes=len(np.unique(data))
        print(nb_classes)

    model = model.cuda()
    model.load_state_dict(torch.load(args.save_name))
    embeds, _ = model.embed(features, sp_adj if sparse else adj, sparse, None,LP)
    downstreamlrlist = [downstreamlr]
    acclist = torch.FloatTensor(100,).cuda()
    xent = nn.CrossEntropyLoss()
    # This data path is used to locate the few-shot splits.
    config = wandb.config
    data_path = config.data_path
    for downstreamlr in downstreamlrlist:
        print(labels.shape)
        test_lbls = labels[idx_test].cuda()
        tot = torch.zeros(1)
        tot = tot.cuda()
        accs = []
        print('-' * 100)

        for shotnum in range(args.shot_num, args.shot_num + 1):
            tot = torch.zeros(1)
            tot = tot.cuda()
            accs = []
            cnt_wait = 0
            best = 1e9
            best_t = 0
            print("shotnum",shotnum)
            # Repeat the few-shot experiment over `prompt_times` splits.
            for i in tqdm(range(args.prompt_times)): # 100
                masks_logits = model.masks_logits
                soft_masks = torch.sigmoid(masks_logits)
                # weights_list = [pretext.weight.detach() for pretext in model.pretexts]
                log = downprompt(soft_masks, hid_units, nb_classes, args.combinetype, unify_dim, num_tokens).cuda()
                idx_train = torch.load("{}/fewshot_{}_50/{}-shot_{}/{}/idx.pt".format(data_path, args.dataset.lower(),shotnum,args.dataset.lower(),i)).type(torch.long).cuda()
                pretrain_embs = embeds[0, idx_train]
                test_embs = embeds[0, idx_test]
                train_lbls = torch.load("{}/fewshot_{}_50/{}-shot_{}/{}/labels.pt".format(data_path, args.dataset.lower(),shotnum,args.dataset.lower(),i)).type(torch.long).squeeze().cuda()
                opt = torch.optim.Adam([
                    {'params': log.parameters()}
                ], lr=downstreamlr)
                log = log.cuda()
                best = 1e9
                pat_steps = 0
                best_acc = torch.zeros(1)
                best_acc = best_acc.cuda()
                best_acc = 0  # initialize the best accuracy (rebinds the tensor above to a plain int)
                cnt_wait = 0  # early-stopping wait counter
                for idx_temp in range(args.fw_epochs):  # maximum number of training iterations
                    log.train()
                    opt.zero_grad()
                    logits, entropy_logits, reg_loss = log(eivec, eival, reg_thres, features, sp_adj, sparse, model.gcn, idx_train, pretrain_embs, train_lbls, 1)
                    entropy_loss_value = torch.mean(entropy_logits)  # mean entropy used as a loss term
                    loss = xent(logits, train_lbls)
                    loss = loss + lambda_entropy * entropy_loss_value  + reg_weight * reg_loss
                    #print(loss, reg_weight * reg_loss) + reg_weight * reg_loss
                    # Evaluate current test accuracy for early stopping.
                    # NOTE(review): early stopping on test accuracy leaks test
                    # information into model selection -- confirm intended.
                    logits_test, _ , _= log(eivec, eival, reg_thres, features, sp_adj, sparse, model.gcn, idx_test, test_embs)
                    preds = torch.argmax(logits_test, dim=1).cuda()
                    acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]

                    # Early stopping based on `acc`.
                    if acc > best_acc:
                        best_acc = acc  # update the best accuracy
                        cnt_wait = 0  # reset the wait counter
                    else:
                        cnt_wait += 1

                    if cnt_wait == patience:
                        print(f'Early stopping at iteration {idx_temp + 1} with best accuracy: {best_acc:.4f}')
                        break

                    loss.backward(retain_graph=True)
                    opt.step()
                # Final evaluation of this split after training.
                logits, _, _ = log(eivec, eival, reg_thres, features,sp_adj,sparse,model.gcn,idx_test,test_embs)
                preds = torch.argmax(logits, dim=1).cuda()
                acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
                accs.append(acc * 100)
                tot += acc
            print('-' * 100)
            # NOTE(review): divides by a hard-coded 100 rather than
            # args.prompt_times -- wrong average if prompt_times != 100.
            print('Average accuracy:[{:.4f}]'.format(tot.item() / 100))
            accs = torch.stack(accs)
            mean_acc = accs.mean().item()
            std_acc = accs.std().item()
            print('Mean:[{:.4f}]'.format(mean_acc))
            print('Std :[{:.4f}]'.format(std_acc))
            print('-' * 100)
            wandb.log({
                "shot_num": shotnum,                 # number of shots in few-shot learning
                "learning_rate": lr,                # upstream (pretraining) learning rate
                "downstream_learning_rate": downstreamlr,  # downstream learning rate
                "hidden_units": hid_units,          # hidden layer dimension
                "mean_accuracy": mean_acc,          # mean accuracy
                "std_accuracy": std_acc             # accuracy standard deviation
            })
            print('-' * 100)
            row = [shotnum,lr,downstreamlr,hid_units,accs.mean().item(),accs.std().item()]
            # NOTE(review): file handle is never closed -- a `with open(...)`
            # block would be safer.
            out = open("{}/NIPS24_{}_fewshot.csv".format(data_path, args.dataset.lower()), "a", newline="")
            csv_writer = csv.writer(out, dialect="excel")
            csv_writer.writerow(row)



if __name__ == "__main__":
    # Parse CLI/config arguments; note that `args` and `seed` become module
    # globals that train_model reads directly (not only via its parameters).
    args = get_args()
    print('-' * 100)
    print(args)
    print('-' * 100)
    seed = args.seed
    set_seed(seed)
    # NOTE(review): assumes a CUDA device is present; the script will fail
    # later on .cuda() calls if not.
    device = torch.device("cuda")
    print(device)
    # Unpack hyperparameters into locals for the train_model call.
    unify_dim = args.unify_dim
    is_Reddit = args.is_Reddit
    sparse = args.sparse
    num_tokens = args.num_tokens
    hid_units = args.hid_units
    nonlinearity = args.nonlinearity
    lr = args.lr
    l2_coef = args.l2_coef
    nb_epochs = args.nb_epochs
    patience = args.patience
    LP = args.LP
    lambda_entropy = args.lambda_entropy
    n_samples = args.n_samples
    variance_weight = args.variance_weight
    downstreamlr = args.downstreamlr
    reg_weight = args.reg_weight
    reg_thres = args.reg_thres
    train_model(unify_dim, reg_weight, reg_thres, is_Reddit, sparse, num_tokens, hid_units, nonlinearity, lr, l2_coef, nb_epochs, patience, LP, lambda_entropy, n_samples, variance_weight, downstreamlr)










