"""
    Utility functions for training one epoch 
    and evaluating one epoch
"""
import torch
import torch.nn as nn
import math

from train.metrics import accuracy_MNIST_CIFAR as accuracy

from nets.superpixels_graph_classification.diffpool import HSIC_weight
import dgl
from tqdm import tqdm
import pickle
"""
    For GCNs
"""
# Per-sample (unreduced) losses so callers can mask/weight individual examples
# before reducing. `reduction='none'` replaces the long-deprecated `reduce=False`.
cls_criterion = torch.nn.BCEWithLogitsLoss(reduction='none')
reg_criterion = torch.nn.MSELoss(reduction='none')
import sys
# Softmax over dim 0: normalizes per-sample weights across the batch.
softmax = nn.Softmax(0)

# One training epoch over a sparse-graph (DGL) data loader.
def train_epoch_sparse(model, optimizer, device, data_loader, epoch, task_type):
    """Run one training epoch and return the mean loss.

    Parameters
    ----------
    model : nn.Module
        GNN whose ``forward(batched_graph, node_feats, edge_feats)`` returns
        ``(scores, embedding)``.
    optimizer : torch.optim.Optimizer
    device : torch.device or str
    data_loader : iterable of ``(batched_graph, labels)`` pairs
    epoch : int
        Current epoch index (unused here; kept for a uniform interface).
    task_type : str
        BCE-with-logits is used when it contains ``'classification'``,
        otherwise MSE.

    Returns
    -------
    (epoch_loss, epoch_train_acc, optimizer)
        NOTE(review): accuracy is never accumulated in this variant, so
        ``epoch_train_acc`` is always 0.0 — confirm callers expect this.
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    nb_data = 0
    num_batches = 0
    for batch_graphs, batch_labels in data_loader:
        num_batches += 1
        # Move the batch to the target device and pull out node/edge features.
        batch_graphs = batch_graphs.to(device)
        batch_x = batch_graphs.ndata['feat'].float().to(device)  # num_nodes x feat
        batch_e = batch_graphs.edata['feat'].long().to(device)
        batch_labels = batch_labels.to(device)
        optimizer.zero_grad()
        batch_scores, embed = model.forward(batch_graphs, batch_x, batch_e)
        # NaN labels compare unequal to themselves: mask of present label entries.
        is_labeled = batch_labels == batch_labels
        # Rows whose every label entry is present (fully labeled samples).
        tmp = torch.sum(is_labeled, 1) == is_labeled.size(1)

        if 'classification' in task_type:
            # Classification: keep only fully-labeled rows, average to a scalar.
            loss = cls_criterion(batch_scores.to(torch.float32)[tmp], batch_labels.to(torch.float32)[tmp]).mean()
        else:
            # Regression: mask individual labeled entries instead of whole rows.
            loss = reg_criterion(batch_scores.to(torch.float32)[is_labeled], batch_labels.to(torch.float32)[is_labeled]).mean()

        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        nb_data += batch_labels.size(0)
    epoch_loss /= num_batches
    epoch_train_acc /= nb_data
    return epoch_loss, epoch_train_acc, optimizer

# Per-sample cross-entropy; currently referenced only by a commented-out
# weighted-loss line inside train_epoch_sparse_HSIC.
criterion = nn.CrossEntropyLoss(reduction='none')

# Step-decay schedule for the balancing-weight optimizer.
def adjust_learning_rate_bl(optimizer, epoch, lrbl):
    """Decay the base LR ``lrbl`` by a factor of 10 every 30 epochs and
    write the result into every parameter group of ``optimizer``."""
    decay_steps = epoch // 30
    new_lr = lrbl * 0.1 ** decay_steps
    for group in optimizer.param_groups:
        group['lr'] = new_lr

# Sparse-graph training epoch with HSIC-based per-sample reweighting.
def train_epoch_sparse_HSIC(model, optimizer,  embedding_size, assign_num, device, data_loader, epoch, lrbl, lambdap, lambda_decay_rate, lambda_decay_epoch, min_lambda_times, task_type):
    """Train one epoch while learning sample weights that decorrelate graph
    embeddings via an HSIC penalty.

    For every batch this (1) runs the model forward, (2) fits the
    ``HSIC_weight`` module for 50 Adam steps to obtain per-sample weights,
    then (3) applies one weighted-loss update to ``model``.

    Parameters mirror ``train_epoch_sparse`` plus:
    embedding_size, assign_num : forwarded to ``HSIC_weight``.
    lrbl : learning rate of the inner weight optimizer.
    lambdap, lambda_decay_rate, lambda_decay_epoch, min_lambda_times :
        schedule hyper-parameters forwarded to the HSIC objective.

    Returns ``(epoch_loss, epoch_train_acc, optimizer)``; accuracy is not
    computed in this variant and is returned as 0.
    """
    epoch_loss = 0
    nb_data = 0
    num_batches = 0
    for batch_idx, (batch_graphs, batch_labels) in enumerate(data_loader):
        # Move the batch and its node/edge features to the target device.
        batch_graphs = batch_graphs.to(device)
        batch_x = batch_graphs.ndata['feat'].float().to(device)  # num_nodes x feat
        batch_e = batch_graphs.edata['feat'].long().to(device)
        batch_labels = batch_labels.to(device)
        optimizer.zero_grad()
        model.train()
        batch_scores, cfeatures = model.forward(batch_graphs, batch_x, batch_e)
        # NaN labels compare unequal to themselves: mask of present label entries.
        is_labeled = batch_labels == batch_labels
        # Rows whose every label entry is present (fully labeled samples).
        tmp = torch.sum(is_labeled, 1) == is_labeled.size(1)
        # Running features/weights carried on the model across batches.
        pre_features = model.pre_features
        pre_weights = model.pre_weights
        # The very first batch of the very first epoch initializes HSIC state.
        first = (epoch == 0 and batch_idx == 0)
        # FIX: `.to(device)` instead of the hard-coded `.cuda()` so the routine
        # also runs on CPU-only setups (identical on CUDA devices).
        HSIC_reg = HSIC_weight(cfeatures[tmp], pre_features, pre_weights, embedding_size, torch.sum(tmp), assign_num, first=first).to(device)
        # Separate optimizer for the sample-weight parameters only.
        optimizer_weight = torch.optim.Adam(HSIC_reg.parameters(), lr=lrbl)

        # Fit the per-sample weights by minimizing the HSIC objective.
        for _ in range(50):
            optimizer_weight.zero_grad()
            loss, lossb = HSIC_reg(lambdap, epoch, lambda_decay_rate, lambda_decay_epoch, min_lambda_times, first=first)
            loss.backward()
            optimizer_weight.step()
        # Learned weights after the inner optimization.
        weights = HSIC_reg.weights

        # One weighted update of the model with the (detached) learned weights.
        optimizer.zero_grad()
        weight = softmax(weights.detach().clone())
        if 'classification' in task_type:
            loss = cls_criterion(batch_scores.to(torch.float32)[tmp], batch_labels.to(torch.float32)[tmp]) * weight.to(device)
        else:
            loss = reg_criterion(batch_scores.to(torch.float32)[is_labeled], batch_labels.to(torch.float32)[is_labeled]).view(weight.size(0), -1) * weight.to(device)
        loss = loss.sum()
        loss.backward()
        optimizer.step()

        epoch_loss += loss.detach().item()
        nb_data += batch_labels.size(0)
        num_batches += 1
    epoch_loss /= num_batches
    # Accuracy is not tracked in this training variant.
    epoch_train_acc = 0
    return epoch_loss, epoch_train_acc, optimizer

# Collect every batch of ``data_loader`` into a single graphs tensor and a
# single labels tensor (used to build a cached training set).
def generate_saved_data(model, device, data_loader, epoch, max_num_nodes_train):
    """Concatenate all batches from ``data_loader`` along dim 0.

    Parameters
    ----------
    model : nn.Module
        Only switched to eval mode here; no forward pass is run.
    device, epoch, max_num_nodes_train :
        Unused; kept for interface compatibility with the other helpers.
    data_loader : iterable of ``(graphs_tensor, labels_tensor)``
        Batches must be dense tensors concatenable along dim 0.

    Returns
    -------
    (graphs, labels) : torch.Tensor, torch.Tensor
    """
    model.eval()
    graph_batches = []
    label_batches = []
    with torch.no_grad():
        for batch_graphs, batch_labels in data_loader:
            graph_batches.append(batch_graphs)
            label_batches.append(batch_labels)
    # Single cat instead of the former per-batch re-concatenation (O(n) vs
    # O(n^2)); the per-iteration debug prints of whole tensors are dropped.
    graphs = torch.cat(graph_batches, dim=0)
    labels = torch.cat(label_batches, dim=0)
    print("graphs", graphs.shape)
    print("labels", labels.shape)
    return graphs, labels

# Evaluate a sparse-graph model: mean loss and accuracy over the loader.
def evaluate_network_sparse(model, device, data_loader, epoch):
    """Run ``model`` over ``data_loader`` without gradients and return
    ``(mean_loss_per_batch, accuracy_per_sample)``."""
    model.eval()
    total_loss = 0
    total_acc = 0
    sample_count = 0
    predictions = []
    with torch.no_grad():
        num_batches = 0
        for batch_graphs, batch_labels in data_loader:
            num_batches += 1
            batch_graphs = batch_graphs.to(device)
            node_feat = batch_graphs.ndata['feat'].float().to(device)
            edge_feat = batch_graphs.edata['feat'].to(device)
            batch_labels = batch_labels.to(device)
            batch_scores = model.forward(batch_graphs, node_feat, edge_feat)
            predictions += batch_scores.cpu().detach().numpy().tolist()
            batch_loss = model.loss(batch_scores, batch_labels)
            total_loss += batch_loss.detach().item()
            total_acc += accuracy(batch_scores, batch_labels)
            sample_count += batch_labels.size(0)
        total_loss /= num_batches
        total_acc /= sample_count

    return total_loss, total_acc




"""
    For WL-GNNs
"""
# One training epoch for dense-input WL-GNNs with gradient accumulation.
def train_epoch_dense(model, optimizer, device, data_loader, epoch, batch_size):
    """Train a WL-GNN that consumes one dense graph tensor per iteration;
    gradients are accumulated and applied every ``batch_size`` iterations.

    Returns ``(mean_loss_per_batch, accuracy_per_sample, optimizer)``.
    """
    model.train()
    running_loss = 0
    running_acc = 0
    sample_count = 0
    step = 0
    optimizer.zero_grad()
    for x_with_node_feat, labels in data_loader:
        x_with_node_feat = x_with_node_feat.to(device)
        labels = labels.to(device)

        scores = model.forward(x_with_node_feat)
        loss = model.loss(scores, labels)
        loss.backward()

        # Apply the accumulated gradients every ``batch_size`` iterations
        # (including the very first one, since ``step`` starts at 0).
        if step % batch_size == 0:
            optimizer.step()
            optimizer.zero_grad()

        running_loss += loss.detach().item()
        running_acc += accuracy(scores, labels)
        sample_count += labels.size(0)
        step += 1
    mean_loss = running_loss / step
    mean_acc = running_acc / sample_count

    return mean_loss, mean_acc, optimizer

# Evaluate a dense-input WL-GNN: mean loss and accuracy over the loader.
def evaluate_network_dense(model, device, data_loader, epoch):
    """Gradient-free evaluation pass; returns
    ``(mean_loss_per_batch, accuracy_per_sample)``."""
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    sample_count = 0
    with torch.no_grad():
        batch_count = 0
        for x_with_node_feat, labels in data_loader:
            batch_count += 1
            x_with_node_feat = x_with_node_feat.to(device)
            labels = labels.to(device)

            scores = model.forward(x_with_node_feat)
            loss = model.loss(scores, labels)
            total_loss += loss.detach().item()
            total_acc += accuracy(scores, labels)
            sample_count += labels.size(0)
        total_loss /= batch_count
        total_acc /= sample_count

    return total_loss, total_acc
