import datetime
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import warnings
from sklearn.metrics import r2_score
from torch.utils.tensorboard import SummaryWriter  # to save the process of the model
from tqdm import trange
from collections import OrderedDict
import logging
from .networks import SWNN
from .utils import OLS
from .diagnosis_opt import DIAGNOSIS_Opt
import gc
import uuid
from pathlib import Path
from gwlsa.datasets import LocalDistance, calc_max_neighbours
#from memory_profiler import profile
from utils.general_utils import timer

def compute_distance_weights(X_data, max_distance=60, resolution=30):
    """Build the local-distance weight tensor for a set of points.

    Parameters
    ----------
    X_data : torch.Tensor
        Point coordinates; a defensive clone is converted to NumPy so the
        original tensor's storage is never aliased by ``LocalDistance``.
    max_distance : int
        Buffer/search radius passed to ``LocalDistance`` (default: ``60``).
    resolution : int
        Grid resolution passed to ``LocalDistance`` (default: ``30``).

    Returns
    -------
    torch.Tensor
        The distance-weight array produced by ``LocalDistance``, wrapped as
        a torch tensor.
    """
    coords = X_data.clone().numpy()
    weights = LocalDistance(coords, coords, max_distance, resolution)
    return torch.from_numpy(weights)

class GNNWR:
    r"""
    GNNWR(Geographically neural network weighted regression) is a model to address spatial non-stationarity in various domains with complex geographical processes,
    which comes from the paper `Geographically neural network weighted regression for the accurate estimation of spatial non-stationarity <https://doi.org/10.1080/13658816.2019.1707834>`__.

    Parameters
    ----------
    train_dataset : baseDataset
        the dataset of training
    valid_dataset : baseDataset
        the dataset of validation
    test_dataset : baseDataset
        the dataset of testing
    dense_layers : list
        the dense layers of the model (default: ``None``)

        Default structure is a geometric sequence of power of 2, the minimum is 2, and the maximum is the power of 2 closest to the number of neurons in the input layer.
        
        i.e. ``[2,4,8,16,32,64,128,256]``
    start_lr : float
        the start learning rate of the model (default: ``0.1``)
    optimizer : str, optional
        the optimizer of the model (default: ``"Adagrad"``)
        choose from "SGD","Adam","RMSprop","Adagrad","Adadelta"
    drop_out : float
        the drop out rate of the model (default: ``0.2``)
    batch_norm : bool, optional
        whether use batch normalization (default: ``True``)
    activate_func : torch.nn
        the activate function of the model (default: ``nn.PReLU(init=0.4)``)
    model_name : str
        the name of the model (default: ``"GNNWR_" + datetime.datetime.today().strftime("%Y%m%d-%H%M%S")``)
    model_save_path : str
        the path of the model (default: ``"../gnnwr_models"``)
    write_path : str
        the path of the log (default: ``"../gnnwr_runs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")``)
    use_gpu : bool
        whether use gpu or not (default: ``True``)
    use_ols : bool
        whether use ols or not (default: ``True``)
    log_path : str
        the path of the log (default: ``"../gnnwr_logs/"``)
    log_file_name : str
        the name of the log (default: ``"gwlsa" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".log"``)
    log_level : int
        the level of the log (default: ``logging.INFO``)
    optimizer_params : dict, optional
        the params of the optimizer and the scheduler (default: ``None``)

        if optimizer is SGD, the params are:

            | maxlr: float, the max learning rate (default: ``0.1``)

            | minlr: float, the min learning rate (default: ``0.01``)

            | upepoch: int, the epoch of learning rate up (default: ``10000``)

            | decayepoch: int, the epoch of learning rate decay (default: ``20000``)

            | decayrate: float, the rate of learning rate decay (default: ``0.95``)

            | stop_change_epoch: int, the epoch of learning rate stop change (default: ``30000``)

            | stop_lr: float, the learning rate when stop change (default: ``0.001``)

        if optimizer is Other, the params are:

            | scheduler: str, the name of the scheduler (default: ``"CosineAnnealingWarmRestarts"``) in {``"MultiStepLR","CosineAnnealingLR","CosineAnnealingWarmRestarts"``}

            | scheduler_milestones: list, the milestones of the scheduler MultiStepLR (default: ``[100,500,1000,2000]``)

            | scheduler_gamma: float, the gamma of the scheduler MultiStepLR (default: ``0.5``)

            | scheduler_T_max: int, the T_max of the scheduler CosineAnnealingLR (default: ``1000``)

            | scheduler_eta_min: float, the eta_min of the scheduler CosineAnnealingLR and CosineAnnealingWarmRestarts (default: ``0.01``)

            | scheduler_T_0: int, the T_0 of the scheduler CosineAnnealingWarmRestarts (default: ``100``)

            | scheduler_T_mult: int, the T_mult of the scheduler CosineAnnealingWarmRestarts (default: ``3``)

    F_test : bool
            if ``True``, an F-test is performed (default: ``False``)

    show_train_metrics : bool
            if ``True``, evaluation metrics on the training set (AIC, R2, AUC, F1, Recall, etc.) are computed and printed during training (default: ``False``)

    """

    def __init__(
            self,
            train_dataset,
            valid_dataset,
            test_dataset,
            dense_layers=None,
            start_lr: float = .1,
            optimizer="Adagrad",
            drop_out=0.2,
            batch_norm=True,
            activate_func=None,
            model_name=None,
            model_save_path="../gnnwr_models",
            write_path=None,
            use_gpu: bool = True,
            use_ols: bool = True,
            log_path="../gnnwr_logs/",
            log_file_name=None,
            log_level=logging.INFO,
            optimizer_params=None,
            train_outputs="../gnnwr_tmp",
            F_test=False,
            show_train_metrics=False,
            spatial_columns=None,
            resolution=30,
            max_distance=90,
            id_column=None,
            batch_size=None
    ):
        """
        Build the GNNWR model, its fixed output layer and all bookkeeping
        state, then move everything to the selected device and create the
        optimizer/scheduler.

        ``activate_func``, ``model_name``, ``write_path`` and
        ``log_file_name`` now default to ``None`` and are resolved here at
        call time: a concrete default in the signature would be evaluated
        once at import time, so every instance would share one learnable
        ``nn.PReLU`` module and one stale timestamp.
        """
        if activate_func is None:
            activate_func = nn.PReLU(init=0.4)
        timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        if model_name is None:
            model_name = "GNNWR_" + timestamp
        if write_path is None:
            write_path = "../gnnwr_runs/" + timestamp
        if log_file_name is None:
            log_file_name = "gwlsa" + timestamp + ".log"

        self._train_dataset = train_dataset  # train dataset
        self._valid_dataset = valid_dataset  # valid dataset
        self._test_dataset = test_dataset  # test dataset
        self._dense_layers = dense_layers  # structure of the hidden layers
        self._start_lr = start_lr  # initial learning rate
        # coefsize = number of features + 1: the coefficient vector includes
        # a bias term (w1*x1 + ... + wm*xm + bias).
        self._outsize = train_dataset.coefsize
        max_neighbours = calc_max_neighbours(buffer_distance=max_distance, resolution=resolution)
        # Network input: for every neighbour, all of its features X plus one
        # extra distance-weight column appended after X, hence coefsize + 1.
        self._insize = max_neighbours * (train_dataset.coefsize + 1)
        self._writer = SummaryWriter(write_path)  # tensorboard summary writer
        self._drop_out = drop_out  # drop-out ratio
        self._batch_norm = batch_norm  # whether to use batch normalization
        self._activate_func = activate_func  # activation function, default: PReLU(0.4)
        self._model = SWNN(self._dense_layers, self._insize, self._outsize,
                           self._drop_out, self._activate_func, self._batch_norm)  # model
        # Store directories as Path objects: later code joins them with the
        # pathlib "/" operator (e.g. log_path / log_file_name), which raises
        # TypeError on a plain str.
        self._log_path = Path(log_path)  # log directory
        self._log_file_name = log_file_name  # log file name
        self._log_level = log_level  # log level
        self.__istrained = False  # whether the model has been trained
        self._train_outputs = train_outputs
        self._F_test = F_test
        self._show_train_metrics = show_train_metrics
        if spatial_columns is None:
            raise ValueError("spatial_columns is None!")
        self.spatial_columns = spatial_columns
        self.resolution = resolution
        self.max_distance = max_distance
        self.id_column = id_column
        # NOTE(review): batch_size is currently unused here; kept for
        # interface compatibility with existing callers.

        # _coefficient corresponds to the β of the paper.
        self._coefficient = OLS(
            train_dataset.dataframe, train_dataset.x_columns, train_dataset.y_column).params  # coefficients of OLS
        self._out = nn.Linear(
            self._outsize, 1, bias=False)  # layer that multiplies OLS coefficients with the model output
        if use_ols:
            # With OLS, the fixed output weights are the OLS coefficients
            # (the w of wx + b).
            self._out.weight = nn.Parameter(torch.tensor([self._coefficient]).to(
                torch.float32), requires_grad=False)  # define the weight
        else:
            # Without OLS, the fixed output weights are an all-ones matrix.
            self._coefficient = np.ones((1, self._outsize))
            self._out.weight = nn.Parameter(torch.tensor(np.ones((1, self._outsize))).to(
                torch.float32), requires_grad=False)  # define the weight
        self._criterion = nn.MSELoss()  # loss function
        self._trainLossList = []  # per-epoch training losses
        self._validLossList = []  # per-epoch validation losses
        self._epoch = 0  # current epoch
        self._bestr2 = float('-inf')  # best validation R2
        self._besttrainr2 = float('-inf')  # train R2 at the best validation R2
        self._bestValAIC = float('inf')  # best (lowest) validation AIC
        self._bestValAUC = float('-inf')  # best validation AUC
        self._bestValF1 = float('-inf')  # best validation F1
        self._bestValRecall = float('-inf')  # best validation Recall
        self._noUpdateEpoch = 0  # epochs without improvement (early stopping)
        self._modelName = Path(model_name)  # model name
        self._modelSavePath = Path(model_save_path)  # model save directory (Path so "/" joins work)
        self._train_diagnosis = None  # diagnosis of training
        self._test_diagnosis = None  # diagnosis of test
        self._val_diagnosis = None  # diagnosis of validation
        self._valid_r2 = None  # R2 of validation
        self.result_data = None
        self._use_gpu = use_gpu
        if self._use_gpu:
            if torch.cuda.is_available():
                devices = [i for i in range(torch.cuda.device_count())]
                os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, devices))
            else:
                self._use_gpu = False  # fall back to CPU when no GPU is available
        self._optimizer = None
        self._scheduler = None
        self._optimizer_name = None

        # Move the model and the output layer to the selected device.
        device = torch.device('cuda') if self._use_gpu else torch.device('cpu')
        self._model.to(device)
        self._out.to(device)

        self.init_optimizer(optimizer, optimizer_params)  # initialize the optimizer

    def init_optimizer(self, optimizer, optimizer_params=None):
        r"""
        Initialize the optimizer and its learning-rate scheduler.

        Parameters
        ----------
        optimizer : str
            the optimizer of the model (default: ``"Adagrad"``)
            choose from "SGD","Adam","RMSprop","Adagrad","Adadelta"
        optimizer_params : dict, optional
            the params of the optimizer and the scheduler (default: ``None``)

            if optimizer is SGD, the params are:

                | maxlr: float, the max learning rate (default: ``0.1``)

                | minlr: float, the min learning rate (default: ``0.01``)

                | upepoch: int, the epoch of learning rate up (default: ``10000``)

                | decayepoch: int, the epoch of learning rate decay (default: ``20000``)

                | decayrate: float, the rate of learning rate decay (default: ``0.95``)

                | stop_change_epoch: int, the epoch of learning rate stop change (default: ``30000``)

                | stop_lr: float, the learning rate when stop change (default: ``0.001``)

            if optimizer is Other, the params are:

                | scheduler: str, the name of the scheduler (default: ``"CosineAnnealingWarmRestarts"``) in {``"MultiStepLR","CosineAnnealingLR","CosineAnnealingWarmRestarts"``}

                | scheduler_milestones: list, the milestones of the scheduler MultiStepLR (default: ``[100,500,1000,2000]``)

                | scheduler_gamma: float, the gamma of the scheduler MultiStepLR (default: ``0.5``)

                | scheduler_T_max: int, the T_max of the scheduler CosineAnnealingLR (default: ``1000``)

                | scheduler_eta_min: float, the eta_min of the scheduler CosineAnnealingLR and CosineAnnealingWarmRestarts (default: ``0.01``)

                | scheduler_T_0: int, the T_0 of the scheduler CosineAnnealingWarmRestarts (default: ``100``)

                | scheduler_T_mult: int, the T_mult of the scheduler CosineAnnealingWarmRestarts (default: ``3``)

        Raises
        ------
        ValueError
            if ``optimizer`` or the requested scheduler name is not one of
            the supported choices
        """
        # Build the optimizer. For SGD the base lr is 1 because the LambdaLR
        # below returns the absolute learning rate (LambdaLR multiplies the
        # base lr by the lambda's result).
        if optimizer == "SGD":
            self._optimizer = optim.SGD(
                self._model.parameters(), lr=1, weight_decay=1e-3)
        elif optimizer == "Adam":
            self._optimizer = optim.Adam(
                self._model.parameters(), lr=self._start_lr, weight_decay=1e-3)
        elif optimizer == "RMSprop":
            self._optimizer = optim.RMSprop(
                self._model.parameters(), lr=self._start_lr)
        elif optimizer == "Adagrad":
            self._optimizer = optim.Adagrad(
                self._model.parameters(), lr=self._start_lr)
        elif optimizer == "Adadelta":
            self._optimizer = optim.Adadelta(
                self._model.parameters(), lr=self._start_lr, weight_decay=1e-3)
        else:
            raise ValueError("Invalid Optimizer")
        self._optimizer_name = optimizer  # remember the name for the scheduler choice

        # Build the matching lr scheduler.
        if self._optimizer_name == "SGD":
            if optimizer_params is None:
                optimizer_params = {}
            maxlr = optimizer_params.get("maxlr", 0.1)
            minlr = optimizer_params.get("minlr", 0.01)
            upepoch = optimizer_params.get("upepoch", 10000)
            # Warm-up raises the lr from minlr towards maxlr in 10 staircase
            # steps over the first `upepoch` epochs.
            uprate = (maxlr - minlr) / upepoch * (upepoch // 10)
            decayepoch = optimizer_params.get("decayepoch", 20000)
            decayrate = optimizer_params.get("decayrate", 0.95)
            stop_change_epoch = optimizer_params.get("stop_change_epoch", 30000)
            stop_lr = optimizer_params.get("stop_lr", 0.001)
            # Piecewise schedule: staircase warm-up -> plateau at maxlr ->
            # exponential decay (every 10 epochs) -> constant stop_lr.
            lambda_lr = lambda epoch: (epoch // (upepoch // 10)) * uprate + minlr if epoch < upepoch else (
                maxlr if epoch < decayepoch else maxlr * (decayrate ** ((epoch - decayepoch)//10))) if epoch < stop_change_epoch else stop_lr
            self._scheduler = optim.lr_scheduler.LambdaLR(
                self._optimizer, lr_lambda=lambda_lr)
        else:
            if optimizer_params is None:
                optimizer_params = {}
            scheduler = optimizer_params.get("scheduler", "CosineAnnealingWarmRestarts")
            scheduler_milestones = optimizer_params.get(
                "scheduler_milestones", [100, 500, 1000, 2000])
            scheduler_gamma = optimizer_params.get("scheduler_gamma", 0.5)
            scheduler_T_max = optimizer_params.get("scheduler_T_max", 1000)
            scheduler_eta_min = optimizer_params.get("scheduler_eta_min", 0.01)
            scheduler_T_0 = optimizer_params.get("scheduler_T_0", 100)
            scheduler_T_mult = optimizer_params.get("scheduler_T_mult", 3)
            if scheduler == "MultiStepLR":
                self._scheduler = optim.lr_scheduler.MultiStepLR(
                    self._optimizer, milestones=scheduler_milestones, gamma=scheduler_gamma)
            elif scheduler == "CosineAnnealingLR":
                self._scheduler = optim.lr_scheduler.CosineAnnealingLR(
                    self._optimizer, T_max=scheduler_T_max, eta_min=scheduler_eta_min)
            elif scheduler == "CosineAnnealingWarmRestarts":
                self._scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
                    self._optimizer, T_0=scheduler_T_0, T_mult=scheduler_T_mult, eta_min=scheduler_eta_min)
            else:
                raise ValueError("Invalid Scheduler")
    @timer
    def __train(self):
        """
        Run one training epoch over the training dataloader.

        Uses mixed-precision autocast with a GradScaler plus gradient
        accumulation: one optimizer step is taken every
        ``accumulation_steps`` batches. When ``self._show_train_metrics``
        is set, per-sample tensors are collected into pre-allocated buffers
        and handed to ``DIAGNOSIS_Opt`` at the end of the epoch. Appends the
        epoch's mean loss to ``self._trainLossList``.
        """
        print('\t开始训练。。。')
        self._model.train()
        train_loss = 0
        data_loader = self._train_dataset.dataloader

        device = torch.device('cuda') if self._use_gpu else torch.device('cpu')

        # Gradient scaler for mixed-precision training (no-op when on CPU).
        scaler = torch.cuda.amp.GradScaler(enabled=self._use_gpu)

        # Number of batches whose gradients are accumulated before one
        # optimizer step; tune to the available GPU memory.
        accumulation_steps = 4

        # Pre-allocate flat buffers instead of growing tensors per batch,
        # reducing memory fragmentation and copy operations.
        # NOTE(review): total_samples over-counts when the last batch is
        # partial, leaving trailing zeros in the buffers that are passed to
        # DIAGNOSIS_Opt below — confirm the diagnosis tolerates this.
        batch_size = data_loader.batch_size
        num_batches = len(data_loader)
        total_samples = batch_size * num_batches

        # Only allocate the metric buffers when training metrics are requested.
        if self._show_train_metrics:
            weight_all = torch.zeros(total_samples, dtype=torch.float32, device=device)
            x_true = torch.zeros(total_samples, dtype=torch.float32, device=device)
            y_true = torch.zeros(total_samples, dtype=torch.float32, device=device)
            y_pred = torch.zeros(total_samples, dtype=torch.float32, device=device)
        else:
            weight_all, x_true, y_true, y_pred = None, None, None, None

        # Write pointer into the pre-allocated buffers.
        current_idx = 0

        # Accumulate the loss as a tensor to avoid a per-batch GPU sync
        # (one .item() per optimizer step instead of per batch).
        running_loss = torch.tensor(0.0, device=device)

        for index, (distance_weights, neighbours_X, X, label, data_index) in enumerate(data_loader):
            # Fail fast if any input tensor contains NaN values.
            has_nan = torch.any(torch.isnan(distance_weights)) or \
                      torch.any(torch.isnan(neighbours_X)) or \
                      torch.any(torch.isnan(X)) or \
                      torch.any(torch.isnan(label)) or \
                      torch.any(torch.isnan(data_index))
            if has_nan:
                print('数据中含有NaN值！')
                raise ValueError('数据中含有NaN值！')

            distance_weights = distance_weights.to(device, non_blocking=True)
            neighbours_X = neighbours_X.to(device, non_blocking=True)
            X = X.to(device, non_blocking=True)
            label = label.to(device, non_blocking=True)

            with torch.cuda.amp.autocast(enabled=self._use_gpu):
                if self._show_train_metrics:
                    current_batch_size = X.size(0)
                    end_idx = current_idx + current_batch_size
                    x_true[current_idx:end_idx] = X.squeeze()
                    y_true[current_idx:end_idx] = label.squeeze()
                    current_idx = end_idx

                # Per-sample network input: [distance weight | neighbour
                # features], flattened to one vector.
                distance_weights = distance_weights.unsqueeze(2)
                distance_and_neighbours = torch.cat((distance_weights, neighbours_X), 2)
                distance_and_neighbours = distance_and_neighbours.view(distance_and_neighbours.shape[0], -1)

                sptatial_weights = self._model(distance_and_neighbours)

                if self._show_train_metrics:
                    weight_all[current_idx - current_batch_size:current_idx] = sptatial_weights.squeeze()

                # NOTE(review): mul_ modifies the model output in place; this
                # can interfere with autograd if the activation's backward
                # needs its saved output — confirm intentional.
                out1 = sptatial_weights.mul_(X)
                output = self._out(out1)

                if self._show_train_metrics:
                    y_pred[current_idx - current_batch_size:current_idx] = output.squeeze()

                # Divide by accumulation_steps so accumulated gradients match
                # one large-batch step.
                loss = self._criterion(output, label)
                loss = loss / accumulation_steps

                running_loss += loss.detach()

            scaler.scale(loss).backward()

            if (index + 1) % accumulation_steps == 0:
                scaler.step(self._optimizer)
                scaler.update()
                self._optimizer.zero_grad()

                # Undo the accumulation division when recording the loss.
                train_loss += running_loss.item() * accumulation_steps
                running_loss.zero_()

        # Flush any leftover accumulated gradients from a final partial group.
        # NOTE(review): `index` is undefined if the dataloader is empty —
        # this would raise NameError on an empty training set.
        if (index + 1) % accumulation_steps != 0:
            scaler.step(self._optimizer)
            scaler.update()
            self._optimizer.zero_grad()
            train_loss += running_loss.item() * ((index + 1) % accumulation_steps)

        # Post-epoch cleanup of Python garbage and the CUDA caching allocator.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        train_tmp_id = "{0}".format(uuid.uuid1())

        if self._show_train_metrics:
            self._train_diagnosis = DIAGNOSIS_Opt(weight_all, x_true, y_true, y_pred, train_tmp_id, self._train_outputs)
            self._train_diagnosis.set_trace_vectors(100)

        train_loss /= self._train_dataset.datasize
        self._trainLossList.append(train_loss)
    @timer
    def __valid(self):
        """
        Run one validation pass over the validation dataloader.

        Accumulates the validation loss, builds a ``DIAGNOSIS_Opt`` object
        over the whole validation set, tracks the best AIC/AUC/F1/Recall/R2
        seen so far (saving the model whenever the validation AIC improves),
        and updates the early-stopping counter ``self._noUpdateEpoch``.
        """
        print('\t开始验证。。。')
        self._model.eval()  # disable dropout, use running batch-norm stats
        val_loss = 0
        # Collect per-batch outputs/labels on the GPU and move them to the
        # CPU once after the loop — one device sync instead of one per batch.
        all_outputs = []
        all_labels = []

        data_loader = self._valid_dataset.dataloader

        device = torch.device('cuda') if self._use_gpu else torch.device('cpu')

        # Accumulators created directly on the target device so the
        # torch.cat calls below never move data between devices.
        x_data = torch.tensor([], dtype=torch.float32, device=device)
        y_data = torch.tensor([], dtype=torch.float32, device=device)
        y_pred = torch.tensor([], dtype=torch.float32, device=device)
        weight_all = torch.tensor([], dtype=torch.float32, device=device)

        with torch.no_grad():
            for distance_weights, neighbours_X, X, label, data_index in data_loader:
                distance_weights = distance_weights.to(device, non_blocking=True)
                neighbours_X = neighbours_X.to(device, non_blocking=True)
                X = X.to(device, non_blocking=True)
                label = label.to(device, non_blocking=True)

                x_data = torch.cat((x_data, X), 0)
                y_data = torch.cat((y_data, label), 0)

                # Per-sample network input: [distance weight | neighbour
                # features], flattened to one vector.
                distance_weights = distance_weights.unsqueeze(2)
                distance_and_neighbours = torch.cat((distance_weights, neighbours_X), 2)
                distance_and_neighbours = distance_and_neighbours.view(distance_and_neighbours.shape[0], -1)

                sptatial_weights = self._model(distance_and_neighbours)
                weight_all = torch.cat((weight_all, sptatial_weights), 0)

                out1 = sptatial_weights.mul(X)
                output = self._out(out1)
                y_pred = torch.cat((y_pred, output), 0)

                loss = self._criterion(output, label)

                all_outputs.append(output)
                all_labels.append(label)

                # Weight the batch-mean loss by the batch size; distance_weights
                # is guaranteed to be a tensor here (after .to/.unsqueeze), so
                # the old isinstance(..., list) branch was dead code.
                val_loss += loss.item() * distance_weights.size(0)

        # Single GPU->CPU transfer for the whole validation set.
        out_list = torch.cat(all_outputs, dim=0).cpu().numpy().ravel()
        label_list = torch.cat(all_labels, dim=0).cpu().numpy().ravel()

        val_loss /= len(self._valid_dataset)
        self._validLossList.append(val_loss)

        val_tmp_id = "{0}".format(uuid.uuid1())
        self._val_diagnosis = DIAGNOSIS_Opt(weight_all, x_data, y_data, y_pred, val_tmp_id, self._train_outputs)
        self._val_diagnosis.set_trace_vectors(100)

        f1_val = self._val_diagnosis.f1_val()
        recall_val = self._val_diagnosis.recall_val()
        auc_val = self._val_diagnosis.AUC()
        aic_val = self._val_diagnosis.AIC()

        # _modelSavePath may be a plain str; convert before using the pathlib
        # "/" join (str / Path raises TypeError). makedirs(exist_ok=True)
        # also avoids the mkdir race and missing-parent failures.
        save_dir = Path(self._modelSavePath)
        if aic_val < self._bestValAIC:
            self._bestValAIC = aic_val
            os.makedirs(save_dir, exist_ok=True)
            torch.save(self._model, save_dir / self._modelName.with_suffix(".bestAIC.pkl"))
        if auc_val > self._bestValAUC:
            self._bestValAUC = auc_val
            os.makedirs(save_dir, exist_ok=True)
            # checkpointing for best AUC is currently disabled
        if f1_val > self._bestValF1:
            self._bestValF1 = f1_val
            os.makedirs(save_dir, exist_ok=True)
            # checkpointing for best F1 is currently disabled
        if recall_val > self._bestValRecall:
            self._bestValRecall = recall_val
            os.makedirs(save_dir, exist_ok=True)
            # checkpointing for best Recall is currently disabled

        try:
            r2 = r2_score(label_list, out_list)  # R² of the validation predictions
        except ValueError:
            # r2_score raises e.g. on NaN/empty input. Dump the arrays and
            # re-raise: the old bare `except` fell through to an
            # UnboundLocalError on `r2`, masking the real error.
            print(label_list)
            print(out_list)
            raise
        self._valid_r2 = r2
        if r2 > self._bestr2:
            # New best validation R²: record it (and the matching train R²)
            # and reset the early-stopping counter.
            self._bestr2 = r2
            if self._train_diagnosis is not None:
                self._besttrainr2 = self._train_diagnosis.R2().data
            self._noUpdateEpoch = 0
            os.makedirs(save_dir, exist_ok=True)
            # checkpointing for best R² is currently disabled
        else:
            self._noUpdateEpoch += 1
    @timer
    def __test(self):
        """
        Evaluate the trained network on the test dataset.

        Computes the average test loss and the test R², and builds a
        ``DIAGNOSIS_Opt`` object over the full test set. Results are stored
        on the instance (``__testLoss``, ``__testr2``, ``_test_diagnosis``).
        """
        print('\t开始测试......')
        self._model.eval()
        loader = self._test_dataset.dataloader
        device = torch.device('cuda') if self._use_gpu else torch.device('cpu')

        running_loss = 0.0
        # Seed each accumulator list with an empty float32 tensor on the
        # target device so the final torch.cat matches the behaviour of
        # incremental concatenation, even for an empty loader.
        seed = torch.tensor([], dtype=torch.float32, device=device)
        feat_parts, target_parts = [seed], [seed]
        pred_parts, weight_parts = [seed], [seed]
        # Per-batch predictions/labels stay on the GPU; a single transfer to
        # the CPU happens after the loop.
        gpu_outputs, gpu_labels = [], []

        with torch.no_grad():
            for dist_w, nbr_x, feats, target, _idx in loader:
                dist_w = dist_w.to(device, non_blocking=True)
                nbr_x = nbr_x.to(device, non_blocking=True)
                feats = feats.to(device, non_blocking=True)
                target = target.to(device, non_blocking=True)

                feat_parts.append(feats)
                target_parts.append(target)

                # Flatten [distance weight | neighbour features] into the
                # per-sample input vector expected by SWNN.
                stacked = torch.cat((dist_w.unsqueeze(2), nbr_x), 2)
                flat_input = stacked.view(stacked.shape[0], -1)

                spatial_w = self._model(flat_input)
                weight_parts.append(spatial_w)

                prediction = self._out(spatial_w.mul(feats))
                pred_parts.append(prediction)

                batch_loss = self._criterion(prediction, target)
                # Weight the batch-mean loss by the batch size.
                running_loss += batch_loss.item() * dist_w.size(0)

                gpu_outputs.append(prediction)
                gpu_labels.append(target)

        x_data = torch.cat(feat_parts, 0)
        y_data = torch.cat(target_parts, 0)
        y_pred = torch.cat(pred_parts, 0)
        weight_all = torch.cat(weight_parts, 0)

        # One GPU sync for the whole test set.
        out_list = torch.cat(gpu_outputs, dim=0).cpu().numpy().ravel()
        label_list = torch.cat(gpu_labels, dim=0).cpu().numpy().ravel()

        self.__testLoss = running_loss / len(self._test_dataset)
        self.__testr2 = r2_score(label_list, out_list)
        diag_id = "{0}".format(uuid.uuid1())
        self._test_diagnosis = DIAGNOSIS_Opt(weight_all, x_data, y_data, y_pred, diag_id, self._train_outputs)
        self._test_diagnosis.set_trace_vectors(100)
    @timer
    def run(self, max_epoch=1, early_stop=-1, print_frequency=50, show_detailed_info=True):
        """
        train the model and validate the model

        Parameters
        ----------
        max_epoch : int
            the max epoch of the training (default: ``1``)
        early_stop : int
            if the model has not been updated for ``early_stop`` epochs, the training will stop (default: ``-1``)

            if ``early_stop`` is ``-1``, the training will not stop until the max epoch
        print_frequency : int
            the frequency of printing the information (default: ``50``)

        show_detailed_info : bool
            if ``True``, the detailed information will be shown (default: ``True``)
        """
        print('开始run......')
        self.__istrained = True
        device = torch.device('cuda') if self._use_gpu else torch.device('cpu')
        if self._use_gpu:
            # self._model = nn.DataParallel(module=self._model)  # parallel computing
            self._model = self._model.to(device)
            self._out = self._out.to(device)
        # create file
        if not os.path.exists(self._log_path):
            os.mkdir(self._log_path)
        file_str = self._log_path / self._log_file_name

        # 每次循环前，移除所有已有的 handlers
        logger = logging.getLogger()
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
        # 配置日志处理器，每次循环创建一个新文件
        file_handler = logging.FileHandler(file_str)
        formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        # 设置日志级别
        logger.setLevel(logging.INFO)

        # logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
        #                     filename=file_str, level=logging.INFO)
        for epoch in trange(0, max_epoch):
            print(f'开始第{epoch+1}轮....')
            self._epoch = epoch
            # train the network
            # record the information of the training process
            self.__train()
            # validate the network
            # record the information of the validation process
            self.__valid()
            # out put log every {print_frequency} epoch:
            if (epoch + 1) % print_frequency == 0:
                if show_detailed_info:
                    print("\nEpoch: ", epoch + 1)
                    print("【learning rate】: ", self._optimizer.param_groups[0]['lr'])
                    print("Train Loss: ", self._trainLossList[-1])
                    if self._show_train_metrics:
                        print("Train R2: {:.3f}".format(self._train_diagnosis.R2().data))
                        print("Train RMSE: {:.3f}".format(self._train_diagnosis.RMSE().data))
                        print("Train AIC: {:.3f}".format(self._train_diagnosis.AIC()))
                        # print("Train AICc: {:.3f}".format(self._train_diagnosis.AICc()))
                        print("Train AUC: {:.3f}".format(self._train_diagnosis.AUC()))
                        print("Train F1: {:.3f}".format(self._train_diagnosis.f1_val()))
                        print("Train Recall: {:.3f}".format(self._train_diagnosis.recall_val()))
                    print("Valid Loss: ", self._validLossList[-1])
                    print("Valid R2: {:.3f}".format(self._valid_r2))
                    print("Valid AIC: {:.3f}".format(self._val_diagnosis.AIC()))
                    print("Valid AUC: {:.3f}".format(self._val_diagnosis.AUC()))
                    print("Valid F1: {:.3f}".format(self._val_diagnosis.f1_val()))
                    print("Valid Recall: {:.3f}".format(self._val_diagnosis.recall_val()), "\n")
                    print("Best R2: {:.3f}".format(self._bestr2))
                    print("Best valid AIC: {:.3f}".format(self._bestValAIC))
                    print("Best valid AUC: {:.3f}".format(self._bestValAUC))
                    print("Best valid F1: {:.3f}".format(self._bestValF1))
                    print("Best valid Recall: {:.3f}".format(self._bestValRecall), "\n")
                else:
                    print("\nEpoch: ", epoch + 1)
                    print(
                        "Train R2: {:.3f}  Valid R2: {:.3f}  Best R2: {:.3f}\n".format(self._train_diagnosis.R2().data,
                                                                                       self._valid_r2, self._bestr2))
            self._scheduler.step()  # update the learning rate
            # tensorboard
            self._writer.add_scalar('Training/Learning Rate', self._optimizer.param_groups[0]['lr'], self._epoch)
            self._writer.add_scalar('Training/Loss', self._trainLossList[-1], self._epoch)
            if self._show_train_metrics:
                self._writer.add_scalar('Training/R2', self._train_diagnosis.R2().data, self._epoch)
                self._writer.add_scalar('Training/RMSE', self._train_diagnosis.RMSE().data, self._epoch)
                self._writer.add_scalar('Training/AIC', self._train_diagnosis.AIC(), self._epoch)
                # self._writer.add_scalar('Training/AICc', self._train_diagnosis.AICc(), self._epoch)
                self._writer.add_scalar('Training/AUC', self._train_diagnosis.AUC(), self._epoch)
                self._writer.add_scalar('Training/F1', self._train_diagnosis.f1_val(), self._epoch)
                self._writer.add_scalar('Training/Recall', self._train_diagnosis.recall_val(), self._epoch)
            self._writer.add_scalar('Validation/Loss', self._validLossList[-1], self._epoch)
            self._writer.add_scalar('Validation/R2', self._valid_r2, self._epoch)
            self._writer.add_scalar('Validation/Best R2', self._bestr2, self._epoch)
            self._writer.add_scalar('Validation/Best valAIC', self._bestValAIC, self._epoch)
            self._writer.add_scalar('Validation/Best valAUC', self._bestValAUC, self._epoch)
            self._writer.add_scalar('Validation/Best valF1', self._bestValF1, self._epoch)
            self._writer.add_scalar('Validation/Best valRecall', self._bestValRecall, self._epoch)

            # log output
            log_str = "Epoch: " + str(epoch + 1) + \
                      "; Train Loss: " + str(self._trainLossList[-1])
            if self._show_train_metrics:
                log_str = log_str + "; Train R2: {:3f}".format(self._train_diagnosis.R2().data) + \
                      "; Train RMSE: {:3f}".format(self._train_diagnosis.RMSE().data) + \
                      "; Train AIC: {:3f}".format(self._train_diagnosis.AIC()) + \
                      "; Train AUC: {:3f}".format(self._train_diagnosis.AUC()) + \
                      "; Train F1: {:3f}".format(self._train_diagnosis.f1_val()) + \
                      "; Train Recall: {:3f}".format(self._train_diagnosis.recall_val())
            log_str = log_str + "; Valid Loss: " + str(self._validLossList[-1]) + \
                      "; Valid R2: " + str(self._valid_r2) + \
                      "; Valid AIC: {:3f}".format(self._val_diagnosis.AIC()) + \
                      "; Valid AUC: {:3f}".format(self._val_diagnosis.AUC()) + \
                      "; Valid F1: {:3f}".format(self._val_diagnosis.f1_val()) + \
                      "; Valid Recall: {:3f}".format(self._val_diagnosis.recall_val()) + \
                      "; Learning Rate: " + str(self._optimizer.param_groups[0]['lr'])
            logging.info(log_str)
            # 强制刷新缓冲区，确保所有日志被写入文件
            logging.shutdown()
            if 0 < early_stop < self._noUpdateEpoch:  # stop when the model has not been updated for long time
                print("Training stop! Model has not been improved for over {} epochs.".format(early_stop))
                break
        self.load_model(self._modelSavePath / self._modelName.with_suffix(".bestAIC.pkl"))
        self.result_data = self.getCoefs()
        print("Best_r2:", self._bestr2)
        print("Best_AIC:", self._bestValAIC)

    def predict(self, dataset):
        """
        predict the result of the dataset

        Parameters
        ----------
        dataset : baseDataset,predictDataset
            the dataset to be predicted
        
        Returns
        -------
        dataframe
            the Pandas dataframe of the dataset with the predicted result
        """
        data_loader = dataset.dataloader
        if not self.__istrained:
            print("WARNING! The model hasn't been trained or loaded!")
        self._model.eval()
        result = np.array([])
        with torch.no_grad():
            for distance_weights, X in data_loader:
                if self._use_gpu:
                    distance_weights, X = distance_weights.cuda(), X.cuda()
                output = self._out(self._model(distance_weights).mul(X.to(torch.float32)))
                output = output.view(-1).cpu().detach().numpy()
                result = np.append(result, output)
        dataset.dataframe['pred_result'] = result
        dataset.pred_result = result
        return dataset.dataframe

    def predict_coef(self, dataset):
        """
        predict the spatial coefficient of the independent variable

        Parameters
        ----------
        dataset : baseDataset,predictDataset
            the dataset to be predicted

        Returns
        -------
        dataframe
            the Pandas dataframe of the dataset with the predicted spatial coefficient
        """
        data_loader = dataset.dataloader
        if not self.__istrained:
            print("WARNING! The model hasn't been trained or loaded!")
        self._model.eval()
        result = torch.tensor([]).to(torch.float32)
        with torch.no_grad():
            for data, coef in data_loader:
                if self._use_gpu:
                    result, data, coef = result.cuda(), data.cuda(), coef.cuda()
                    ols_w = torch.tensor(self._coefficient).to(torch.float32).cuda()
                else:
                    ols_w = torch.tensor(self._coefficient).to(torch.float32)
                coefficient = self._model(data).mul(ols_w)
                result = torch.cat((result, coefficient), 0)
        result = result.cpu().detach().numpy()
        return result

    def load_model(self, path, use_dict=False, map_location=None):
        """
        load the model from the path

        Parameters
        ----------
        path : str
            the path of the model
        use_dict : bool
            whether the function use dict to load the model (default: ``False``)
        map_location : str
            the location of the model (default: ``None``)
            the location can be ``"cpu"`` or ``"cuda"``
        """
        if use_dict:
            data = torch.load(path, map_location=map_location, weights_only=False)
            self._model.load_state_dict(data)
        else:
            self._model = torch.load(path, map_location=map_location, weights_only=False)
        if self._use_gpu:
            self._model = self._model.cuda()
            self._out = self._out.cuda()
        else:
            self._model = self._model.cpu()
            self._out = self._out.cpu()
        self._modelSavePath = os.path.dirname(path)
        self._modelName = os.path.basename(path).split('/')[-1].split('.')[0]
        self.__istrained = True
        self.result_data = self.getCoefs()


    def gpumodel_to_cpu(self, path, save_path, use_model=True):
        """
        convert gpu model to cpu model

        Strips the ``"module."`` key prefix that ``nn.DataParallel`` adds,
        and saves the resulting state dict to ``save_path``.

        Parameters
        ----------
        path : str
            the path of the model
        save_path : str
            the path of the new model
        use_model : bool
            if ``True``, ``path`` holds a whole pickled model whose
            ``state_dict`` is extracted; if ``False``, ``path`` already holds
            a state dict (default: ``True``)
        """
        if use_model:
            data = torch.load(path, map_location='cpu', weights_only=False).state_dict()
        else:
            data = torch.load(path, map_location='cpu', weights_only=False)
        new_state_dict = OrderedDict()
        for k, v in data.items():
            # Strip the DataParallel "module." prefix only when present.
            # The previous unconditional k[7:] corrupted keys of checkpoints
            # that were not wrapped in DataParallel.
            new_state_dict[k.removeprefix("module.")] = v
        torch.save(new_state_dict, save_path)

    def getLoss(self):
        """
        get network's loss

        Returns
        -------
        list
            the list of the loss in training process and validation process
        """
        return self._trainLossList, self._validLossList

    def add_graph(self):
        """
        add the graph of the model to tensorboard
        """
        for data, coef, label, data_index in self._train_dataset.dataloader:
            if self._use_gpu:
                data = data.cuda()
                self._model = self._model.cuda()
            else:
                self._model = self._model.cpu()
                data = data.cpu()
            self._writer.add_graph(self._model, data)
            break
        print("Add Graph Successfully")

    def result(self, path=None, use_dict=False, map_location=None, print_OLS_coefficients=False, postfix='.bestAIC.pkl'):
        """
        print the result of the model, including the model name, regression fomula and the result of test dataset;
        the same report text is also written to a file in the model save directory

        Parameters
        ----------
        path : str
            the path of the model(default: ``None``)
            | if ``path`` is ``None``, the model will be loaded from ``self._modelSavePath + "/" + self._modelName + postfix``
        use_dict : bool
            whether the function use dict to load the model (default: ``False``)
            | if ``use_dict`` is ``True``, the model will be loaded from ``path`` as dict
        map_location : str
            the location of the model (default: ``None``)
            the location can be ``"cpu"`` or ``"cuda"``
        print_OLS_coefficients : bool
            whether to additionally list the global OLS coefficients (default: ``False``)
        postfix : str
            suffix appended to the default model file name (default: ``".bestAIC.pkl"``)

        Raises
        ------
        Exception
            if the model has not been trained or loaded yet
        """
        # load model
        if not self.__istrained:
            raise Exception("The model hasn't been trained or loaded!")
        if path is None:
            path = self._modelSavePath + "/" + self._modelName + postfix
        if use_dict:
            data = torch.load(path, map_location=map_location, weights_only=False)
            self._model.load_state_dict(data)
        else:
            self._model = torch.load(path, map_location=map_location, weights_only=False)
        if self._use_gpu:
            self._model = nn.DataParallel(module=self._model)  # parallel computing
            self._model = self._model.cuda()
            self._out = self._out.cuda()
        else:
            self._model = self._model.cpu()
            self._out = self._out.cpu()
        # run the evaluation pass on the test set; __test (defined elsewhere in
        # this class) presumably fills self.__testLoss / self.__testr2 /
        # self._test_diagnosis read below
        with torch.no_grad():
            self.__test()
        # logging.info("Test Loss: " + str(self.__testLoss) + "; Test R2: " + str(self.__testr2))

        # print result
        # basic information
        result_lst = []
        result_lst.append("--------------------Model Information-----------------")
        result_lst.append("Model Name:           |" + self._modelName)
        result_lst.append("independent variable: |" + str(self._train_dataset.x_columns))
        result_lst.append("dependent variable:   |" + str(self._train_dataset.y_column))
        # OLS coefficients (the intercept is stored as the last entry)
        if print_OLS_coefficients:
            result_lst.append("\nOLS coefficients: ")
            for i in range(len(self._coefficient)):
                if i == len(self._coefficient) - 1:
                    result_lst.append("Intercept: {:.3f}".format(self._coefficient[i]))
                else:
                    result_lst.append("x{}: {:.3f}".format(i, self._coefficient[i]))
        result_lst.append("\n--------------------Result Information----------------")
        result_lst.append("Valid R2 : | {:>25.3f}".format(self._bestr2))
        result_lst.append("Test Loss: | {:>25.3f}".format(self.__testLoss))
        result_lst.append("Test R2  : | {:>25.3f}".format(self.__testr2))
        if self._besttrainr2 is not None and self._besttrainr2 != float('-inf'):
            result_lst.append("Train R2 : | {:>25.3f}".format(self._besttrainr2))
        result_lst.append("Test RMSE: | {:>25.3f}".format(self._test_diagnosis.RMSE().data))
        # result_lst.append("Test AIC:  | {:>25.3f}".format(self._test_diagnosis.AIC()))
        # result_lst.append("Test AICc: | {:>25.3f}".format(self._test_diagnosis.AICc()))
        result_lst.append("Test AUC:  | {:>25.3f}".format(self._test_diagnosis.AUC()))
        result_lst.append("Test F1:   | {:>25.3f}".format(self._test_diagnosis.f1_val()))
        result_lst.append("Test Recall: | {:>23.3f}".format(self._test_diagnosis.recall_val()))
        # optional global/local F-test statistics from the diagnosis module
        if self._F_test:
            result_lst.append("Test F1:   | {:>30.3f}".format(self._test_diagnosis.F1_Global().data))
            result_lst.append("Test F2:   | {:>30.3f}".format(self._test_diagnosis.F2_Global().flatten()[0].data))
            F3_Local_dict = self._test_diagnosis.F3_Local()[0]
            for key in F3_Local_dict:
                # shrink the pad width so longer keys still line up in the table
                width = 30-(len(key) - 4)
                result_lst.append("{}: | {:>{width}.3f}".format(key, F3_Local_dict[key].data, width=width))
        result_str = '\n'.join(result_lst)
        print(result_str)
        # explicit encoding: the default is locale-dependent and can break the
        # report file on non-UTF-8 systems
        with open(self._modelSavePath + '/' + '最终结果.txt', 'w', encoding='utf-8') as file:
            file.write(result_str)

    def reg_result(self, filename=None, model_path=None, use_dict=False, only_return=False, map_location=None, postfix='.bestAIC.pkl'):
        """
        save the regression result of the model, including the coefficient of each argument, the bias and the predicted result

        Parameters
        ----------
        filename : str
            the path of the result file (default: ``None``)
            | if ``filename`` is ``None``, the result will not be saved as file
        model_path : str
            the path of the model (default: ``None``)
            | if ``model_path`` is ``None``, the model will be loaded from ``self._modelSavePath + "/" + self._modelName + ".pkl"``
        use_dict : bool
            whether use dict to load the model (default: ``False``)
            | if ``use_dict`` is ``True``, the model will be loaded from ``model_path`` as dict
        only_return : bool
            whether only return the result (default: ``False``)
            | if ``only_return`` is ``True``, the result will not be saved as file
        map_location : str
            the location of the model (default: ``None``)
            the location can be ``"cpu"`` or ``"cuda"``

        Returns
        -------
        dataframe
            the Pandas dataframe of the result
        """
        if model_path is None:
            model_path = self._modelSavePath + "/" + self._modelName + postfix
        if use_dict:
            distance_weights = torch.load(model_path, map_location=map_location, weights_only=False)
            self._model.load_state_dict(distance_weights)
        else:
            self._model = torch.load(model_path, map_location=map_location, weights_only=False)

        if self._use_gpu:
            self._model = nn.DataParallel(module=self._model)
            self._model = self._model.cuda()
            self._out = self._out.cuda()
        else:
            self._model = self._model.cpu()
            self._out = self._out.cpu()
        device = torch.device('cuda') if self._use_gpu else torch.device('cpu')
        result = torch.tensor([]).to(torch.float32).to(device)
        with (torch.no_grad()):
            for distance_weights, neighbours_X, X, label, data_index in self._train_dataset.dataloader:
                # print('data_index.shape', data_index.shape) # torch.Size([256])
                distance_weights, neighbours_X, X, label, data_index = distance_weights.to(device), neighbours_X.to(device), X.to(device), label.to(device), data_index.to(device)

                # 给距离权重矩阵最后添加一个维度
                distance_weights = distance_weights.unsqueeze(2)  # distance_weights.shape: torch.Size([256, 9, 1]),
                # 将X和distance_weights按照最后一个维度拼接起来
                distance_and_neighbours = torch.cat((distance_weights, neighbours_X), 2)
                distance_and_neighbours = distance_and_neighbours.view(distance_and_neighbours.shape[0], -1)

                sptatial_weights = self._model(distance_and_neighbours)
                out1 = sptatial_weights.mul(X.to(torch.float32))
                output = self._out(out1)

                coefficient = sptatial_weights.mul(torch.tensor(self._coefficient).to(torch.float32).to(device))
                data_index = data_index.unsqueeze(1)
                # print('coefficient.shape', coefficient.shape) # torch.Size([256, 23])
                # print('output.shape', output.shape) # torch.Size([256, 1])
                # print('unsqueeze data_index.shape', data_index.shape) # torch.Size([256, 1])

                output = torch.cat((coefficient, output, data_index), dim=1)
                result = torch.cat((result, output), 0)
                # print('result.shape', result.shape)

            for distance_weights, neighbours_X, X, label, data_index in self._valid_dataset.dataloader:
                distance_weights, neighbours_X, X, label, data_index = distance_weights.to(device), neighbours_X.to(device), X.to(device), label.to(device), data_index.to(device)

                # 给距离权重矩阵最后添加一个维度
                distance_weights = distance_weights.unsqueeze(2)  # distance_weights.shape: torch.Size([256, 9, 1]),
                # 将X和distance_weights按照最后一个维度拼接起来
                distance_and_neighbours = torch.cat((distance_weights, neighbours_X), 2)
                distance_and_neighbours = distance_and_neighbours.view(distance_and_neighbours.shape[0], -1)

                sptatial_weights = self._model(distance_and_neighbours)
                out1 = sptatial_weights.mul(X.to(torch.float32))
                output = self._out(out1)


                coefficient = sptatial_weights.mul(torch.tensor(self._coefficient).to(torch.float32).to(device))
                data_index = data_index.unsqueeze(1)
                output = torch.cat((coefficient, output, data_index), dim=1)
                result = torch.cat((result, output), 0)

            for distance_weights, neighbours_X, X, label, data_index in self._test_dataset.dataloader:
                distance_weights, neighbours_X, X, label, data_index = distance_weights.to(device), neighbours_X.to(device), X.to(device), label.to(device), data_index.to(device)

                # 给距离权重矩阵最后添加一个维度
                distance_weights = distance_weights.unsqueeze(2)  # distance_weights.shape: torch.Size([256, 9, 1]),
                # 将X和distance_weights按照最后一个维度拼接起来
                distance_and_neighbours = torch.cat((distance_weights, neighbours_X), 2)
                distance_and_neighbours = distance_and_neighbours.view(distance_and_neighbours.shape[0], -1)

                sptatial_weights = self._model(distance_and_neighbours)
                out1 = sptatial_weights.mul(X.to(torch.float32))
                output = self._out(out1)

                coefficient = sptatial_weights.mul(torch.tensor(self._coefficient).to(torch.float32).to(device))
                data_index = data_index.unsqueeze(1)
                output = torch.cat((coefficient, output, data_index), dim=1)
                result = torch.cat((result, output), 0)

        result = result.cpu().detach().numpy()
        columns = list(self._train_dataset.x_columns)
        for i in range(len(columns)):
            columns[i] = "coef_" + columns[i]
        columns.append("bias")
        y_col_name = self._train_dataset.y_column[0] if type(self._train_dataset.y_column)==list else self._train_dataset.y_column
        y_pred_col_name = "Pred_" + y_col_name
        columns.append(y_pred_col_name)
        columns.append(self._train_dataset.id)
        result = pd.DataFrame(result, columns=columns)
        result[self._train_dataset.id] = result[self._train_dataset.id].astype(np.int32)
        result[y_pred_col_name] = result[y_pred_col_name].astype(np.float32)
        if only_return:
            return result
        if filename is not None:
            result.to_csv(filename, index=False)
        else:
            warnings.warn(
                "Warning! The input write file path is not set. Result is returned by function but not saved as file.",
                RuntimeWarning)
        return result

    def getCoefs(self):
        """
        get the Coefficients of each argument in dataset

        Returns
        -------
        dataframe
            the Pandas dataframe of the coefficient of each argument in dataset
        """
        result_data = self.reg_result(only_return=True)
        id_name = self.id_column
        result_data[id_name] = result_data[id_name].astype(np.int64)
        data = pd.concat([self._train_dataset.dataframe, self._valid_dataset.dataframe, self._test_dataset.dataframe])
        data.set_index(id_name, inplace=True)
        result_data.set_index(id_name, inplace=True)
        result_data = result_data.join(data)
        return result_data
    def __str__(self) -> str:
        print("Model Name: ", self._modelName)
        print("Model Structure: ", self._model)
        return ""
    def __repr__(self) -> str:
        print("Model Name: ", self._modelName)
        print("Model Structure: ", self._model)
        return ""

