import configparser
import re

import numpy as np
import torch as th
from .utils.activation import act_dict
import warnings


class Config(object):
    def __init__(self, file_path, model, dataset, task, gpu):
        conf = configparser.ConfigParser( )
        try:
            conf.read(file_path)
        except:
            print("failed!")
        # Default training settings; most are overridden by the model-specific sections below.
        self.seed = 0
        self.patience = 1
        self.max_epoch = 1
        self.task = task
        self.model = model
        self.dataset = dataset
        if isinstance(dataset, str):
            self.dataset_name = dataset
        else:
            self.dataset_name = self.dataset.name
        if isinstance(model, str):
            self.model_name = model
        else:
            self.model_name = type(self.model).__name__
        self.optimizer = "Adam"
        # custom model
        if isinstance(model, th.nn.Module):
            self.lr = conf.getfloat("General", "learning_rate")
            self.dropout = conf.getfloat("General", "dropout")
            self.max_epoch = conf.getint("General", "max_epoch")
            self.weight_decay = conf.getfloat("General", "weight_decay")
            self.hidden_dim = conf.getint("General", "hidden_dim")
            self.seed = conf.getint("General", "seed")
            self.patience = conf.getint("General", "patience")
            self.mini_batch_flag = conf.getboolean("General", "mini_batch_flag")
############  model-specific configuration sections (one branch per model)  ############
        elif self.model_name == 'HGA':
            self.lr = conf.getfloat("HGA", "lr")
            self.weight_decay = conf.getfloat("HGA", "weight_decay")
            self.seed = conf.getint("HGA", "seed")
            self.dropout = conf.getfloat("HGA", "dropout")
            self.hidden_dim = conf.getint('HGA', 'hidden_dim')
            self.out_dim = conf.getint('HGA', 'out_dim')
            self.num_heads = conf.getint('HGA', 'num_heads')
            self.patience = conf.getint('HGA', 'patience')
            self.max_epoch = conf.getint('HGA', 'max_epoch')
            self.mini_batch_flag = conf.getboolean("HGA", "mini_batch_flag")
            self.gamma=conf.getfloat("HGA", "gamma")
            self.batch_size=conf.getint("HGA", "batch_size")
            self.num_layers = conf.getint('HGA','num_layers')

        elif self.model_name == "RHINE":
            self.emb_dim=conf.getint("RHINE", "emb_dim")
            self.mini_batch_flag = conf.getboolean("RHINE", "mini_batch_flag")
            self.lr = conf.getfloat("RHINE", "lr")
            self.weight_decay = conf.getfloat("RHINE", "weight_decay")
            self.hidden_dim=conf.getint("RHINE", "hid_dim")
            self.batch_size=conf.getint("RHINE", "batch_size")
            self.max_epoch=conf.getint("RHINE", "max_epoch")

        elif self.model_name == "HGMAE":
            self.dataset = conf.get("HGMAE", "dataset")
            self.in_dim = conf.getint("HGMAE", "in_dim")
            self.hidden_dim = conf.getint("HGMAE", "hidden_dim")
            self.category = conf.get("HGMAE", "category")
            self.feat_drop = conf.getfloat("HGMAE", "feat_drop")
            self.attn_drop = conf.getfloat("HGMAE", "attn_drop")
            self.residual = conf.getboolean("HGMAE", "residual")
            self.negative_slope = conf.getfloat("HGMAE", "negative_slope")
            self.num_classes = conf.getint("HGMAE", "num_classes")
            self.num_heads = conf.getint("HGMAE", "num_heads")
            self.num_layers = conf.getint("HGMAE", "num_layers")
            self.num_out_heads = conf.getint("HGMAE", "num_out_heads")

            self.mp_edge_recon_loss_weight = conf.getfloat("HGMAE", "mp_edge_recon_loss_weight")
            self.mp_edge_mask_rate = conf.getfloat("HGMAE", "mp_edge_mask_rate")
            self.mp_edge_gamma = conf.getfloat("HGMAE", "mp_edge_gamma")
            self.node_mask_rate = conf.get("HGMAE", "node_mask_rate")

            self.attr_restore_loss_weight = conf.getfloat("HGMAE", "attr_restore_loss_weight")
            self.attr_restore_gamma = conf.getfloat("HGMAE", "attr_restore_gamma")
            self.attr_replace_rate = conf.getfloat("HGMAE", "attr_replace_rate")
            self.attr_unchanged_rate = conf.getfloat("HGMAE", "attr_unchanged_rate")

            self.mp2vec_negative_size = conf.getint("HGMAE", "mp2vec_negative_size")
            self.mp2vec_window_size = conf.getint("HGMAE", "mp2vec_window_size")
            self.mp2vec_batch_size = conf.getint("HGMAE", "mp2vec_batch_size")
            self.mp2vec_rw_length = conf.getint("HGMAE", "mp2vec_rw_length")
            self.mp2vec_walks_per_node = conf.getint("HGMAE", "mp2vec_walks_per_node")
            self.mp2vec_train_epoch = conf.getint("HGMAE", "mp2vec_train_epoch")
            self.mp2vec_train_lr = conf.getfloat("HGMAE", "mp2vec_train_lr")
            self.mp2vec_feat_dim = conf.getint("HGMAE", "mp2vec_feat_dim")
            self.mp2vec_feat_pred_loss_weight = conf.getfloat("HGMAE", "mp2vec_feat_pred_loss_weight")
            self.mp2vec_feat_gamma = conf.getfloat("HGMAE", "mp2vec_feat_gamma")
            self.mp2vec_feat_drop = conf.getfloat("HGMAE", "mp2vec_feat_drop")

            self.patience = conf.getint("HGMAE", "patience")
            self.gpu = conf.getint("HGMAE", "gpu")
            self.mae_epochs = conf.getint("HGMAE", "mae_epochs")
            self.mae_lr = conf.getfloat("HGMAE", "mae_lr")
            self.l2_coef = conf.getint("HGMAE", "l2_coef")
            self.eva_lr = conf.getfloat("HGMAE", "eva_lr")
            self.eva_wd = conf.getfloat("HGMAE", "eva_wd")
            self.scheduler_gamma = conf.getfloat("HGMAE", "scheduler_gamma")



        elif self.model_name == "HGPrompt":
            self.feats_type = conf.getint("HGPrompt", "feats-type")
            self.hidden_dim = conf.getint("HGPrompt", "hidden-dim")
            self.num_heads = conf.getint("HGPrompt", "num-heads")
            self.epoch = conf.getint("HGPrompt", "epoch")
            self.patience = conf.getint("HGPrompt", "patience")
            self.repeat = conf.getint("HGPrompt", "repeat")
            self.model_type = conf.get("HGPrompt", "model-type")
            self.num_layers = conf.getint("HGPrompt", "num-layers")
            self.lr = conf.getfloat("HGPrompt", "lr")
            self.run = conf.getint("HGPrompt", "run")
            self.device = conf.getint("HGPrompt", "device")
            self.dropout = conf.getfloat("HGPrompt", "dropout")
            self.weight_decay = conf.getfloat("HGPrompt", "weight-decay")
            self.slope = conf.getfloat("HGPrompt", "slope")
            self._dataset = conf.get("HGPrompt", "_dataset")    #   ACM,DBLP
            self.seed = conf.getint("HGPrompt", "seed")
            self.tuple_neg_disconnected_num = conf.getint("HGPrompt", "tuple_neg_disconnected_num")
            self.tuple_neg_unrelated_num = conf.getint("HGPrompt", "tuple_neg_unrelated_num")
            self.target_tuple_neg_disconnected_num = conf.getint("HGPrompt", "target_tuple_neg_disconnected_num")
            self.subgraph_hop_num = conf.getint("HGPrompt", "subgraph_hop_num")
            self.subgraph_neighbor_num_bar = conf.getint("HGPrompt", "subgraph_neighbor_num_bar")
            self.temperature = conf.getfloat("HGPrompt", "temperature")
            self.loss_weight = conf.getfloat("HGPrompt", "loss_weight")
            self.hetero_pretrain = conf.getint("HGPrompt", "hetero_pretrain")
            self.target_pretrain = conf.getint("HGPrompt", "target_pretrain")
            self.hetero_subgraph = conf.getint("HGPrompt", "hetero_subgraph")
            self.semantic_weight = conf.getint("HGPrompt", "semantic_weight")
            self.each_loss = conf.getint("HGPrompt", "each_loss")
            self.freebase_type = conf.getint("HGPrompt", "freebase_type")
            self.edge_feats = conf.getint("HGPrompt", "edge_feats")

            # Downstream-task hyperparameters (keys suffixed with "_down").
            self.feats_type_down = conf.getint("HGPrompt", "feats_type_down")
            self.hidden_dim_down = conf.getint("HGPrompt", "hidden_dim_down")
            self.bottle_net_hidden_dim_down = conf.getint("HGPrompt", "bottle_net_hidden_dim_down")
            self.bottle_net_output_dim_down = conf.getint("HGPrompt", "bottle_net_output_dim_down")
            self.edge_feats_down = conf.getint("HGPrompt", "edge_feats_down")
            self.num_heads_down = conf.getint("HGPrompt", "num_heads_down")
            self.epoch_down = conf.getint("HGPrompt", "epoch_down")
            self.patience_down = conf.getint("HGPrompt", "patience_down")
            self.repeat_down = conf.getint("HGPrompt", "repeat_down")
            self.model_type_down = conf.get("HGPrompt", "model_type_down")
            self.num_layers_down = conf.getint("HGPrompt", "num_layers_down")
            self.lr_down = conf.getfloat("HGPrompt", "lr_down")
            self.run_down = conf.getint("HGPrompt", "run_down")
            self.device_down = conf.getint("HGPrompt", "device_down")
            self.dropout_down = conf.getfloat("HGPrompt", "dropout_down")
            self.weight_decay_down = conf.getfloat("HGPrompt", "weight_decay_down")
            self.slope_down = conf.getfloat("HGPrompt", "slope_down")
            self.dataset_down = conf.get("HGPrompt", "dataset_down")
            self.seed_down = conf.getint("HGPrompt", "seed_down")
            self.tasknum_down = conf.getint("HGPrompt", "tasknum_down")
            self.shotnum_down = conf.getint("HGPrompt", "shotnum_down")
            self.load_pretrain_down = conf.getint("HGPrompt", "load_pretrain_down")
            self.tuning_down = conf.get("HGPrompt", "tuning_down")
            self.subgraph_hop_num_down = conf.getint("HGPrompt", "subgraph_hop_num_down")
            self.pre_loss_weight_down = conf.getfloat("HGPrompt", "pre_loss_weight_down")
            self.hetero_pretrain_down = conf.getint("HGPrompt", "hetero_pretrain_down")
            self.hetero_pretrain_subgraph_down = conf.getint("HGPrompt", "hetero_pretrain_subgraph_down")
            self.pretrain_semantic_down = conf.getint("HGPrompt", "pretrain_semantic_down")
            self.add_edge_info2prompt_down = conf.getint("HGPrompt", "add_edge_info2prompt_down")
            self.each_type_subgraph_down = conf.getint("HGPrompt", "each_type_subgraph_down")
            self.pretrain_each_loss_down = conf.getint("HGPrompt", "pretrain_each_loss_down")
            self.cat_prompt_dim_down = conf.getint("HGPrompt", "cat_prompt_dim_down")
            self.cat_hprompt_dim_down = conf.getint("HGPrompt", "cat_hprompt_dim_down")
            self.tuple_neg_disconnected_num_down = conf.getint("HGPrompt", "tuple_neg_disconnected_num_down")
            self.tuple_neg_unrelated_num_down = conf.getint("HGPrompt", "tuple_neg_unrelated_num_down")
            self.meta_path_down = conf.getint("HGPrompt", "meta_path_down")
            self.semantic_prompt_down = conf.getint("HGPrompt", "semantic_prompt_down")
            self.freebase_type_down = conf.getint("HGPrompt", "freebase_type_down")
            self.semantic_prompt_weight_down = conf.getfloat("HGPrompt", "semantic_prompt_weight_down")
            self.shgn_hidden_dim_down = conf.getint("HGPrompt", "shgn_hidden_dim_down")


##########################################

        elif self.model_name == "DisenKGAT":

            self.name = conf.get("DisenKGAT","name")
            self.score_func = conf.get("DisenKGAT","score_func")
            self.opn = conf.get("DisenKGAT","opn")
            self.logdir = conf.get("DisenKGAT","logdir")
            self.config = conf.get("DisenKGAT","config")
            self.strategy = conf.get("DisenKGAT","strategy")
            self.form = conf.get("DisenKGAT","form")
            self.mi_method = conf.get("DisenKGAT","mi_method")
            self.att_mode = conf.get("DisenKGAT","att_mode")
            self.score_method = conf.get("DisenKGAT","score_method")
            self.score_order = conf.get("DisenKGAT","score_order")
            self.gamma_method = conf.get("DisenKGAT","gamma_method")

            self.k_w= conf.getint("DisenKGAT", "k_w") 
            self.batch = conf.getint("DisenKGAT", "batch")
            self.test_batch = conf.getint("DisenKGAT", "test_batch")
            self.epoch = conf.getint("DisenKGAT", "epoch")
            self.num_workers = conf.getint("DisenKGAT", "num_workers")
            self.seed = conf.getint("DisenKGAT", "seed")
            self.init_dim = conf.getint("DisenKGAT", "init_dim")
            self.gcn_dim = conf.getint("DisenKGAT", "gcn_dim")
            self.embed_dim = conf.getint("DisenKGAT", "embed_dim")
            self.gcn_layer = conf.getint("DisenKGAT", "gcn_layer")
            self.k_h = conf.getint("DisenKGAT", "k_h")
            self.num_filt = conf.getint("DisenKGAT", "num_filt")
            self.ker_sz = conf.getint("DisenKGAT", "ker_sz")
            self.num_bases = conf.getint("DisenKGAT", "num_bases")
            self.neg_num = conf.getint("DisenKGAT", "neg_num")
            self.ik_w = conf.getint("DisenKGAT", "ik_w")
            self.ik_h = conf.getint("DisenKGAT", "ik_h")
            self.inum_filt = conf.getint("DisenKGAT", "inum_filt")
            self.iker_sz = conf.getint("DisenKGAT", "iker_sz")
            self.iperm = conf.getint("DisenKGAT", "iperm")
            self.head_num = conf.getint("DisenKGAT", "head_num")
            self.num_factors = conf.getint("DisenKGAT", "num_factors")
            self.early_stop = conf.getint("DisenKGAT", "early_stop")
            self.mi_epoch = conf.getint("DisenKGAT", "mi_epoch")
            self.feat_drop = conf.getfloat("DisenKGAT", "feat_drop")
            self.hid_drop2 = conf.getfloat("DisenKGAT", "hid_drop2")
            self.hid_drop = conf.getfloat("DisenKGAT", "hid_drop")
            self.gcn_drop = conf.getfloat("DisenKGAT", "gcn_drop")
            self.gamma = conf.getfloat("DisenKGAT", "gamma")
            self.l2 = conf.getfloat("DisenKGAT", "l2")
            self.lr = conf.getfloat("DisenKGAT", "lr")
            self.lbl_smooth = conf.getfloat("DisenKGAT", "lbl_smooth")
            self.iinp_drop = conf.getfloat("DisenKGAT", "iinp_drop")
            self.ifeat_drop = conf.getfloat("DisenKGAT", "ifeat_drop")
            self.ihid_drop = conf.getfloat("DisenKGAT", "ihid_drop")
            self.alpha = conf.getfloat("DisenKGAT", "alpha")
            self.max_gamma = conf.getfloat("DisenKGAT", "max_gamma")
            self.init_gamma = conf.getfloat("DisenKGAT", "init_gamma")
            self.restore = conf.getboolean("DisenKGAT", "restore")
            self.bias = conf.getboolean("DisenKGAT", "bias")
            self.no_act = conf.getboolean("DisenKGAT", "no_act")
            self.mi_train = conf.getboolean("DisenKGAT", "mi_train")
            self.no_enc = conf.getboolean("DisenKGAT", "no_enc")
            self.mi_drop = conf.getboolean("DisenKGAT", "mi_drop")
            self.fix_gamma = conf.getboolean("DisenKGAT", "fix_gamma")


        elif self.model_name == "NBF" and self.dataset_name =="NBF_WN18RR":

            self.input_dim = conf.getint("NBF", "input_dim")
            self.hidden_dims = [32, 32, 32, 32, 32, 32]  
            self.message_func = conf.get("NBF", "message_func")
            self.aggregate_func = conf.get("NBF", "aggregate_func")
            self.short_cut = conf.getboolean("NBF","short_cut")
            self.layer_norm = conf.getboolean("NBF","layer_norm")
            self.dependent = conf.getboolean("NBF","dependent")

            self.num_negative = conf.getint("NBF","num_negative")
            self.strict_negative = conf.getboolean("NBF","strict_negative")
            self.adversarial_temperature = conf.getint("NBF", "adversarial_temperature")
            self.metric = ['mr', 'mrr', 'hits@1', 'hits@3', 'hits@10', 'hits@10_50']

            self.lr = conf.getfloat("NBF","lr")
            self.gpus = [0]       
            self.batch_size = conf.getint("NBF","batch_size")
            self.num_epoch = conf.getint("NBF","num_epoch")
            self.log_interval = conf.getint("NBF","log_interval")

###############################################################################################################

        elif self.model_name == "NSHE":
            self.dim_size = {}

            self.dim_size['emd'] = conf.getint("NSHE", "emd_dim")
            self.dim_size['context'] = conf.getint("NSHE", "context_dim")
            self.dim_size['project'] = conf.getint("NSHE", "project_dim")
            self.hidden_dim = conf.getint("NSHE", "hidden_dim")
            self.lr = conf.getfloat("NSHE", "learning_rate")
            self.weight_decay = conf.getfloat("NSHE", "weight_decay")
            self.beta = conf.getfloat("NSHE", "beta")
            self.seed = conf.getint("NSHE", "seed")
            self.dropout = conf.getfloat('NSHE', 'dropout')
            np.random.seed(self.seed)
            self.max_epoch = conf.getint("NSHE", "max_epoch")
            self.patience = conf.getint("NSHE", "patience")
            self.num_e_neg = conf.getint("NSHE", "num_e_neg")
            self.num_ns_neg = conf.getint("NSHE", "num_ns_neg")
            self.num_heads = conf.getint('NSHE', 'num_heads')
            self.norm_emd_flag = conf.get("NSHE", "norm_emd_flag")
            self.mini_batch_flag = conf.getboolean("NSHE", "mini_batch_flag")
            
            
            

        elif self.model_name in ["GTN", "fastGTN"]:
            self.lr = conf.getfloat("GTN", "learning_rate")
            self.weight_decay = conf.getfloat("GTN", "weight_decay")
            self.seed = conf.getint("GTN", "seed")
            # np.random.seed(self.seed)

            self.hidden_dim = conf.getint("GTN", "hidden_dim")
            self.out_dim = conf.getint("GTN", "out_dim")
            self.num_channels = conf.getint("GTN", "num_channels")
            self.num_layers = conf.getint("GTN", "num_layers")
            self.max_epoch = conf.getint("GTN", "max_epoch")
            self.patience = conf.getint("GTN", "patience")

            self.identity = conf.getboolean("GTN", "identity")
            self.norm_emd_flag = conf.getboolean("GTN", "norm_emd_flag")
            self.adaptive_lr_flag = conf.getboolean("GTN", "adaptive_lr_flag")
            self.mini_batch_flag = conf.getboolean("GTN", "mini_batch_flag")

        elif self.model_name == "MHNF":
            self.lr = conf.getfloat("MHNF", "learning_rate")
            self.weight_decay = conf.getfloat("MHNF", "weight_decay")
            self.seed = conf.getint("MHNF", "seed")
            # np.random.seed(self.seed)

            self.hidden_dim = conf.getint("MHNF", "hidden_dim")
            self.out_dim = conf.getint("MHNF", "out_dim")
            self.num_channels = conf.getint("MHNF", "num_channels")
            self.num_layers = conf.getint("MHNF", "num_layers")
            self.max_epoch = conf.getint("MHNF", "max_epoch")
            self.patience = conf.getint("MHNF", "patience")

            self.identity = conf.getboolean("MHNF", "identity")
            self.norm_emd_flag = conf.getboolean("MHNF", "norm_emd_flag")
            self.adaptive_lr_flag = conf.getboolean("MHNF", "adaptive_lr_flag")
            self.mini_batch_flag = conf.getboolean("MHNF", "mini_batch_flag")

        elif self.model_name == "RSHN":
            self.lr = conf.getfloat("RSHN", "learning_rate")
            self.weight_decay = conf.getfloat("RSHN", "weight_decay")
            self.dropout = conf.getfloat("RSHN", "dropout")

            self.seed = conf.getint("RSHN", "seed")
            self.hidden_dim = conf.getint("RSHN", "hidden_dim")
            self.max_epoch = conf.getint("RSHN", "max_epoch")
            self.rw_len = conf.getint("RSHN", "rw_len")
            self.batch_size = conf.getint("RSHN", "batch_size")
            self.num_node_layer = conf.getint("RSHN", "num_node_layer")
            self.num_edge_layer = conf.getint("RSHN", "num_edge_layer")
            self.patience = conf.getint("RSHN", "patience")
            self.validation = conf.getboolean("RSHN", "validation")
            self.mini_batch_flag = conf.getboolean("RSHN", "mini_batch_flag")

        elif self.model_name == "RGCN":
            self.lr = conf.getfloat("RGCN", "learning_rate")
            self.dropout = conf.getfloat("RGCN", "dropout")

            self.in_dim = conf.getint("RGCN", "in_dim")
            self.hidden_dim = conf.getint("RGCN", "hidden_dim")

            self.n_bases = conf.getint("RGCN", "n_bases")
            self.num_layers = conf.getint("RGCN", "num_layers")
            self.max_epoch = conf.getint("RGCN", "max_epoch")
            self.weight_decay = conf.getfloat("RGCN", "weight_decay")
            self.seed = conf.getint("RGCN", "seed")
            self.fanout = conf.getint("RGCN", "fanout")
            self.patience = conf.getint("RGCN", "patience")
            self.batch_size = conf.getint("RGCN", "batch_size")
            self.validation = conf.getboolean("RGCN", "validation")
            self.mini_batch_flag = conf.getboolean("RGCN", "mini_batch_flag")
            self.use_self_loop = conf.getboolean("RGCN", "use_self_loop")
            self.use_uva = conf.getboolean("RGCN", "use_uva")

        elif self.model_name == "CompGCN":
            self.lr = conf.getfloat("CompGCN", "learning_rate")

            self.weight_decay = conf.getfloat("CompGCN", "weight_decay")
            self.dropout = conf.getfloat("CompGCN", "dropout")

            self.in_dim = conf.getint("CompGCN", "in_dim")
            self.hidden_dim = conf.getint("CompGCN", "hidden_dim")
            self.out_dim = conf.getint("CompGCN", "out_dim")
            self.num_layers = conf.getint("CompGCN", "num_layers")
            self.max_epoch = conf.getint("CompGCN", "max_epoch")
            self.seed = conf.getint("CompGCN", "seed")
            self.patience = conf.getint("CompGCN", "patience")

            self.comp_fn = conf.get("CompGCN", "comp_fn")
            self.mini_batch_flag = conf.getboolean("CompGCN", "mini_batch_flag")
            self.validation = conf.getboolean("CompGCN", "validation")
            self.fanout = conf.getint("CompGCN", "fanout")
            self.batch_size = conf.getint("CompGCN", "batch_size")
            pass
        elif self.model_name == "HetGNN":
            self.lr = conf.getfloat("HetGNN", "learning_rate")
            self.weight_decay = conf.getfloat("HetGNN", "weight_decay")

            # self.dropout = conf.getfloat("CompGCN", "dropout")
            self.max_epoch = conf.getint("HetGNN", "max_epoch")
            self.dim = conf.getint("HetGNN", "dim")
            self.batch_size = conf.getint("HetGNN", "batch_size")
            self.window_size = conf.getint("HetGNN", "window_size")
            self.num_workers = conf.getint("HetGNN", "num_workers")
            self.batches_per_epoch = conf.getint("HetGNN", "batches_per_epoch")
            self.seed = conf.getint("HetGNN", "seed")
            self.patience = conf.getint("HetGNN", "patience")
            self.rw_length = conf.getint("HetGNN", "rw_length")
            self.rw_walks = conf.getint("HetGNN", "rw_walks")
            self.rwr_prob = conf.getfloat("HetGNN", "rwr_prob")
            self.mini_batch_flag = conf.getboolean("HetGNN", "mini_batch_flag")
            pass
        elif self.model_name == "Metapath2vec":
            self.lr = conf.getfloat("Metapath2vec", "learning_rate")
            self.max_epoch = conf.getint("Metapath2vec", "max_epoch")
            self.dim = conf.getint("Metapath2vec", "dim")
            self.batch_size = conf.getint("Metapath2vec", "batch_size")
            self.window_size = conf.getint("Metapath2vec", "window_size")
            self.num_workers = conf.getint("Metapath2vec", "num_workers")
            self.neg_size = conf.getint("Metapath2vec", "neg_size")
            self.rw_length = conf.getint("Metapath2vec", "rw_length")
            self.rw_walks = conf.getint("Metapath2vec", "rw_walks")
            self.meta_path_key = conf.get("Metapath2vec", "meta_path_key")

        elif self.model_name == "HERec":
            self.lr = conf.getfloat("HERec", "learning_rate")
            self.max_epoch = conf.getint("HERec", "max_epoch")
            self.dim = conf.getint("HERec", "dim")
            self.batch_size = conf.getint("HERec", "batch_size")
            self.window_size = conf.getint("HERec", "window_size")
            self.num_workers = conf.getint("HERec", "num_workers")
            self.neg_size = conf.getint("HERec", "neg_size")
            self.rw_length = conf.getint("HERec", "rw_length")
            self.rw_walks = conf.getint("HERec", "rw_walks")
            self.meta_path_key = conf.get("HERec", "meta_path_key")

        elif self.model_name == "HAN":
            self.lr = conf.getfloat("HAN", "learning_rate")
            self.weight_decay = conf.getfloat("HAN", "weight_decay")
            self.seed = conf.getint("HAN", "seed")
            self.dropout = conf.getfloat("HAN", "dropout")

            self.hidden_dim = conf.getint("HAN", "hidden_dim")
            self.out_dim = conf.getint("HAN", "out_dim")
            num_heads = conf.get("HAN", "num_heads").split("-")
            self.num_heads = [int(i) for i in num_heads]
            self.patience = conf.getint("HAN", "patience")
            self.max_epoch = conf.getint("HAN", "max_epoch")
            self.mini_batch_flag = conf.getboolean("HAN", "mini_batch_flag")

        elif self.model_name == "RoHe":
            self.lr = conf.getfloat("RoHe", "learning_rate")
            self.weight_decay = conf.getfloat("RoHe", "weight_decay")
            self.seed = conf.getint("RoHe", "seed")
            self.dropout = conf.getfloat("RoHe", "dropout")

            self.hidden_dim = conf.getint("RoHe", "hidden_dim")
            self.out_dim = conf.getint("RoHe", "out_dim")
            num_heads = conf.get("RoHe", "num_heads").split("-")
            self.num_heads = [int(i) for i in num_heads]
            self.patience = conf.getint("RoHe", "patience")
            self.max_epoch = conf.getint("RoHe", "max_epoch")
            self.mini_batch_flag = conf.getboolean("RoHe", "mini_batch_flag")

        elif self.model_name == "NARS":
            self.lr = conf.getfloat("NARS", "learning_rate")
            self.weight_decay = conf.getfloat("NARS", "weight_decay")
            self.seed = conf.getint("NARS", "seed")
            self.dropout = conf.getfloat("NARS", "dropout")
            self.patience = conf.getint("HAN", "patience")
            self.hidden_dim = conf.getint("NARS", "hidden_dim")
            self.out_dim = conf.getint("NARS", "out_dim")
            num_heads = conf.get("NARS", "num_heads").split("-")
            self.num_heads = [int(i) for i in num_heads]
            self.num_hops = conf.getint("NARS", "num_hops")

            self.max_epoch = conf.getint("NARS", "max_epoch")
            self.mini_batch_flag = conf.getboolean("NARS", "mini_batch_flag")
            self.R = conf.getint("NARS", "R")
            self.cpu_preprocess = conf.getboolean("NARS", "cpu_preprocess")
            self.input_dropout = conf.getboolean("NARS", "input_dropout")

            self.ff_layer = conf.getint("NARS", "ff_layer")


        elif self.model_name == 'MAGNN':
          
            self.graph_address = ''
            self.user_name = ''
            self.password = ''
            self.lr = conf.getfloat("MAGNN", "learning_rate")
            self.weight_decay = conf.getfloat("MAGNN", "weight_decay")
            self.seed = conf.getint("MAGNN", "seed")
            self.dropout = conf.getfloat("MAGNN", "dropout")
            self.inter_attn_feats = conf.getint("MAGNN", "inter_attn_feats")
            self.hidden_dim = conf.getint("MAGNN", "hidden_dim")
            self.out_dim = conf.getint("MAGNN", "out_dim")
            self.num_heads = conf.getint("MAGNN", "num_heads")
            self.num_layers = conf.getint("MAGNN", "num_layers")

            self.patience = conf.getint("MAGNN", "patience")
            self.max_epoch = conf.getint("MAGNN", "max_epoch")
            self.encoder_type = conf.get("MAGNN", "encoder_type")
            self.mini_batch_flag = conf.getboolean("MAGNN", "mini_batch_flag")
            if self.mini_batch_flag:
                self.batch_size = conf.getint("MAGNN", "batch_size")
                self.num_samples = conf.getint("MAGNN", "num_samples")

        elif self.model_name == "RHGNN":
            self.lr = conf.getfloat("RHGNN", "learning_rate")
            self.num_heads = conf.getint("RHGNN", "num_heads")
            self.hidden_dim = conf.getint("RHGNN", "hidden_dim")
            self.relation_hidden_units = conf.getint("RHGNN", "relation_hidden_units")
            self.drop_out = conf.getfloat("RHGNN", "drop_out")
            self.num_layers = conf.getint("RHGNN", "num_layers")
            self.residual = conf.getboolean("RHGNN", "residual")
            self.batch_size = conf.getint("RHGNN", "batch_size")
            self.node_neighbors_min_num = conf.getint("RHGNN", "node_neighbors_min_num")
            # self.optimizer = conf.get
            self.weight_decay = conf.getfloat("RHGNN", "weight_decay")
            self.max_epoch = conf.getint("RHGNN", "max_epoch")
            self.patience = conf.getint("RHGNN", "patience")
            self.mini_batch_flag = conf.getboolean("RHGNN", "mini_batch_flag")
            self.negative_slope = conf.getfloat("RHGNN", "negative_slope")
            self.norm = conf.getboolean("RHGNN", "norm")
            self.dropout = conf.getfloat("RHGNN", "dropout")
            self.n_heads = conf.getint("RHGNN", "n_heads")
            self.category = conf.get("RHGNN", "category")
            self.out_dim = conf.getint("RHGNN", "out_dim")
            self.use_uva = conf.getboolean("RHGNN", "use_uva")
            self.fanout = conf.getint("RHGNN", "fanout")

        elif self.model_name == "HGNN_AC":
            self.feats_drop_rate = conf.getfloat("HGNN_AC", "feats_drop_rate")
            self.attn_vec_dim = conf.getint("HGNN_AC", "attn_vec_dim")
            self.feats_opt = conf.get("HGNN_AC", "feats_opt")
            self.loss_lambda = conf.getfloat("HGNN_AC", "loss_lambda")
            self.src_node_type = conf.getint("HGNN_AC", "src_node_type")
            # Name of the HIN backbone that HGNN_AC wraps; the backbone's own
            # config section supplies most of the training hyper-parameters.
            self.HIN = conf.get("HGNN_AC", "HIN")
            if self.HIN == "MAGNN":
                self.lr = conf.getfloat("MAGNN", "learning_rate")
                self.weight_decay = conf.getfloat("MAGNN", "weight_decay")
                self.seed = conf.getint("MAGNN", "seed")
                self.dropout = conf.getfloat("MAGNN", "dropout")

                self.inter_attn_feats = conf.getint("MAGNN", "inter_attn_feats")
                self.hidden_dim = conf.getint("MAGNN", "hidden_dim")
                self.out_dim = conf.getint("MAGNN", "out_dim")
                self.num_heads = conf.getint("MAGNN", "num_heads")
                self.num_layers = conf.getint("MAGNN", "num_layers")

                self.patience = conf.getint("MAGNN", "patience")
                self.max_epoch = conf.getint("MAGNN", "max_epoch")
                self.mini_batch_flag = conf.getboolean("MAGNN", "mini_batch_flag")
                self.encoder_type = conf.get("MAGNN", "encoder_type")
            elif self.HIN == "GTN":
                self.lr = conf.getfloat("GTN", "learning_rate")
                self.weight_decay = conf.getfloat("GTN", "weight_decay")
                self.seed = conf.getint("GTN", "seed")
                # np.random.seed(self.seed)

                self.hidden_dim = conf.getint("GTN", "hidden_dim")
                self.out_dim = conf.getint("GTN", "out_dim")
                self.num_channels = conf.getint("GTN", "num_channels")
                self.num_layers = conf.getint("GTN", "num_layers")
                self.max_epoch = conf.getint("GTN", "max_epoch")
                self.patience = conf.getint("GTN", "patience")

                self.identity = conf.getboolean("GTN", "identity")
                self.norm_emd_flag = conf.getboolean("GTN", "norm_emd_flag")
                self.adaptive_lr_flag = conf.getboolean("GTN", "adaptive_lr_flag")
                self.mini_batch_flag = conf.getboolean("GTN", "mini_batch_flag")
                # NOTE(review): dropout / num_heads are read from the HGNN_AC
                # section rather than GTN — presumably these belong to the AC
                # attention module, not the backbone; confirm against the ini.
                self.dropout = conf.getfloat("HGNN_AC", "dropout")
                self.num_heads = conf.getint("HGNN_AC", "num_heads")
            elif self.HIN == "MHNF":
                self.lr = conf.getfloat("MHNF", "learning_rate")
                self.weight_decay = conf.getfloat("MHNF", "weight_decay")
                self.seed = conf.getint("MHNF", "seed")
                # np.random.seed(self.seed)

                self.hidden_dim = conf.getint("MHNF", "hidden_dim")
                self.out_dim = conf.getint("MHNF", "out_dim")
                self.num_channels = conf.getint("MHNF", "num_channels")
                self.num_layers = conf.getint("MHNF", "num_layers")
                self.max_epoch = conf.getint("MHNF", "max_epoch")
                self.patience = conf.getint("MHNF", "patience")

                self.identity = conf.getboolean("MHNF", "identity")
                self.norm_emd_flag = conf.getboolean("MHNF", "norm_emd_flag")
                self.adaptive_lr_flag = conf.getboolean("MHNF", "adaptive_lr_flag")
                self.mini_batch_flag = conf.getboolean("MHNF", "mini_batch_flag")
                # Hard-coded here (not read from the ini), unlike the other
                # backbones — keep in sync with the MHNF section if it changes.
                self.dropout = 0.2
                self.num_heads = 8
        elif self.model_name == "HGT":
            self.lr = conf.getfloat("HGT", "learning_rate")
            self.weight_decay = conf.getfloat("HGT", "weight_decay")
            self.seed = conf.getint("HGT", "seed")
            self.dropout = conf.getfloat("HGT", "dropout")

            self.batch_size = conf.getint("HGT", "batch_size")
            self.hidden_dim = conf.getint("HGT", "hidden_dim")
            self.out_dim = conf.getint("HGT", "out_dim")
            self.num_heads = conf.getint("HGT", "num_heads")
            self.patience = conf.getint("HGT", "patience")
            self.max_epoch = conf.getint("HGT", "max_epoch")
            self.num_workers = conf.getint("HGT", "num_workers")
            self.mini_batch_flag = conf.getboolean("HGT", "mini_batch_flag")
            self.fanout = conf.getint("HGT", "fanout")
            self.norm = conf.getboolean("HGT", "norm")
            self.num_layers = conf.getint("HGT", "num_layers")
            self.num_heads = conf.getint("HGT", "num_heads")
            self.use_uva = conf.getboolean("HGT", "use_uva")
        elif self.model_name == "HeCo":
            # HeCo: contrastive model; eva_lr / eva_wd configure the optimiser
            # of the downstream evaluation classifier.
            self.lr = conf.getfloat("HeCo", "learning_rate")
            self.weight_decay = conf.getfloat("HeCo", "weight_decay")
            self.seed = conf.getint("HeCo", "seed")

            self.hidden_dim = conf.getint("HeCo", "hidden_dim")
            self.patience = conf.getint("HeCo", "patience")
            self.max_epoch = conf.getint("HeCo", "max_epoch")
            self.mini_batch_flag = conf.getboolean("HeCo", "mini_batch_flag")

            self.feat_drop = conf.getfloat("HeCo", "feat_drop")
            self.attn_drop = conf.getfloat("HeCo", "attn_drop")
            self.eva_lr = conf.getfloat("HeCo", "eva_lr")
            self.eva_wd = conf.getfloat("HeCo", "eva_wd")
            sample_rate = conf.get("HeCo", "sample_rate").split("_")
            # self.sample_rate = [int(i) for i in sample_rate]
            # Parse a "key-count_key-count" string into {key: count}; keys are
            # presumably node types — verify against the dataset config.
            self.sample_rate = {}
            for i in sample_rate:
                one = i.split("-")
                self.sample_rate[one[0]] = int(one[1])
            self.tau = conf.getfloat("HeCo", "tau")
            self.lam = conf.getfloat("HeCo", "lam")

        elif self.model_name == "DMGI":
            # DMGI: multiplexed graph infomax; isSemi toggles the
            # semi-supervised variant (sup_coef weights its loss term).
            self.lr = conf.getfloat("DMGI", "learning_rate")
            self.weight_decay = conf.getfloat("DMGI", "weight_decay")
            self.sc = conf.getint("DMGI", "sc")
            self.seed = conf.getint("DMGI", "seed")
            self.sup_coef = conf.getfloat("DMGI", "sup_coef")
            self.reg_coef = conf.getfloat("DMGI", "reg_coef")
            self.dropout = conf.getfloat("DMGI", "dropout")
            self.hidden_dim = conf.getint("DMGI", "hidden_dim")
            self.num_heads = conf.getint("DMGI", "num_heads")
            self.patience = conf.getint("DMGI", "patience")
            self.max_epoch = conf.getint("DMGI", "max_epoch")
            self.isSemi = conf.getboolean("DMGI", "isSemi")
            self.isBias = conf.getboolean("DMGI", "isBias")
            self.isAttn = conf.getboolean("DMGI", "isAttn")

        elif self.model_name == "SLiCE":
            # SLiCE: random-walk pretraining options followed by the
            # fine-tuning (ft_*) stage settings.
            self.data_name = conf.get("SLiCE", "data_name")
            self.num_walks_per_node = conf.getint("SLiCE", "num_walks_per_node")
            self.beam_width = conf.getint("SLiCE", "beam_width")
            self.max_length = conf.getint("SLiCE", "max_length")
            self.walk_type = conf.get("SLiCE", "walk_type")
            self.batch_size = conf.getint("SLiCE", "batch_size")
            self.outdir = conf.get("SLiCE", "outdir")
            self.n_pred = conf.getint("SLiCE", "n_pred")
            self.max_pred = conf.getint("SLiCE", "max_pred")
            self.lr = conf.getfloat("SLiCE", "lr")
            self.n_epochs = conf.getint("SLiCE", "n_epochs")
            self.get_bert_encoder_embeddings = conf.getboolean(
                "SLiCE", "get_bert_encoder_embeddings"
            )
            self.checkpoint = conf.getint("SLiCE", "checkpoint")
            self.path_option = conf.get("SLiCE", "path_option")
            self.ft_batch_size = conf.getint("SLiCE", "ft_batch_size")
            # self.embed_dir=conf.get('SLiCE','embed_dir')
            self.d_model = conf.getint("SLiCE", "d_model")
            self.ft_d_ff = conf.getint("SLiCE", "ft_d_ff")
            self.ft_layer = conf.get("SLiCE", "ft_layer")
            self.ft_drop_rate = conf.getfloat("SLiCE", "ft_drop_rate")
            self.ft_input_option = conf.get("SLiCE", "ft_input_option")
            self.num_layers = conf.getint("SLiCE", "num_layers")
            self.ft_lr = conf.getfloat("SLiCE", "ft_lr")
            self.ft_n_epochs = conf.getint("SLiCE", "ft_n_epochs")
            self.ft_checkpoint = conf.getint("SLiCE", "ft_checkpoint")
            self.pretrained_embeddings = conf.get("SLiCE", "pretrained_embeddings")
        elif self.model_name == "HPN":
            # HPN: propagation depth (k_layer), restart weight (alpha) and
            # edge dropout for the semantic propagation layers.
            self.lr = conf.getfloat("HPN", "learning_rate")
            self.weight_decay = conf.getfloat("HPN", "weight_decay")
            self.seed = conf.getint("HPN", "seed")
            self.dropout = conf.getfloat("HPN", "dropout")
            self.hidden_dim = conf.getint("HPN", "hidden_dim")
            self.k_layer = conf.getint("HPN", "k_layer")
            self.alpha = conf.getfloat("HPN", "alpha")
            self.edge_drop = conf.getfloat("HPN", "edge_drop")
            self.patience = conf.getint("HPN", "patience")
            self.max_epoch = conf.getint("HPN", "max_epoch")
            self.mini_batch_flag = conf.getboolean("HPN", "mini_batch_flag")
        elif self.model_name == "KGCN":
            self.weight_decay = conf.getfloat("KGCN", "weight_decay")
            self.batch_size = conf.getint("KGCN", "batch_size")
            self.in_dim = conf.getint("KGCN", "in_dim")
            self.out_dim = conf.getint("KGCN", "out_dim")
            self.lr = conf.getfloat("KGCN", "lr")
            self.n_neighbor = conf.getint("KGCN", "n_neighbor")
            self.n_relation = conf.getint("KGCN", "n_relation")
            self.aggregate = conf.get("KGCN", "aggregate")
            self.n_item = conf.getint("KGCN", "n_relation")
            self.n_user = conf.getint("KGCN", "n_user")
            # self.epoch_iter = conf.getint("KGCN", "epoch_iter")
            self.max_epoch = conf.getint("KGCN", "max_epoch")

        elif self.model_name == "general_HGNN":
            # general_HGNN: space-search backbone; layers_pre_mp / layers_gnn /
            # layers_post_mp define the stage layout, activation is resolved
            # from its name via act_dict.
            self.lr = conf.getfloat("general_HGNN", "lr")
            self.weight_decay = conf.getfloat("general_HGNN", "weight_decay")
            self.dropout = conf.getfloat("general_HGNN", "dropout")

            self.hidden_dim = conf.getint("general_HGNN", "hidden_dim")
            self.num_heads = conf.getint("general_HGNN", "num_heads")
            self.patience = conf.getint("general_HGNN", "patience")
            self.max_epoch = conf.getint("general_HGNN", "max_epoch")
            self.mini_batch_flag = conf.getboolean("general_HGNN", "mini_batch_flag")
            self.layers_gnn = conf.getint("general_HGNN", "layers_gnn")
            self.layers_pre_mp = conf.getint("general_HGNN", "layers_pre_mp")
            self.layers_post_mp = conf.getint("general_HGNN", "layers_post_mp")
            self.stage_type = conf.get("general_HGNN", "stage_type")
            self.gnn_type = conf.get("general_HGNN", "gnn_type")
            self.activation = conf.get("general_HGNN", "activation")
            # Replace the activation *name* with the callable it maps to.
            self.activation = act_dict[self.activation]
            self.subgraph_extraction = conf.get("general_HGNN", "subgraph_extraction")
            self.feat = conf.getint("general_HGNN", "feat")
            self.has_bn = conf.getboolean("general_HGNN", "has_bn")
            self.has_l2norm = conf.getboolean("general_HGNN", "has_l2norm")
            self.macro_func = conf.get("general_HGNN", "macro_func")

        elif self.model_name == "homo_GNN":
            # homo_GNN: homogeneous-GNN baseline; mirrors the general_HGNN
            # options minus the macro function / subgraph-extraction choice.
            self.lr = conf.getfloat("homo_GNN", "lr")
            self.weight_decay = conf.getfloat("homo_GNN", "weight_decay")
            self.dropout = conf.getfloat("homo_GNN", "dropout")

            self.hidden_dim = conf.getint("homo_GNN", "hidden_dim")
            self.num_heads = conf.getint("homo_GNN", "num_heads")
            self.patience = conf.getint("homo_GNN", "patience")
            self.max_epoch = conf.getint("homo_GNN", "max_epoch")
            self.mini_batch_flag = conf.getboolean("homo_GNN", "mini_batch_flag")
            self.layers_gnn = conf.getint("homo_GNN", "layers_gnn")
            self.layers_pre_mp = conf.getint("homo_GNN", "layers_pre_mp")
            self.layers_post_mp = conf.getint("homo_GNN", "layers_post_mp")
            self.stage_type = conf.get("homo_GNN", "stage_type")
            self.gnn_type = conf.get("homo_GNN", "gnn_type")
            self.activation = conf.get("homo_GNN", "activation")
            # Replace the activation *name* with the callable it maps to.
            self.activation = act_dict[self.activation]
            self.subgraph = conf.get("homo_GNN", "subgraph")
            self.feat = conf.getint("homo_GNN", "feat")
            self.has_bn = conf.getboolean("homo_GNN", "has_bn")
            self.has_l2norm = conf.getboolean("homo_GNN", "has_l2norm")
        elif self.model_name == "HeGAN":
            # HeGAN: adversarial model with separate generator (gen) and
            # discriminator (dis) learning rates, weight decays and epochs.
            self.lr_gen = conf.getfloat("HeGAN", "lr_gen")
            self.lr_dis = conf.getfloat("HeGAN", "lr_dis")
            self.sigma = conf.getfloat("HeGAN", "sigma")
            self.n_sample = conf.getint("HeGAN", "n_sample")
            self.max_epoch = conf.getint("HeGAN", "max_epoch")
            self.epoch_dis = conf.getint("HeGAN", "epoch_dis")
            self.epoch_gen = conf.getint("HeGAN", "epoch_gen")
            self.wd_dis = conf.getfloat("HeGAN", "wd_dis")
            self.wd_gen = conf.getfloat("HeGAN", "wd_gen")
            self.mini_batch_flag = conf.getboolean("HeGAN", "mini_batch_flag")
            self.validation = conf.getboolean("HeGAN", "validation")
            self.emb_size = conf.getint("HeGAN", "emb_size")
            self.patience = conf.getint("HeGAN", "patience")
            self.label_smooth = conf.getfloat("HeGAN", "label_smooth")
        elif self.model_name == "HDE":
            # HDE: k-hop neighbourhood sampling with distance encoding
            # truncated at max_dist.
            self.emb_dim = conf.getint("HDE", "emb_dim")
            self.num_neighbor = conf.getint("HDE", "num_neighbor")
            self.use_bias = conf.getboolean("HDE", "use_bias")
            self.k_hop = conf.getint("HDE", "k_hop")
            self.max_epoch = conf.getint("HDE", "max_epoch")
            self.batch_size = conf.getint("HDE", "batch_size")
            self.max_dist = conf.getint("HDE", "max_dist")
            self.lr = conf.getfloat("HDE", "lr")
        elif self.model_name == "SimpleHGN":
            # SimpleHGN: GAT-style model with learnable edge-type embeddings
            # (edge_dim) and residual attention mixing weight beta.
            self.weight_decay = conf.getfloat("SimpleHGN", "weight_decay")
            self.lr = conf.getfloat("SimpleHGN", "lr")
            self.max_epoch = conf.getint("SimpleHGN", "max_epoch")
            self.seed = conf.getint("SimpleHGN", "seed")
            self.patience = conf.getint("SimpleHGN", "patience")
            self.edge_dim = conf.getint("SimpleHGN", "edge_dim")
            self.slope = conf.getfloat("SimpleHGN", "slope")
            self.feats_drop_rate = conf.getfloat("SimpleHGN", "feats_drop_rate")
            self.num_heads = conf.getint("SimpleHGN", "num_heads")
            self.hidden_dim = conf.getint("SimpleHGN", "hidden_dim")
            self.num_layers = conf.getint("SimpleHGN", "num_layers")
            self.beta = conf.getfloat("SimpleHGN", "beta")
            self.residual = conf.getboolean("SimpleHGN", "residual")
            self.mini_batch_flag = conf.getboolean("SimpleHGN", "mini_batch_flag")
            self.fanout = conf.getint("SimpleHGN", "fanout")
            self.batch_size = conf.getint("SimpleHGN", "batch_size")
            self.use_uva = conf.getboolean("SimpleHGN", "use_uva")

        elif self.model_name == "GATNE-T":
            # GATNE-T: random-walk (rw_*) based multiplex embedding model;
            # note it stores the learning rate as `learning_rate`, not `lr`.
            self.learning_rate = conf.getfloat("GATNE-T", "learning_rate")
            self.patience = conf.getint("GATNE-T", "patience")
            self.max_epoch = conf.getint("GATNE-T", "max_epoch")
            self.batch_size = conf.getint("GATNE-T", "batch_size")
            self.num_workers = conf.getint("GATNE-T", "num_workers")
            self.dim = conf.getint("GATNE-T", "dim")
            self.edge_dim = conf.getint("GATNE-T", "edge_dim")
            self.att_dim = conf.getint("GATNE-T", "att_dim")
            self.rw_length = conf.getint("GATNE-T", "rw_length")
            self.rw_walks = conf.getint("GATNE-T", "rw_walks")
            self.window_size = conf.getint("GATNE-T", "window_size")
            self.neg_size = conf.getint("GATNE-T", "neg_size")
            self.neighbor_samples = conf.getint("GATNE-T", "neighbor_samples")
            self.score_fn = conf.get("GATNE-T", "score_fn")

        elif self.model_name == "HetSANN":
            # HetSANN: type-aware attention model; fanout / use_uva apply
            # only when mini_batch_flag enables sampled training.
            self.lr = conf.getfloat("HetSANN", "lr")
            self.weight_decay = conf.getfloat("HetSANN", "weight_decay")
            self.dropout = conf.getfloat("HetSANN", "dropout")
            self.seed = conf.getint("HetSANN", "seed")
            self.hidden_dim = conf.getint("HetSANN", "hidden_dim")
            self.num_layers = conf.getint("HetSANN", "num_layers")
            self.num_heads = conf.getint("HetSANN", "num_heads")
            self.max_epoch = conf.getint("HetSANN", "max_epoch")
            self.patience = conf.getint("HetSANN", "patience")
            self.slope = conf.getfloat("HetSANN", "slope")
            self.residual = conf.getboolean("HetSANN", "residual")
            self.mini_batch_flag = conf.getboolean("HetSANN", "mini_batch_flag")
            self.batch_size = conf.getint("HetSANN", "batch_size")
            self.fanout = conf.getint("HetSANN", "fanout")
            self.use_uva = conf.getboolean("HetSANN", "use_uva")
        elif self.model_name == "ieHGCN":
            # ieHGCN: interpretable HGCN; attn_dim sizes the per-type
            # attention projection.
            self.weight_decay = conf.getfloat("ieHGCN", "weight_decay")
            self.lr = conf.getfloat("ieHGCN", "lr")
            self.max_epoch = conf.getint("ieHGCN", "max_epoch")
            self.seed = conf.getint("ieHGCN", "seed")
            self.attn_dim = conf.getint("ieHGCN", "attn_dim")
            self.num_layers = conf.getint("ieHGCN", "num_layers")
            self.mini_batch_flag = conf.getboolean("ieHGCN", "mini_batch_flag")
            self.fanout = conf.getint("ieHGCN", "fanout")
            self.batch_size = conf.getint("ieHGCN", "batch_size")
            self.hidden_dim = conf.getint("ieHGCN", "hidden_dim")
            self.out_dim = conf.getint("ieHGCN", "out_dim")
            self.patience = conf.getint("ieHGCN", "patience")
            self.bias = conf.getboolean("ieHGCN", "bias")
            self.batchnorm = conf.getboolean("ieHGCN", "batchnorm")
            self.dropout = conf.getfloat("ieHGCN", "dropout")
        elif self.model_name == "HGAT":
            # HGAT: mini-batch training is hard-disabled for this model
            # (no ini key is consulted).
            self.weight_decay = conf.getfloat("HGAT", "weight_decay")
            self.lr = conf.getfloat("HGAT", "lr")
            self.max_epoch = conf.getint("HGAT", "max_epoch")
            self.seed = conf.getint("HGAT", "seed")
            self.attn_dim = conf.getint("HGAT", "attn_dim")
            self.num_layers = conf.getint("HGAT", "num_layers")
            self.mini_batch_flag = False
            self.hidden_dim = conf.getint("HGAT", "hidden_dim")
            self.num_classes = conf.getint("HGAT", "num_classes")
            self.patience = conf.getint("HGAT", "patience")
            self.negative_slope = conf.getfloat("HGAT", "negative_slope")

        elif self.model_name == "HGSL":
            # HGSL: graph-structure learning; fs/fp/mp_eps are the sparsification
            # thresholds for the three candidate graph views.
            self.undirected_relations = conf.get("HGSL", "undirected_relations")
            self.gnn_dropout = conf.getfloat("HGSL", "gnn_dropout")
            self.fs_eps = conf.getfloat("HGSL", "fs_eps")
            self.fp_eps = conf.getfloat("HGSL", "fp_eps")
            self.mp_eps = conf.getfloat("HGSL", "mp_eps")
            self.hidden_dim = conf.getint("HGSL", "hidden_dim")
            self.num_heads = conf.getint("HGSL", "num_heads")
            self.gnn_emd_dim = conf.getint("HGSL", "gnn_emd_dim")
            self.lr = conf.getfloat("HGSL", "lr")
            self.weight_decay = conf.getfloat("HGSL", "weight_decay")
            # Mini-batch training is hard-disabled for this model.
            self.mini_batch_flag = False
            self.max_epoch = conf.getint("HGSL", "max_epoch")

        elif self.model_name == "TransE":
            self.seed = conf.getint("TransE", "seed")
            self.patience = conf.getint("TransE", "patience")
            self.batch_size = conf.getint("TransE", "batch_size")
            self.neg_size = conf.getint("TransE", "neg_size")
            self.dis_norm = conf.getint("TransE", "dis_norm")
            self.margin = conf.getfloat("TransE", "margin")
            self.hidden_dim = conf.getint("TransE", "hidden_dim")
            self.optimizer = conf.get("TransE", "optimizer")
            self.lr = conf.getfloat("TransE", "lr")
            self.weight_decay = conf.getfloat("TransE", "weight_decay")
            self.max_epoch = conf.getint("TransE", "max_epoch")
            self.score_fn = conf.get("TransE", "score_fn")
            self.filtered = conf.get("TransE", "filtered")
            self.valid_percent = conf.getfloat("TransE", "valid_percent")
            self.test_percent = conf.getfloat("TransE", "test_percent")
            self.mini_batch_flag = True

        elif self.model_name == "TransH":
            self.seed = conf.getint("TransH", "seed")
            self.patience = conf.getint("TransH", "patience")
            self.batch_size = conf.getint("TransH", "batch_size")
            self.neg_size = conf.getint("TransH", "neg_size")
            self.dis_norm = conf.getint("TransH", "dis_norm")
            self.margin = conf.getfloat("TransH", "margin")
            self.hidden_dim = conf.getint("TransH", "hidden_dim")
            self.optimizer = conf.get("TransH", "optimizer")
            self.lr = conf.getfloat("TransH", "lr")
            self.weight_decay = conf.getfloat("TransH", "weight_decay")
            self.max_epoch = conf.getint("TransH", "max_epoch")
            self.score_fn = conf.get("TransH", "score_fn")
            self.filtered = conf.get("TransH", "filtered")
            self.valid_percent = conf.getfloat("TransH", "valid_percent")
            self.test_percent = conf.getfloat("TransH", "test_percent")
            self.mini_batch_flag = True

        elif self.model_name == "TransR":
            self.seed = conf.getint("TransR", "seed")
            self.patience = conf.getint("TransR", "patience")
            self.batch_size = conf.getint("TransR", "batch_size")
            self.neg_size = conf.getint("TransR", "neg_size")
            self.dis_norm = conf.getint("TransR", "dis_norm")
            self.margin = conf.getfloat("TransR", "margin")
            self.ent_dim = conf.getint("TransR", "ent_dim")
            self.rel_dim = conf.getint("TransR", "rel_dim")
            self.optimizer = conf.get("TransR", "optimizer")
            self.lr = conf.getfloat("TransR", "lr")
            self.weight_decay = conf.getfloat("TransR", "weight_decay")
            self.max_epoch = conf.getint("TransR", "max_epoch")
            self.score_fn = conf.get("TransR", "score_fn")
            self.filtered = conf.get("TransR", "filtered")
            self.valid_percent = conf.getfloat("TransR", "valid_percent")
            self.test_percent = conf.getfloat("TransR", "test_percent")
            self.mini_batch_flag = True

        elif self.model_name == "TransD":
            self.seed = conf.getint("TransD", "seed")
            self.patience = conf.getint("TransD", "patience")
            self.batch_size = conf.getint("TransD", "batch_size")
            self.neg_size = conf.getint("TransD", "neg_size")
            self.dis_norm = conf.getint("TransD", "dis_norm")
            self.margin = conf.getfloat("TransD", "margin")
            self.ent_dim = conf.getint("TransD", "ent_dim")
            self.rel_dim = conf.getint("TransD", "rel_dim")
            self.optimizer = conf.get("TransD", "optimizer")
            self.lr = conf.getfloat("TransD", "lr")
            self.weight_decay = conf.getfloat("TransD", "weight_decay")
            self.max_epoch = conf.getint("TransD", "max_epoch")
            self.score_fn = conf.get("TransD", "score_fn")
            self.filtered = conf.get("TransD", "filtered")
            self.valid_percent = conf.getfloat("TransD", "valid_percent")
            self.test_percent = conf.getfloat("TransD", "test_percent")
            self.mini_batch_flag = True

        elif self.model_name == "GIE":
            self.seed = conf.getint("GIE", "seed")
            self.patience = conf.getint("GIE", "patience")
            self.batch_size = conf.getint("GIE", "batch_size")
            self.neg_size = conf.getint("GIE", "neg_size")
            self.dis_norm = conf.getint("GIE", "dis_norm")
            self.margin = conf.getfloat("GIE", "margin")
            self.hidden_dim = conf.getint("GIE", "hidden_dim")
            self.optimizer = conf.get("GIE", "optimizer")
            self.lr = conf.getfloat("GIE", "lr")
            self.weight_decay = conf.getfloat("GIE", "weight_decay")
            self.max_epoch = conf.getint("GIE", "max_epoch")
            self.score_fn = conf.get("GIE", "score_fn")
            self.filtered = conf.get("GIE", "filtered")
            self.valid_percent = conf.getfloat("GIE", "valid_percent")
            self.test_percent = conf.getfloat("GIE", "test_percent")
            self.mini_batch_flag = True

        elif self.model_name == "GIN":
            # GIN: graph isomorphism network; learn_eps toggles the learnable
            # epsilon in the aggregation.
            self.hidden_dim = conf.getint("GIN", "hidden_dim")
            self.batch_size = conf.getint("GIN", "batch_size")
            self.lr = conf.getfloat("GIN", "lr")
            self.num_layers = conf.getint("GIN", "num_layers")
            self.out_dim = conf.getint("GIN", "out_dim")
            self.input_dim = conf.getint("GIN", "input_dim")
            self.weight_decay = conf.getfloat("GIN", "weight_decay")
            self.max_epoch = conf.getint("GIN", "max_epoch")
            self.patience = conf.getint("GIN", "patience")
            self.mini_batch_flag = conf.getboolean("GIN", "mini_batch_flag")
            self.learn_eps = conf.getboolean("GIN", "learn_eps")
            self.aggregate = conf.get("GIN", "aggregate")
            self.fanout = conf.getint("GIN", "fanout")

        elif self.model_name == "RGAT":
            # RGAT: relational GAT; mini-batch training hard-disabled here.
            self.weight_decay = conf.getfloat("RGAT", "weight_decay")
            self.lr = conf.getfloat("RGAT", "lr")
            self.max_epoch = conf.getint("RGAT", "max_epoch")
            self.seed = conf.getint("RGAT", "seed")
            self.num_layers = conf.getint("RGAT", "num_layers")
            self.mini_batch_flag = False
            self.hidden_dim = conf.getint("RGAT", "hidden_dim")
            self.in_dim = conf.getint("RGAT", "in_dim")
            self.patience = conf.getint("RGAT", "patience")
            self.num_heads = conf.getint("RGAT", "num_heads")
            self.dropout = conf.getfloat("RGAT", "dropout")
            self.out_dim = conf.getint("RGAT", "out_dim")

        elif self.model_name == "Rsage":
            # Rsage: relational GraphSAGE; mini-batch training hard-disabled.
            self.weight_decay = conf.getfloat("Rsage", "weight_decay")
            self.lr = conf.getfloat("Rsage", "lr")
            self.max_epoch = conf.getint("Rsage", "max_epoch")
            self.seed = conf.getint("Rsage", "seed")
            self.num_layers = conf.getint("Rsage", "num_layers")
            self.mini_batch_flag = False
            self.hidden_dim = conf.getint("Rsage", "hidden_dim")
            self.in_dim = conf.getint("Rsage", "in_dim")
            self.patience = conf.getint("Rsage", "patience")
            self.aggregator_type = conf.get("Rsage", "aggregator_type")
            self.dropout = conf.getfloat("Rsage", "dropout")
            self.out_dim = conf.getint("Rsage", "out_dim")

        elif self.model_name == "Mg2vec":
            # Mg2vec: metagraph embedding; note the ini section is spelled
            # "MG2VEC" while the model name is "Mg2vec".
            self.lr = conf.getfloat("MG2VEC", "learning_rate")
            self.max_epoch = conf.getint("MG2VEC", "max_epoch")
            self.emb_dimension = conf.getint("MG2VEC", "dim")
            self.batch_size = conf.getint("MG2VEC", "batch_size")
            self.num_workers = conf.getint("MG2VEC", "num_workers")
            self.sample_num = conf.getint("MG2VEC", "sample_num")
            self.alpha = conf.getfloat("MG2VEC", "alpha")
            self.seed = conf.getint("MG2VEC", "seed")
        elif self.model_name == "DHNE":
            self.lr = conf.getfloat("DHNE", "lr")
            emb_size = conf.getint("DHNE", "embedding_sizes")
            self.embedding_sizes = [emb_size, emb_size, emb_size]
            self.prefix_path = conf.get("DHNE", "prefix_path")
            self.hidden_size = conf.getint("DHNE", "hidden_size")
            self.epochs_to_train = conf.getint("DHNE", "epochs_to_train")
            self.max_epoch = conf.getint("DHNE", "max_epoch")
            self.batch_size = conf.getint("DHNE", "batch_size")
            self.alpha = conf.getfloat("DHNE", "alpha")
            self.num_neg_samples = conf.getint("DHNE", "num_neg_samples")
            self.seed = conf.getint("DHNE", "seed")
            self.dim_features = conf.get("DHNE", "dim_features")
            self.max_epoch = conf.getint("DHNE", "max_epoch")
            self.mini_batch_flag = True

        elif self.model_name == "DiffMG":
            # DiffMG: differentiable meta-graph search; the search_* options
            # configure the architecture-search phase, the Amazon_* seeds the
            # dataset-specific preprocessing/search runs.
            self.lr = conf.getfloat("DiffMG", "lr")
            self.wd = conf.getfloat("DiffMG", "wd")
            self.dropout = conf.getfloat("DiffMG", "dropout")
            self.max_epoch = conf.getint("DiffMG", "max_epoch")
            self.hidden_dim = conf.getint("DiffMG", "hidden_dim")
            self.Amazon_train_seed = conf.getint("DiffMG", "Amazon_train_seed")
            self.Amazon_preprocess_seed = conf.getint(
                "DiffMG", "Amazon_preprocess_seed"
            )
            self.Amazon_gen_neg_seed = conf.getint("DiffMG", "Amazon_gen_neg_seed")
            self.embedding_sizes = conf.getint("DiffMG", "embedding_sizes")
            self.mini_batch_flag = conf.getboolean("DiffMG", "mini_batch_flag")
            self.attn_dim = conf.getint("DiffMG", "attn_dim")
            self.Amazon_search_seed = conf.getint("DiffMG", "Amazon_search_seed")
            self.search_lr = conf.getfloat("DiffMG", "search_lr")
            self.search_wd = conf.getfloat("DiffMG", "search_wd")
            self.search_alr = conf.getfloat("DiffMG", "search_alr")
            self.search_eps = conf.getfloat("DiffMG", "search_eps")
            self.search_decay = conf.getfloat("DiffMG", "search_decay")
            self.search_steps_s = conf.getint("DiffMG", "search_steps_s")
            self.search_steps_t = conf.getint("DiffMG", "search_steps_t")
            self.search_epochs = conf.getint("DiffMG", "search_epochs")
            # self.use_norm = conf.get("DiffMG", "use_norm")
            # self.out_nl = conf.get("DiffMG", "out_nl")

        elif self.model_name == "MeiREC":
            # MeiREC: recommendation model; ini keys vocab_size / train_epochs
            # map onto the attribute names vocab / max_epoch used elsewhere.
            self.lr = conf.getfloat("MeiREC", "lr")
            self.weight_decay = conf.getfloat("MeiREC", "weight_decay")
            self.vocab = conf.getint("MeiREC", "vocab_size")
            self.max_epoch = conf.getint("MeiREC", "train_epochs")
            self.batch_num = conf.getint("MeiREC", "batch_num")

        elif self.model_name == "AEHCL":
            # AEHCL: anomaly detection via contrastive learning; the weight_*
            # options balance the intra-pair / intra-multi / inter loss terms.
            self.lr = conf.getfloat("AEHCL", "lr")
            self.hidden_dim = conf.getint("AEHCL", "hidden_dim")
            self.weight_intra_pair = conf.getfloat("AEHCL", "weight_intra_pair")
            self.weight_intra_multi = conf.getfloat("AEHCL", "weight_intra_multi")
            self.weight_inter = conf.getfloat("AEHCL", "weight_inter")
            self.num_of_attention_heads = conf.getint("AEHCL", "num_of_attention_heads")
            self.t = conf.getfloat("AEHCL", "t")
            self.batch_size = conf.getint("AEHCL", "batch_size")
            self.weight_decay = conf.getfloat("AEHCL", "weight_decay")
            self.eval_epoch = conf.getint("AEHCL", "eval_epoch")
            self.max_epoch = conf.getint("AEHCL", "max_epoch")
            self.neg_num = conf.getint("AEHCL", "neg_num")

        elif self.model_name == "KGAT":
            # KGAT: knowledge-graph attention recommender with separate batch
            # sizes for the CF and KG training phases.
            self.seed = conf.getint("KGAT", "seed")
            self.max_epoch = conf.getint("KGAT", "max_epoch")
            self.stopping_steps = conf.getint("KGAT", "stopping_steps")
            self.use_pretrain = conf.getint("KGAT", "use_pretrain")
            self.lr = conf.getfloat("KGAT", "lr")
            self.aggregation_type = conf.get("KGAT", "aggregation_type")
            self.entity_dim = conf.getint("KGAT", "entity_dim")
            self.relation_dim = conf.getint("KGAT", "relation_dim")
            # NOTE(review): conv_dim_list / mess_dropout are kept as raw
            # strings here — presumably parsed downstream; confirm the
            # consumer before changing the type.
            self.conv_dim_list = conf.get("KGAT", "conv_dim_list")
            self.mess_dropout = conf.get("KGAT", "mess_dropout")
            self.cf_l2loss_lambda = conf.getfloat("KGAT", "cf_l2loss_lambda")
            self.kg_l2loss_lambda = conf.getfloat("KGAT", "kg_l2loss_lambda")
            self.cf_batch_size = conf.getint("KGAT", "cf_batch_size")
            self.kg_batch_size = conf.getint("KGAT", "kg_batch_size")
            self.test_batch_size = conf.getint("KGAT", "test_batch_size")
            self.multi_gpu = conf.getboolean("KGAT", "multi_gpu")
            self.K = conf.getint("KGAT", "K")

        elif self.model_name == "DSSL":
            # DSSL: decoupled self-supervised learning; note it stores its
            # epoch budget as `epochs` rather than `max_epoch`.
            self.epochs = conf.getint("DSSL", "epochs")
            self.lr = conf.getfloat("DSSL", "lr")
            self.weight_decay = conf.getfloat("DSSL", "weight_decay")
            self.hidden_channels = conf.getint("DSSL", "hidden_channels")
            self.num_layers = conf.getint("DSSL", "num_layers")
            self.dropout = conf.getfloat("DSSL", "dropout")
            self.normalize_features = conf.getboolean("DSSL", "normalize_features")
            self.seed = conf.getint("DSSL", "seed")
            self.display_step = conf.getint("DSSL", "display_step")
            self.train_prop = conf.getfloat("DSSL", "train_prop")
            self.valid_prop = conf.getfloat("DSSL", "valid_prop")
            self.batch_size = conf.getint("DSSL", "batch_size")
            self.rand_split = conf.getboolean("DSSL", "rand_split")
            self.embedding_dim = conf.getint("DSSL", "embedding_dim")
            self.neighbor_max = conf.getint("DSSL", "neighbor_max")
            self.cluster_num = conf.getint("DSSL", "cluster_num")
            self.no_bn = conf.getboolean("DSSL", "no_bn")
            self.alpha = conf.getfloat("DSSL", "alpha")
            self.gamma = conf.getfloat("DSSL", "gamma")
            self.entropy = conf.getfloat("DSSL", "entropy")
            self.tau = conf.getfloat("DSSL", "tau")
            self.encoder = conf.get("DSSL", "encoder")
            self.mlp_bool = conf.getint("DSSL", "mlp_bool")
            self.tao = conf.getfloat("DSSL", "tao")
            self.beta = conf.getfloat("DSSL", "beta")
            self.mlp_inference_bool = conf.getint("DSSL", "mlp_inference_bool")
            self.neg_alpha = conf.getint("DSSL", "neg_alpha")
            self.load_json = conf.getint("DSSL", "load_json")


        elif model == 'SHGP':
            self.dataset = conf.get("SHGP", 'dataset')
            self.target_type = conf.get("SHGP", 'target_type')
            self.train_percent = conf.getfloat("SHGP", 'train_percent')
            self.hidden_dim = re.findall(r'\[(.*?)\]', conf.get("SHGP", 'hidden_dim'))[0]
            self.hidden_dim = [int(s) for s in self.hidden_dim.split(',')]
            self.epochs = conf.getint("SHGP", 'epochs')
            self.lr = conf.getfloat("SHGP", 'lr')
            self.l2_coef = conf.getfloat("SHGP", 'l2_coef')
            self.type_fusion = conf.get("SHGP", 'type_fusion')
            self.type_att_size = conf.getint("SHGP", 'type_att_size')
            self.warm_epochs = conf.getint("SHGP", 'warm_epochs')
            self.compress_ratio = conf.getfloat("SHGP", 'compress_ratio')
            self.cuda = conf.getint("SHGP", 'cuda')

        elif model == 'HGCL':
            self.lr = conf.getfloat("HGCL", "lr")
            self.batch = conf.getint("HGCL", "batch")
            self.wu1 = conf.getfloat("HGCL", "wu1")
            self.wu2 = conf.getfloat("HGCL", "wu2")
            self.wi1 = conf.getfloat("HGCL", "wi1")
            self.wi2 = conf.getfloat("HGCL", "wi2")
            self.epochs = conf.getint("HGCL", "epochs")
            self.topk = conf.getint("HGCL", "topk")
            self.hide_dim = conf.getint("HGCL", "hide_dim")
            self.reg = conf.getfloat("HGCL", "reg")
            self.metareg = conf.getfloat("HGCL", "metareg")
            self.ssl_temp = conf.getfloat("HGCL", "ssl_temp")
            self.ssl_ureg = conf.getfloat("HGCL", "ssl_ureg")
            self.ssl_ireg = conf.getfloat("HGCL", "ssl_ireg")
            self.ssl_reg = conf.getfloat("HGCL", "ssl_reg")
            self.ssl_beta = conf.getfloat("HGCL", "ssl_beta")
            self.rank = conf.getint("HGCL", "rank")
            self.Layers = conf.getint("HGCL", "Layers")


        # lightGCN: recommendation model — embedding/layer sizes and eval batching.
        elif self.model_name == 'lightGCN':
            self.lr = conf.getfloat("lightGCN", "lr")
            self.weight_decay = conf.getfloat("lightGCN", "weight_decay")
            self.max_epoch = conf.getint("lightGCN", "max_epoch")
            self.batch_size = conf.getint("lightGCN", "batch_size")
            self.embedding_size = conf.getint("lightGCN", "embedding_size")
            self.num_layers = conf.getint("lightGCN", "num_layers")
            self.test_u_batch_size = conf.getint("lightGCN", "test_u_batch_size")
            self.topks = conf.getint("lightGCN", "topks")
            # self.alpha = conf.getfloat("lightGCN", "alpha")
        # SeHGNN: multi-stage training — paths, hop counts, per-layer dropouts.
        elif self.model_name == 'SeHGNN':
            self.seeds = conf.getint("SeHGNN", 'seeds')
            self.dataset = conf.get("SeHGNN", 'dataset')
            self.gpu = conf.getint("SeHGNN", 'gpu')
            self.cpu = conf.getboolean("SeHGNN", 'cpu')
            self.root = conf.get("SeHGNN", 'root')
            self.stages = conf.get("SeHGNN", 'stages')
            self.emb_path = conf.get("SeHGNN", 'emb_path')
            self.extra_embedding = conf.get("SeHGNN", 'extra_embedding')
            self.embed_size = conf.getint("SeHGNN", 'embed_size')
            self.num_hops = conf.getint("SeHGNN", 'num_hops')
            self.label_feats = conf.getboolean("SeHGNN", 'label_feats')
            self.num_label_hops = conf.getint("SeHGNN", 'num_label_hops')
            self.hidden = conf.getint("SeHGNN", 'hidden')
            self.dropout = conf.getfloat("SeHGNN", 'dropout')
            self.n_layers_1 = conf.getint("SeHGNN", 'n_layers_1')
            self.n_layers_2 = conf.getint("SeHGNN", 'n_layers_2')
            self.n_layers_3 = conf.getint("SeHGNN", 'n_layers_3')
            self.input_drop = conf.getfloat("SeHGNN", 'input_drop')
            self.att_drop = conf.getfloat("SeHGNN", 'att_drop')
            self.label_drop = conf.getfloat("SeHGNN", 'label_drop')
            self.residual = conf.getboolean("SeHGNN", 'residual')
            self.act = conf.get("SeHGNN", 'act')
            self.bns = conf.getboolean("SeHGNN", 'bns')
            self.label_bns = conf.getboolean("SeHGNN", 'label_bns')
            self.amp = conf.getboolean("SeHGNN", 'amp')
            self.lr = conf.getfloat("SeHGNN", 'lr')
            # NOTE(review): weight_decay is read with getint here while every
            # other model uses getfloat — works only if the ini value is an
            # integer (e.g. 0); confirm this is intentional.
            self.weight_decay = conf.getint("SeHGNN", 'weight_decay')
            self.eval_every = conf.getint("SeHGNN", 'eval_every')
            self.batch_size = conf.getint("SeHGNN", 'batch_size')
            self.patience = conf.getint("SeHGNN", 'patience')
            self.threshold = conf.getfloat("SeHGNN", 'threshold')
            self.gama = conf.getint("SeHGNN", 'gama')
            self.start_stage = conf.getint("SeHGNN", 'start_stage')
            self.reload = conf.get("SeHGNN", 'reload')
            self.label_residual = conf.getboolean("SeHGNN", 'label_residual')

        # Ingram: inductive KG embedding — entity/relation dims and negatives.
        elif self.model_name == 'Ingram':
            self.margin = conf.getint("Ingram", "margin")
            self.lr = conf.getfloat("Ingram", "lr")
            self.nle = conf.getint("Ingram", "nle")
            self.nlr = conf.getint("Ingram", "nlr")
            self.d_e = conf.getint("Ingram", "d_e")
            self.d_r = conf.getint("Ingram", "d_r")
            self.hdr_e = conf.getint("Ingram", "hdr_e")
            self.hdr_r = conf.getint("Ingram", "hdr_r")
            self.num_bin = conf.getint("Ingram", "num_bin")
            self.num_epoch = conf.getint("Ingram", "num_epoch")
            self.validation_epoch = conf.getint("Ingram", "validation_epoch")
            self.num_head = conf.getint("Ingram", "num_head")
            self.num_neg = conf.getint("Ingram", "num_neg")

        # AdapropT: adaptive propagation (transductive) — layer/topk settings.
        elif self.model_name == "AdapropT":
            self.lr = conf.getfloat("AdapropT", "lr")
            self.decay_rate = conf.getfloat("AdapropT", "decay_rate")
            self.lamb = conf.getfloat("AdapropT", "lamb")
            self.hidden_dim = conf.getint("AdapropT", "hidden_dim")
            self.attn_dim = conf.getint("AdapropT", "attn_dim")
            self.dropout = conf.getfloat("AdapropT", "dropout")
            self.n_edge_topk = conf.getint("AdapropT", "n_edge_topk")
            self.n_layer = conf.getint("AdapropT", "n_layer")
            self.n_batch = conf.getint("AdapropT", "n_batch")
            self.n_node_topk = conf.getint("AdapropT", "n_node_topk")
            self.seed = conf.getint("AdapropT", "seed")
            self.topk = conf.getint("AdapropT", "topk")
            self.data_path = conf.get("AdapropT", "data_path")
            self.layers = conf.getint("AdapropT", "layers")
            self.sampling = conf.get("AdapropT", "sampling")
            self.train = conf.getboolean("AdapropT", "train")
            self.scheduler = conf.get("AdapropT", "scheduler")
            self.fact_ratio = conf.getfloat("AdapropT", "fact_ratio")
            self.epoch = conf.getint("AdapropT", "epoch")
            self.eval_interval = conf.getint("AdapropT", "eval_interval")
            self.remove_1hop_edges = conf.getboolean("AdapropT", "remove_1hop_edges")
            self.act = conf.get("AdapropT", "act")
            self.tau = conf.getfloat("AdapropT", "tau")
            self.weight = conf.get("AdapropT", "weight")
            self.n_tbatch = conf.getint("AdapropT", "n_tbatch")
            self.eval = conf.getboolean("AdapropT", 'eval')

        # AdapropI: inductive variant — only path and seed come from the ini.
        elif self.model_name == "AdapropI":
            self.data_path = conf.get("AdapropI", "data_path")
            self.seed = conf.getint("AdapropI", "seed")

        # LTE: linear-transformation-based KG embedding — GCN + ConvE options.
        elif self.model_name == 'LTE':
            self.model_name_GCN=conf.get("LTE", "model_name_GCN")
            self.name=conf.get("LTE", "name")
            self.data=conf.get("LTE", "data")
            self.score_func=conf.get("LTE", "score_func")
            self.opn=conf.get("LTE", "opn")
            self.hid_drop=conf.getfloat("LTE", "hid_drop")
            self.gpu=conf.getint("LTE", "gpu")
            self.x_ops=conf.get("LTE", "x_ops")
            self.n_layer=conf.getint("LTE", "n_layer")
            self.init_dim=conf.getint("LTE", "init_dim")
            self.batch_size=conf.getint("LTE", "batch_size")
            self.epoch=conf.getint("LTE", "epoch")
            self.l2=conf.getfloat("LTE", "l2")
            self.lr=conf.getfloat("LTE", "lr")
            self.lbl_smooth=conf.getfloat("LTE", "lbl_smooth")
            self.num_workers=conf.getint("LTE", "num_workers")
            self.seed=conf.getint("LTE", "seed")
            self.restore=conf.getboolean("LTE", "restore")
            self.bias=conf.getboolean("LTE", "bias")
            self.num_bases=conf.getint("LTE", "num_bases")
            self.gcn_dim=conf.getint("LTE", "gcn_dim")
            self.gcn_drop=conf.getfloat("LTE", "gcn_drop")
            # ConvE-decoder shape parameters (k_w * k_h reshaping, filters).
            self.conve_hid_drop=conf.getfloat("LTE", "conve_hid_drop")
            self.feat_drop=conf.getfloat("LTE", "feat_drop")
            self.input_drop=conf.getfloat("LTE", "input_drop")
            self.k_w=conf.getint("LTE", "k_w")
            self.k_h=conf.getint("LTE", "k_h")
            self.num_filt=conf.getint("LTE", "num_filt")
            self.ker_sz=conf.getint("LTE", "ker_sz")
            self.gamma=conf.getfloat("LTE", "gamma")
            self.rat=conf.getboolean("LTE", "rat")
            self.wni=conf.getboolean("LTE", "wni")
            self.wsi=conf.getboolean("LTE", "wsi")
            self.ss=conf.getboolean("LTE", "ss")
            self.nobn=conf.getboolean("LTE", "nobn")
            self.noltr=conf.getboolean("LTE", "noltr")
            self.encoder=conf.get("LTE", "encoder")
            self.max_epochs=conf.getint("LTE", "max_epochs")
            
        # SACN: structure-aware convolutional network — conv decoder settings.
        elif self.model_name == 'SACN':
            self.seed=conf.getint("SACN","seed")
            self.init_emb_size=conf.getint("SACN","init_emb_size")
            self.gc1_emb_size=conf.getint("SACN","gc1_emb_size")
            self.embedding_dim=conf.getint("SACN","embedding_dim")
            # NOTE(review): input_dropout is read with getint while
            # dropout_rate uses getfloat — only works for integer ini values
            # (e.g. 0); confirm this is intentional.
            self.input_dropout=conf.getint("SACN","input_dropout")
            self.dropout_rate=conf.getfloat("SACN","dropout_rate")
            self.channels=conf.getint("SACN","channels")
            self.kernel_size=conf.getint("SACN","kernel_size")
            self.gpu=conf.getint("SACN","gpu")
            self.lr=conf.getfloat("SACN","lr")
            self.n_epochs=conf.getint("SACN","n_epochs")
            self.num_workers=conf.getint("SACN","num_workers")
            self.eval_every=conf.getint("SACN","eval_every")
            self.dataset_data=conf.get("SACN","dataset_data")
            self.batch_size=conf.getint("SACN","batch_size")
            self.patience=conf.getint("SACN","patience")
            self.decoder=conf.get("SACN","decoder")
            self.gamma=conf.getfloat("SACN","gamma")
            self.name=conf.get("SACN","name")
            self.n_layer=conf.getint("SACN","n_layer")
            self.rat=conf.getboolean("SACN","rat")
            self.wsi=conf.getboolean("SACN","wsi")
            self.wni=conf.getboolean("SACN","wni")
            # NOTE(review): ss is getint here but getboolean in the LTE branch
            # — verify the two consumers expect different types.
            self.ss=conf.getint("SACN","ss")
            self.final_act=conf.getboolean("SACN","final_act")
            self.final_bn=conf.getboolean("SACN","final_bn")
            self.final_drop=conf.getboolean("SACN","final_drop")

        # NOTE(review): an exact duplicate of the 'Ingram' branch above used to
        # live here. Because an if/elif chain executes only the first matching
        # branch, this duplicate was unreachable dead code, so it was removed.
        # RedGNN: relational digraph GNN — optimizer plus attention settings.
        elif self.model_name == 'RedGNN':
            self.seed = conf.getint("RedGNN", "seed")
            self.patience = conf.getint("RedGNN", "patience")
            self.batch_size = conf.getint("RedGNN", "batch_size")
            self.optimizer = conf.get("RedGNN", "optimizer")
            self.lr = conf.getfloat("RedGNN", "lr")
            self.weight_decay = conf.getfloat("RedGNN", "weight_decay")
            self.max_epoch = conf.getint("RedGNN", "max_epoch")
            self.decay_rate = conf.getfloat("RedGNN", "decay_rate")
            self.hidden_dim = conf.getint("RedGNN", "hidden_dim")
            self.attn_dim = conf.getint("RedGNN", "attn_dim")
            self.dropout = conf.getfloat("RedGNN", "dropout")
            self.act = conf.get("RedGNN", "act")
            self.n_layer = conf.getint("RedGNN", "n_layer")

        # RedGNNT: same options as RedGNN plus a test-time batch size.
        elif self.model_name == 'RedGNNT':
            self.seed = conf.getint("RedGNNT", "seed")
            self.patience = conf.getint("RedGNNT", "patience")
            self.batch_size = conf.getint("RedGNNT", "batch_size")
            self.n_tbatch = conf.getint("RedGNNT", "n_tbatch")
            self.optimizer = conf.get("RedGNNT", "optimizer")
            self.lr = conf.getfloat("RedGNNT", "lr")
            self.weight_decay = conf.getfloat("RedGNNT", "weight_decay")
            self.max_epoch = conf.getint("RedGNNT", "max_epoch")
            self.decay_rate = conf.getfloat("RedGNNT", "decay_rate")
            self.hidden_dim = conf.getint("RedGNNT", "hidden_dim")
            self.attn_dim = conf.getint("RedGNNT", "attn_dim")
            self.dropout = conf.getfloat("RedGNNT", "dropout")
            self.act = conf.get("RedGNNT", "act")
            self.n_layer = conf.getint("RedGNNT", "n_layer")

        elif self.model_name == 'ExpressGNN':
            self.embedding_size = conf.getint('ExpressGNN', 'embedding_size')
            self.gcn_free_size = conf.getint("ExpressGNN", "gcn_free_size")
            self.filtered = conf.get("ExpressGNN", "filtered")
            self.hidden_dim = conf.getint("ExpressGNN", "hidden_dim")
            self.rule_weights_learning = conf.getint("ExpressGNN", "rule_weights_learning")
            self.load_method = conf.getint("ExpressGNN", "load_method")
            self.num_epochs = conf.getint("ExpressGNN", "num_epochs")

            self.slice_dim = conf.getint("ExpressGNN", "slice_dim")
            self.no_train = conf.getint("ExpressGNN", "no_train")
            self.hidden_dim = conf.getint("ExpressGNN", "hidden_dim")
            self.num_epochs = conf.getint("ExpressGNN", "num_epochs")
            self.batchsize = conf.getint("ExpressGNN", "batchsize")
            self.trans = conf.getint("ExpressGNN", "trans")
            self.num_hops = conf.getint("ExpressGNN", "num_hops")
            self.num_mlp_layers = conf.getint("ExpressGNN", "num_mlp_layers")
            self.num_epochs = conf.getint("ExpressGNN", "num_epochs")

            self.num_batches = conf.getint("ExpressGNN", "num_batches")
            self.learning_rate = conf.getfloat("ExpressGNN", "learning_rate")
            self.lr_decay_factor = conf.getfloat("ExpressGNN", "lr_decay_factor")
            self.lr_decay_patience = conf.getint("ExpressGNN", "lr_decay_patience")
            self.lr_decay_min = conf.getfloat("ExpressGNN", "lr_decay_min")
            self.patience = conf.getint("ExpressGNN", "patience")
            self.l2_coef = conf.getfloat("ExpressGNN", "l2_coef")
            self.observed_prob = conf.getfloat("ExpressGNN", "observed_prob")
            self.entropy_temp = conf.getint("ExpressGNN", "entropy_temp")
            self.no_entropy = conf.getint("ExpressGNN", "no_entropy")
            self.learning_rate_rule_weights = conf.getfloat("ExpressGNN", "learning_rate_rule_weights")
            self.epoch_mode = conf.getint("ExpressGNN", "epoch_mode")
            self.shuffle_sampling = conf.getint("ExpressGNN", "shuffle_sampling")

            self.load_method = conf.getint("ExpressGNN", "load_method")
            self.load_s = conf.getint("ExpressGNN", "load_s")
            self.use_gcn = conf.getint("ExpressGNN", "use_gcn")
            self.filter_latent = conf.getint("ExpressGNN", "filter_latent")
            self.closed_world = conf.getint("ExpressGNN", "closed_world")
            self.seed = conf.getint("ExpressGNN", "seed")

        # Grail: subgraph-based inductive relation prediction — training loop,
        # subgraph extraction and RGCN encoder settings.
        elif self.model_name =='Grail':
            self.num_epochs = conf.getint("Grail", "num_epochs")
            self.eval_every = conf.getint("Grail","eval_every")
            self.eval_every_iter = conf.getint("Grail","eval_every_iter")
            self.save_every = conf.getint("Grail","save_every")
            self.early_stop = conf.getint("Grail","early_stop")
            self.optimizer = conf.get("Grail","optimizer")
            self.lr = conf.getfloat("Grail","lr")
            self.clip = conf.getint("Grail","clip")
            self.l2 = conf.getfloat("Grail","l2")
            self.margin = conf.getint("Grail","margin")
            # Subgraph sampling options.
            self.max_links = conf.getint("Grail","max_links")
            self.hop = conf.getint("Grail","hop")
            self.max_nodes_per_hop= conf.getint("Grail","max_nodes_per_hop")
            self.use_kge_embeddings = conf.getboolean("Grail","use_kge_embeddings")
            self.kge_model = conf.get("Grail","kge_model")
            self.model_type =conf.get("Grail","model_type")
            self.constrained_neg_prob= conf.getfloat("Grail","constrained_neg_prob")
            self.batch_size  = conf.getint("Grail","batch_size")
            self.num_neg_samples_per_link = conf.getint("Grail","num_neg_samples_per_link")
            self.num_workers = conf.getint("Grail","num_workers")
            self.add_traspose_rels = conf.getboolean("Grail","add_traspose_rels")
            self.enclosing_sub_graph = conf.getboolean("Grail","enclosing_sub_graph")
            # RGCN encoder dimensions.
            self.rel_emb_dim = conf.getint("Grail","rel_emb_dim")
            self.attn_rel_emb_dim = conf.getint("Grail","attn_rel_emb_dim")
            self.emb_dim = conf.getint("Grail","emb_dim")
            self.num_gcn_layers = conf.getint("Grail","num_gcn_layers")
            self.num_bases = conf.getint("Grail","num_bases")
            self.dropout = conf.getfloat("Grail","dropout")
            self.edge_dropout = conf.getfloat("Grail", "edge_dropout")
            self.gnn_agg_type = conf.get("Grail","gnn_agg_type")
            self.add_ht_emb = conf.getboolean("Grail","add_ht_emb")
            self.has_attn = conf.getboolean("Grail", "has_attn")
            self.mode = conf.get("Grail","mode")

        elif self.model_name =='ComPILE':
            self.num_epochs = conf.getint("ComPILE", "num_epochs")
            self.eval_every = conf.getint("ComPILE","eval_every")
            self.eval_every_iter = conf.getint("ComPILE","eval_every_iter")
            self.save_every = conf.getint("ComPILE","save_every")
            self.early_stop = conf.getint("ComPILE","early_stop")
            self.optimizer = conf.get("ComPILE","optimizer")
            self.lr = conf.getfloat("ComPILE","lr")
            self.clip = conf.getint("ComPILE","clip")
            self.l2 = conf.getfloat("ComPILE","l2")
            self.margin = conf.getint("ComPILE","margin")
            self.max_links = conf.getint("ComPILE","max_links")
            self.hop = conf.getint("ComPILE","hop")
            self.max_nodes_per_hop= conf.getint("ComPILE","max_nodes_per_hop")
            self.use_kge_embeddings = conf.getboolean("ComPILE","use_kge_embeddings")
            self.kge_model = conf.get("ComPILE","kge_model")
            self.model_type =conf.get("ComPILE","model_type")
            self.constrained_neg_prob= conf.getfloat("ComPILE","constrained_neg_prob")
            self.batch_size  = conf.getint("ComPILE","batch_size")
            self.num_neg_samples_per_link = conf.getint("Grail","num_neg_samples_per_link")
            self.num_workers = conf.getint("ComPILE","num_workers")
            self.add_traspose_rels = conf.getboolean("ComPILE","add_traspose_rels")
            self.enclosing_sub_graph = conf.getboolean("ComPILE","enclosing_sub_graph")
            self.rel_emb_dim = conf.getint("ComPILE","rel_emb_dim")
            self.attn_rel_emb_dim = conf.getint("ComPILE","attn_rel_emb_dim")
            self.emb_dim = conf.getint("ComPILE","emb_dim")
            self.num_gcn_layers = conf.getint("ComPILE","num_gcn_layers")
            self.num_bases = conf.getint("ComPILE","num_bases")
            self.dropout = conf.getfloat("ComPILE","dropout")
            self.edge_dropout = conf.getfloat("ComPILE", "edge_dropout")
            self.gnn_agg_type = conf.get("ComPILE","gnn_agg_type")
            self.add_ht_emb = conf.getboolean("ComPILE","add_ht_emb")
            self.has_attn = conf.getboolean("ComPILE", "has_attn")
            self.mode = conf.get("ComPILE","mode")

        # SIAN: social influence attention network — user/item counts and eval.
        elif self.model_name == 'SIAN':
            self.user_num = conf.getint("SIAN", "user_num")
            self.item_num = conf.getint("SIAN","item_num")
            self.profile_size = conf.getint("SIAN","profile_size")
            self.batch_size = conf.getint("SIAN", "batch_size")
            self.emb_size = conf.getint("SIAN", "emb_size")
            self.lr = conf.getfloat("SIAN", "lr")
            self.weight_decay = conf.getfloat("SIAN", "weight_decay")
            self.test_batch_size = conf.getint("SIAN", "test_batch_size")
            self.epochs = conf.getint("SIAN", "epochs")
            self.eval_num = conf.getint("SIAN", "eval_num")
            self.top_k = conf.getint("SIAN", "top_k")
            self.worker_num = conf.getint("SIAN", "worker_num")
            self.seed = conf.getint("SIAN", "seed")


        # HMPNN: heterogeneous message-passing NN — minimal option set.
        elif self.model_name == "HMPNN":
            self.lr = conf.getfloat("HMPNN", "lr")
            self.num_layers = conf.getint("HMPNN", "num_layers")
            self.hid_dim = conf.getint("HMPNN", "hid_dim")
            self.max_epoch = conf.getint("HMPNN", "max_epoch")
            self.batch_size= conf.getint("HMPNN", "batch_size")



        # Resolve the torch device. A model section above may already have set
        # self.device (as a string or index); otherwise fall back to the `gpu`
        # argument (-1 means cpu, >= 0 selects a cuda device when available).
        if hasattr(self, "device"):
            self.device = th.device(self.device)
        elif gpu == -1:
            self.device = th.device("cpu")
        elif gpu >= 0:
            if not th.cuda.is_available():
                self.device = th.device('cpu')
                warnings.warn("cuda is unavailable, the program will use cpu instead. please set 'gpu' to -1.")
            else:
                self.device = th.device("cuda", int(gpu))

        # use_uva (unified virtual addressing) is only usable on cuda.
        # Fixed: the original disabled use_uva unconditionally whenever it was
        # set, even when a cuda device had been selected; per the warning text,
        # only disable it when we ended up on the cpu.
        device = getattr(self, "device", None)
        if getattr(self, "use_uva", None) and (device is None or device.type == "cpu"):
            self.use_uva = False
            warnings.warn(
                "'use_uva' is only available when using cuda. please set 'use_uva' to False."
            )

    def __repr__(self):
        return "[Config Info]\tModel: {},\tTask: {},\tDataset: {}".format(
            self.model_name, self.task, self.dataset
        )
