import torch


class BaseConfig:
    """Shared hyper-parameter container for the baseline sequence models.

    Holds embedding sizes, sequence/attention settings, MLP layer widths and
    the torch device.  ``sim_mode`` is only meaningful for the SIM model;
    ``short_time`` and ``out_channel`` are only meaningful for the TWIN model
    (see :meth:`print_info`).
    """

    def __init__(self,
                 vocab_size_dic,
                 id_embed_dim=64,
                 simple_embed_dim=8,
                 seq_len=200,
                 heads=4,
                 k=20,
                 sim_mode='soft',
                 short_time=20,
                 out_channel=30,
                 num_blocks=2,
                 att_hidden_units=None,
                 mlp_hidden_units=None,
                 device_id=0):
        """Store hyper-parameters.

        Args:
            vocab_size_dic: mapping of feature name -> vocabulary size.
            id_embed_dim: embedding dim for id-type features.
            simple_embed_dim: embedding dim for simple (side) features.
            seq_len: behavior sequence length.
            heads: number of attention heads.
            k: top-k retrieval size.
            sim_mode: SIM search mode ('soft' by default).
            short_time: TWIN short-term window length.
            out_channel: TWIN output channel count.
            num_blocks: number of attention blocks.
            att_hidden_units: attention MLP widths (default [80, 40]).
            mlp_hidden_units: prediction MLP widths (default [200, 80]).
            device_id: CUDA device index; falls back to CPU when CUDA is absent.
        """
        self.vocab_size_dic = vocab_size_dic
        self.id_embed_dim = id_embed_dim
        self.simple_embed_dim = simple_embed_dim
        self.seq_len = seq_len
        self.heads = heads
        self.k = k
        self.sim_mode = sim_mode
        self.num_blocks = num_blocks
        # None sentinels avoid the shared-mutable-default pitfall while
        # keeping the same effective defaults for existing callers.
        self.att_hidden_units = [80, 40] if att_hidden_units is None else att_hidden_units
        self.mlp_hidden_units = [200, 80] if mlp_hidden_units is None else mlp_hidden_units
        self.device = torch.device(f"cuda:{device_id}" if torch.cuda.is_available() else "cpu")
        self.device_id = device_id
        self.short_time = short_time
        self.out_channel = out_channel

    def print_info(self, model_name: str, dataset_name: str):
        """Print the common settings, plus model-specific ones for SIM/TWIN."""
        print(f"model:{model_name}")
        print(f"dataset:{dataset_name}")
        print(f"id_embed_dim:{self.id_embed_dim}")
        print(f"simple_embed_dim:{self.simple_embed_dim}")
        print(f"seq_len:{self.seq_len}")
        print(f"heads:{self.heads}")
        print(f"k:{self.k}")
        print(f"device_id:{self.device_id}")
        lowered = model_name.lower()
        if lowered == "sim":
            print(f"sim_mode:{self.sim_mode}")
        # BUG FIX: the original compared the lower-cased name against the
        # uppercase literal "TWIN", which could never match, so the TWIN
        # fields were never printed.
        if lowered == "twin":
            print(f"short_time:{self.short_time}")
            print(f"out_channel:{self.out_channel}")



class KuaiFormerConfig:
    """Hyper-parameter container for the KuaiFormer model.

    Tracks the behavior-sequence grouping sizes (early/middle/latest), the
    number of interest queries, transformer depth/heads, embedding dims and
    the torch device to run on.
    """

    def __init__(self,
                 vocab_size_dic,
                 heads,
                 split_range,
                 total_behavior_num,
                 q_num=4,
                 middle_group_num=16,
                 early_group_num=64,
                 latest_group_num=64,
                 id_embedding_dim=64,
                 simple_embedding_dim=8,
                 layers=4,
                 device_id = 0,
                 ):
        """Store the KuaiFormer hyper-parameters.

        Args:
            vocab_size_dic: mapping of feature name -> vocabulary size.
            heads: number of attention heads.
            split_range: boundaries splitting the behavior sequence into groups.
            total_behavior_num: total behavior sequence length.
            q_num: number of interest query tokens.
            middle_group_num: group count for the middle segment.
            early_group_num: group count for the early segment.
            latest_group_num: group count for the latest segment.
            id_embedding_dim: embedding dim for id-type features.
            simple_embedding_dim: embedding dim for simple (side) features.
            layers: number of transformer layers.
            device_id: CUDA device index; falls back to CPU without CUDA.
        """
        # Vocabulary / embedding settings.
        self.vocab_size_dic = vocab_size_dic
        self.id_embedding_dim = id_embedding_dim
        self.simple_embedding_dim = simple_embedding_dim
        # Sequence segmentation settings.
        self.split_range = split_range
        self.total_behavior_num = total_behavior_num
        self.early_group_num = early_group_num
        self.middle_group_num = middle_group_num
        self.latest_group_num = latest_group_num
        # Transformer settings.
        self.heads = heads
        self.layers = layers
        self.q_num = q_num
        # Device selection: prefer the requested CUDA card, else CPU.
        use_cuda = torch.cuda.is_available()
        self.device = torch.device(f"cuda:{device_id}" if use_cuda else "cpu")
        self.device_id = device_id

    def print_info(self, model_name: str, dataset_name: str):
        """Print a summary of the configuration, one ``label:value`` per line."""
        summary = (
            ("model", model_name),
            ("dataset", dataset_name),
            ("id_embed_dim", self.id_embedding_dim),
            ("simple_embed_dim", self.simple_embedding_dim),
            ("seq_len", self.total_behavior_num),
            ("heads", self.heads),
            ("layers", self.layers),
            ("interest_num", self.q_num),
            ("device_id", self.device_id),
        )
        for label, value in summary:
            print(f"{label}:{value}")

class FEAConfig():
    """Hyper-parameter container for the FEA model.

    Most transformer/frequency-domain settings are fixed constants; only the
    vocabulary, embedding dims, sequence length, batch size and device are
    caller-configurable.
    """

    def __init__(self, vocab_size_dic, id_embed_dim=64, simple_embed_dim=8, seq_len=200, batch_size=512,
                 device_id=0):
        """Store the FEA hyper-parameters.

        Args:
            vocab_size_dic: mapping of feature name -> vocabulary size.
            id_embed_dim: embedding dim for id-type features.
            simple_embed_dim: embedding dim for simple (side) features.
            seq_len: behavior sequence length.
            batch_size: training batch size.
            device_id: CUDA device index; falls back to CPU without CUDA.
        """
        # Caller-configurable settings.
        self.vocab_size_dic = vocab_size_dic
        self.id_embed_dim = id_embed_dim
        self.simple_embed_dim = simple_embed_dim
        self.seq_len = seq_len
        self.batch_size = batch_size
        # Transformer encoder settings (fixed).
        self.n_layers = 2          # (int) number of transformer layers in the encoder
        self.n_heads = 2           # (int) number of attention heads per layer
        self.hidden_size = 64      # (int) number of features in the hidden state
        self.inner_size = 256      # (int) inner hidden size of the feed-forward layer
        self.hidden_dropout_prob = 0.5  # (float) dropout probability on hidden states
        self.attn_dropout_prob = 0.5    # (float) dropout probability on attention scores
        self.hidden_act = 'gelu'   # (str) activation function in the feed-forward layer
        self.layer_norm_eps = 1e-12     # (float) denominator epsilon for numerical stability
        self.initializer_range = 0.02   # (float) std-dev for normal weight initialization
        # Loss settings (fixed).
        self.loss_type = 'CE'      # (str) type of loss function
        self.lmd = 0.1             # (float) weight of the unsupervised normalized CE loss
        self.lmd_sem = 0.1         # (float) weight of the supervised normalized CE loss
        # Frequency-domain settings (fixed).
        self.global_ratio = 1      # (float) ratio of frequency components
        self.dual_domain = False   # (bool) enable frequency-domain processing
        self.std = False           # (bool) use the specific time index
        self.spatial_ratio = 0     # (float) ratio between spatial and frequency domains
        self.fredom = False        # (bool) regularize in the frequency domain
        self.fredom_type = None    # (str) type of loss in different scenarios
        self.topk_factor = 1       # (int) aggregate time-delayed sequences with high autocorrelation
        self.use_filter = False
        # Device selection: prefer the requested CUDA card, else CPU.
        if torch.cuda.is_available():
            self.device = torch.device(f"cuda:{device_id}")
        else:
            self.device = torch.device("cpu")
        self.device_id = device_id

    def print_info(self, model_name: str, dataset_name: str):
        """Print a short ``label:value`` summary of the configuration."""
        for label, value in (
            ("model", model_name),
            ("dataset", dataset_name),
            ("id_embed_dim", self.id_embed_dim),
            ("simple_embed_dim", self.simple_embed_dim),
            ("device_id", self.device_id),
        ):
            print(f"{label}:{value}")
