# params for lfads
def get_lfads_params():
    """Return the training hyperparameters for the LFADS model as a dict."""
    return {
        # num_epochs: number of training epochs
        'num_epochs': 50,
        # batch_size: defaults to 1, no need to change
        'batch_size': 1,
        # input_size: channel / neuron count, depends on the dataset
        'input_size': 67,
        # hidden_size: width of the model's hidden layer
        'hidden_size': 256,
        # output_size: dimensionality of the model output
        'output_size': 67,
        # factor_size: dimensionality of the latent factors
        'factor_size': 128,
        # controller_size: dimensionality of the controller
        'controller_size': 128,
        # learning_rate: defaults to 0.001
        'learning_rate': 0.001,
    }


# params for cyclegan
def get_cyclegan_params():
    """Return the training hyperparameters for the CycleGAN model as a dict."""
    return {
        # loss_type: loss function type ('MSE', 'L1')
        'loss_type': 'MSE',
        # optim_type: optimizer type ('SGD', 'Adam', 'RMSProp')
        'optim_type': 'Adam',
        # epochs: number of training epochs
        'epochs': 400,
        # batch_size: tunable
        'batch_size': 256,
        # D_lr: CycleGAN discriminator learning rate, default 0.01
        # (kept as 0.001 * 10 so the float value is bit-identical to the original)
        'D_lr': 0.001 * 10,
        # G_lr: CycleGAN generator learning rate, default 0.001
        'G_lr': 0.001,
        # ID_loss_p: identity-loss weight
        'ID_loss_p': 5,
        # cycle_loss_p: cycle-loss weight
        'cycle_loss_p': 5,
        # drop_out_D: dropout rate of the discriminator
        'drop_out_D': 0.2,
        # drop_out_G: dropout rate of the generator
        'drop_out_G': 0.2,
    }

# params for wiener
def get_wiener_params():
    """Return the training hyperparameters for the Wiener filter as a dict."""
    return {
        # n_lags: number of lags; one lag corresponds to a 50 ms delay
        'n_lags': 4,
        # save_model: whether to save the trained model (optional)
        'save_model': False,
    }

# params for kalman
def get_kalman_params():
    """Return the training hyperparameters for the Kalman filter as a dict."""
    return {
        # C: default 1; scales the covariance of the transition matrix
        'C': 1,
        # n_lags: number of lags; one lag corresponds to a 50 ms delay
        'n_lags': 4,
        # save_model: whether to save the trained model (optional)
        'save_model': False,
        # dt: sampling interval of the decoder
        'dt': 0.01,
        # h: desired half-life, tunable
        'h': 1000,
    }

# params for lstm
def get_lstm_params():
    """Return the training hyperparameters for the LSTM model as a dict."""
    return {
        # num_epochs: number of training epochs
        'num_epochs': 50,
        # batch_size: defaults to 1, no need to change
        'batch_size': 1,
        # input_size: channel / neuron count, depends on the dataset
        'input_size': 67,
        # hidden_size: width of the model's hidden layer
        'hidden_size': 256,
        # num_layers: number of stacked LSTM layers
        'num_layers': 2,
        # output_size: model output size; depends on the label feature dimension
        'output_size': 2,
        # learning_rate: defaults to 0.001
        'learning_rate': 0.001,
    }


if __name__ == '__main__':
    # Smoke check: build each parameter set and display it.
    # Fix: the original assigned get_cyclegan_params() to an unused variable
    # and only printed the LSTM params; now both are printed.
    cyclegan_params = get_cyclegan_params()
    lstm_params = get_lstm_params()
    print(cyclegan_params)
    print(lstm_params)
