import numpy as np

# Configuration for an nnmnkwii-based speech-synthesis recipe
# (CMU ARCTIC "slt" voice).
DATA_ROOT = "./data/slt_arctic_demo_data"
# DATA_ROOT = "./data/slt_arctic_full_data"
test_size = 0.112  # This means 1000 utterances for training data
random_state = 1234

# Per-frame acoustic stream widths. Names follow the usual vocoder-feature
# convention (mgc: mel-cepstrum, lf0: log-F0, vuv: voiced/unvoiced flag,
# bap: band aperiodicity) -- presumably each includes delta features where
# the width is a multiple of the static dimension.
mgc_dim = 180
lf0_dim = 3
vuv_dim = 1
bap_dim = 3

duration_linguistic_dim = 416
# NOTE(review): "linguisic" is a historical misspelling; kept because callers
# reference this name. Prefer the correctly-spelled alias below in new code.
acoustic_linguisic_dim = 425
acoustic_linguistic_dim = acoustic_linguisic_dim  # backward-compatible alias
a_in = 512
duration_dim = 5
acoustic_dim = mgc_dim + lf0_dim + vuv_dim + bap_dim

# Waveform / vocoder analysis parameters.
fs = 16000
frame_period = 5  # ms
hop_length = fs * frame_period // 1000  # = 80 samples per frame at 16 kHz
fftlen = 1024
alpha = 0.41  # all-pass constant -- the typical value for 16 kHz analysis

# Offsets of each stream inside the concatenated acoustic feature vector.
# Derived from the dims above so they cannot drift out of sync.
mgc_start_idx = 0
lf0_start_idx = mgc_start_idx + mgc_dim  # 180
vuv_start_idx = lf0_start_idx + lf0_dim  # 183
bap_start_idx = vuv_start_idx + vuv_dim  # 184

# Delta-feature windows as (left width, right width, coefficients):
# static, first-order delta, second-order delta.
_static_window = np.array([1.0])
_delta_window = np.array([-0.5, 0.0, 0.5])
_accel_window = np.array([1.0, -2.0, 1.0])
windows = [
    (0, 0, _static_window),
    (1, 1, _delta_window),
    (1, 1, _accel_window),
]

# --- network / optimizer hyperparameters ---
num_hidden_layers = 3
hidden_size = 256
batch_size = 2
# # We use PyTorch's multiprocess iterator. Note that a large n_workers causes
# # dataset copies across processes.
# n_workers = 4
# pin_memory = True
# nepoch = 25
lr = 1e-3
weight_decay = 1e-6

# --- added config ---

is_shuffle_train = True
train_set_repeat_count = None  # None -> dataset is not repeated
# batch_size = 32
log_dir = './logs'
# Alternating step counts -- presumably discriminator vs. adversarial/main
# training phases; confirm against the training loop.
d_train_steps = 2000
a_train_steps = 8

log_steps = 10    # log every N steps
save_steps = 900  # checkpoint every N steps
gpu = '2'         # GPU id kept as a string (e.g. for CUDA_VISIBLE_DEVICES)

keep_prob = 0.8   # dropout keep probability

u_net_layers = 4

# hidden_channels = 256
# last_channels = 512