import re
import os
import argparse
import json
# from env import AttrDict

import torch
import torch.nn.functional as F
import numpy as np
# from scipy.io.wavfile import write
from tqdm import tqdm
from ais_bench.infer.interface import InferSession

# from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav


if __name__ == "__main__":
    # Smoke test: load an OM model through ais_bench and run one inference on
    # random mel-spectrogram-shaped input to exercise the dynamic-shape
    # ("dymshape") path end to end.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_wavs_dir', type=str, default='LJSpeech-1.1/wavs')
    parser.add_argument('--output_wavs_dir', type=str, default='output/gen_wavs')
    parser.add_argument('--om', type=str, default='OM/dynamic_dims_linux_x86_64.om')
    parser.add_argument('--config_file', type=str, default='config_v1.json')
    parser.add_argument('--batch-size', type=int, default=1, help='om batch size')
    parser.add_argument('--device_id', type=int, default=0)
    args = parser.parse_args()

    # Config loading is disabled: the random smoke-test input below does not
    # depend on it.
    # with open(args.config_file) as f:
    #     data = f.read()
    # json_config = json.loads(data)
    # config = AttrDict(json_config)

    # Load the offline model onto the requested NPU device.
    generator_om = InferSession(args.device_id, args.om)
    print("load model success")

    # Not needed in dymshape mode: custom_sizes is passed per infer() call below.
    # generator_om.set_custom_outsize([1200 * 32 * 32])
    print(generator_om.get_outputs())

    # Real-dataset pipeline is disabled for this smoke test.
    # dataloader = BatchDataLoader(args.input_wavs_dir, config, args.batch_size)
    # inference(generator_om, dataloader, config)

    # Random input shaped like a batch of mel spectrograms:
    # (batch=32, frames=32, n_mels=80) -- TODO confirm against the real model
    # input layout. Seeded so repeated smoke-test runs are reproducible.
    rng = np.random.default_rng(0)
    input_datas = (rng.integers(1, 32, (32, 32, 80)) / 1000.0).astype(np.float32)

    # Maximum output buffer size for dymshape inference; equals the
    # previously hard-coded 1228800 == 1200 * 32 * 32 (see the disabled
    # set_custom_outsize call above).
    max_out_size = 1200 * 32 * 32
    wavs = generator_om.infer([input_datas], mode='dymshape',
                              custom_sizes=max_out_size)
    print(wavs[0].shape)