from ais_bench.infer.interface import InferSession
import numpy as np

if __name__ == "__main__":
    # Smoke-test driver: run one dynamic-shape inference of the compiled
    # FastSpeech2 acoustic model (.om) on Ascend NPU device 0 and print the
    # raw outputs. debug=True enables ais_bench's verbose session logging.
    om = InferSession(0, 'OM/fs2_model_linux_x86_64.om', debug=True)

    # Toy 10-token utterance; each id array is shaped (1, 10) int32
    # (batch axis first).
    # NOTE(review): the id values look like placeholders — confirm they are
    # valid entries of the real phone/tone/prosody vocabularies.
    input_phone_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], dtype=np.int32)
    input_tone_ids = np.array([[1, 2, 3, 4, 1, 1, 1, 1, 1, 1]], dtype=np.int32)
    input_prosody_ids = np.array([[1, 1, 1, 1, 2, 2, 2, 2, 3, 3]], dtype=np.int32)

    # Global prosody controls; 1.0 means "no change" for speed / F0 / energy.
    speed_ratios = np.array([1.0], dtype=np.float32)
    f0_ratios = np.array([1.0], dtype=np.float32)
    energy_ratios = np.array([1.0], dtype=np.float32)

    # "dymshape" selects dynamic-shape inference; the trailing list supplies
    # per-output buffer sizes in bytes.
    # NOTE(review): 2147583648 is close to, but not exactly, 2**31
    # (2147483648) — confirm whether this ~2 GiB figure is intentional or a
    # typo. Input order (prosody, phone, speed, tone, f0, energy) must match
    # the OM model's input binding order.
    output = om.infer(
        [input_prosody_ids, input_phone_ids, speed_ratios,
         input_tone_ids, f0_ratios, energy_ratios],
        "dymshape",
        [2147583648, 2147583648, 1000, 2147583648, 1000, 1000],
    )
    print('---------------')
    print(output)
    print(output[0].shape)