psakamoori committed on
Commit
50b4d3f
1 Parent(s): 606f39a

Add backend and target device check

Browse files
Files changed (1) hide show
  1. app.py +32 -8
app.py CHANGED
@@ -1,6 +1,7 @@
1
  from speechbrain.inference.interfaces import foreign_class
2
  from custom_interface import CustomEncoderWav2vec2Classifier
3
  from speechbrain.pretrained import EncoderClassifier
 
4
 
5
  # Function in SpeechBrain to load and use custom PyTorch models
6
  classifier = foreign_class(
@@ -18,22 +19,45 @@ checkpoint = EncoderClassifier.from_hparams(
18
  # Convert hparams to a dictionary
19
  hparams_dict = vars(checkpoint.hparams)
20
 
21
- # OpenVINO inference optimization parameters
22
- device = "cpu"
23
- ov_opts = {"device_name": device, "PERFORMANCE_HINT": "LATENCY"}
24
 
25
- instance = CustomEncoderWav2vec2Classifier(modules=checkpoint.mods,
 
 
 
 
 
 
26
  hparams=hparams_dict, model=classifier.mods["wav2vec2"].model,
27
  audio_file_path="speechbrain/emotion-recognition-wav2vec2-IEMOCAP/anger.wav",
28
  backend="openvino",
29
- ov_opts=ov_opts,
 
30
  save_ov_model=False)
 
 
 
31
 
 
 
 
 
 
 
32
 
33
  # OpenVINO inference
34
  print("=" * 30)
35
- print(f"[INFO] Inference Device: {ov_opts['device_name']}")
36
- print("=" * 30)
37
- print("\n[INFO] Performing OpenVINO inference...")
 
 
 
 
 
 
 
38
  out_prob, score, index, text_lab = instance.classify_file("speechbrain/emotion-recognition-wav2vec2-IEMOCAP/anger.wav")
39
  print(f"[RESULT] OpenVINO Inference Output: {text_lab[index-1]}")
 
1
  from speechbrain.inference.interfaces import foreign_class
2
  from custom_interface import CustomEncoderWav2vec2Classifier
3
  from speechbrain.pretrained import EncoderClassifier
4
+ import openvino.properties.hint as hints
5
 
6
  # Function in SpeechBrain to load and use custom PyTorch models
7
  classifier = foreign_class(
 
19
# Convert hparams to a plain dictionary so it can be passed to the
# custom classifier constructor below.
hparams_dict = vars(checkpoint.hparams)

# Inference backend selection: "openvino" or "pytorch".
backend = "openvino"
torch_device = "cpu"

if backend == "openvino":
    if torch_device == "cuda":
        raise ValueError("OpenVINO backend does not support CUDA devices. \
                         Please use cpu for torch_device.")
    # OpenVINO inference optimization parameters.
    # NOTE(review): THROUGHPUT hint batches requests for utilization;
    # switch to LATENCY for single-stream responsiveness.
    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT}
    ov_opts = {"ov_device": "CPU", "config": config}
    instance = CustomEncoderWav2vec2Classifier(
        modules=checkpoint.mods,
        hparams=hparams_dict,
        model=classifier.mods["wav2vec2"].model,
        audio_file_path="speechbrain/emotion-recognition-wav2vec2-IEMOCAP/anger.wav",
        backend="openvino",
        opts=ov_opts,
        torch_device=torch_device,
        save_ov_model=False,
    )
elif backend == "pytorch":
    torch_opts = {"torch_device": torch_device}
    instance = CustomEncoderWav2vec2Classifier(
        modules=checkpoint.mods,
        hparams=hparams_dict,
        model=classifier.mods["wav2vec2"].model,
        audio_file_path="speechbrain/emotion-recognition-wav2vec2-IEMOCAP/anger.wav",
        backend="pytorch",
        opts=torch_opts,
        torch_device=torch_device,
    )
else:
    # Fail fast: previously an unrecognized backend (or "openvino" with a
    # device other than cpu/cuda) fell through and `instance` was used
    # below while unassigned, raising a confusing NameError.
    raise ValueError(f"Unsupported backend: {backend!r}. "
                     "Use 'openvino' or 'pytorch'.")

# Report which backend/device is about to run.
print("=" * 30)
if backend == "openvino":
    print(f"[INFO] Inference Device: {ov_opts['ov_device']}")
    print("=" * 30)
    print("\n[INFO] Performing OpenVINO inference...")
else:
    print(f"[INFO] Inference Device: {torch_opts['torch_device']}")
    print("=" * 30)
    print("\n[INFO] Performing PyTorch inference...")

# Run classification on the sample clip and print the predicted label.
# NOTE(review): text_lab[index-1] assumes `index` is 1-based — confirm
# against CustomEncoderWav2vec2Classifier.classify_file's contract.
out_prob, score, index, text_lab = instance.classify_file(
    "speechbrain/emotion-recognition-wav2vec2-IEMOCAP/anger.wav"
)
backend_name = "OpenVINO" if backend == "openvino" else "PyTorch"
print(f"[RESULT] {backend_name} Inference Output: {text_lab[index-1]}")