mskov committed
Commit 6615174
Parent: 87e9ad0

Update app.py

Files changed (1): app.py (+3, -1)
app.py CHANGED
@@ -83,6 +83,8 @@ def classify_toxicity(audio_file, text_input, classify_anxiety):
         return toxicity_score, classification_output, emo_dict[text_lab[0]], transcribed_text
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
+        model = whisper.load_model("large")
+        model_cache[model_name] = model
         # model = model_cache[model_name]
         # class_names = classify_anxiety.split(",")
         class_names_list = class_options.get(classify_anxiety, [])
@@ -98,7 +100,7 @@ def classify_toxicity(audio_file, text_input, classify_anxiety):
         model = "whisper-large"
 
         internal_lm_average_logprobs = classify.calculate_internal_lm_average_logprobs(
-            model=Whisper,
+            model=model,
             class_names=class_names,
             # class_names=classify_anxiety,
             tokenizer=tokenizer,
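
For context: the commit loads the Whisper model in the else-branch, stores it in a cache, and passes the loaded model object into classify.calculate_internal_lm_average_logprobs instead of the bare name Whisper (which was not a loaded model). A minimal sketch of that load-and-cache pattern follows; the get_whisper_model helper is hypothetical, while model_cache and model_name mirror names already used in app.py, and whisper.load_model is the real openai-whisper API:

# Minimal sketch of the lazy-load-and-cache pattern this commit introduces.
# `model_cache` and `model_name` mirror names in app.py; the helper itself
# is an assumption, not code from the repo.
import whisper

model_cache = {}  # assumed module-level cache, keyed by model name

def get_whisper_model(model_name="large"):
    # Load the checkpoint once on first use, then reuse the cached object.
    if model_name not in model_cache:
        model_cache[model_name] = whisper.load_model(model_name)
    return model_cache[model_name]

# Downstream calls then receive the loaded model instance, e.g.:
# model = get_whisper_model("large")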