ysharma HF staff committed on
Commit
6e17fca
1 Parent(s): ea180c8
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -9,15 +9,16 @@ from neon_tts_plugin_coqui import CoquiTTS
9
  # Whisper: Speech-to-text
10
  model = whisper.load_model("base")
11
 
12
- # The LLM : Bloom
13
  API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
14
  HF_TOKEN = os.environ["HF_TOKEN"]
15
  headers = {"Authorization": f"Bearer {HF_TOKEN}"}
16
- #en, fr, esp, arb, hn, portu, Indonesian, Vietnamese, Chinese, tamil, telugu, bengali
17
 
18
  # Text-to-Speech
19
  LANGUAGES = list(CoquiTTS.langs.keys())
20
  print(f"Languages for Coqui are: {LANGUAGES}")
 
21
  coquiTTS = CoquiTTS()
22
 
23
  # Whisper - speech-to-text
@@ -46,12 +47,12 @@ def fun_engine(audio) :
46
  text1 = whisper_stt(audio)
47
  #text1 = model.transcribe(audio)["text"]
48
  text2 = lang_model_response(text1)
49
- speech = tts(text, 'en')
50
  return text1, text2, speech
51
 
52
  # LLM - Bloom Response
53
  def lang_model_response(prompt):
54
- print(f"*****Inside meme_generate - Prompt is :{prompt}")
55
  if len(prompt) == 0:
56
  prompt = """Can you help me please?"""
57
 
 
9
  # Whisper: Speech-to-text
10
  model = whisper.load_model("base")
11
 
12
+ # LLM : Bloom as inference
13
  API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
14
  HF_TOKEN = os.environ["HF_TOKEN"]
15
  headers = {"Authorization": f"Bearer {HF_TOKEN}"}
16
+ #Language covered in Bloom : en, fr, esp, arb, hn, portu, Indonesian, Vietnamese, Chinese, tamil, telugu, bengali
17
 
18
  # Text-to-Speech
19
  LANGUAGES = list(CoquiTTS.langs.keys())
20
  print(f"Languages for Coqui are: {LANGUAGES}")
21
+ #Languages for Coqui are: ['en', 'es', 'fr', 'de', 'pl', 'uk', 'ro', 'hu', 'el', 'bg', 'nl', 'fi', 'sl', 'lv', 'ga']
22
  coquiTTS = CoquiTTS()
23
 
24
  # Whisper - speech-to-text
 
47
  text1 = whisper_stt(audio)
48
  #text1 = model.transcribe(audio)["text"]
49
  text2 = lang_model_response(text1)
50
+ speech = tts(text2, 'en')
51
  return text1, text2, speech
52
 
53
  # LLM - Bloom Response
54
  def lang_model_response(prompt):
55
+ print(f"*****Inside lang_model_response - Prompt is :{prompt}")
56
  if len(prompt) == 0:
57
  prompt = """Can you help me please?"""
58