Baghdad99 committed on
Commit
d75e5af
1 Parent(s): d0c4294

Update app.py

Files changed (1)
app.py +10 -21
app.py CHANGED
@@ -1,10 +1,11 @@
 import torch
 import gradio as gr
-from transformers import pipeline, AutoTokenizer, T5ForConditionalGeneration, T5Tokenizer
+from transformers import pipeline, AutoTokenizer, M2M100ForConditionalGeneration
+from tokenization_small100 import SMALL100Tokenizer
 import numpy as np
 from pydub import AudioSegment
 
-# Load the pipeline for speech recognition and translation
+# Load the pipeline for speech recognition
 pipe = pipeline(
     "automatic-speech-recognition",
     model="DrishtiSharma/whisper-large-v2-hausa",
@@ -12,9 +13,9 @@ pipe = pipeline(
 )
 
 # Load the new translation model and tokenizer
-model_name = 'jbochi/madlad400-3b-mt'
-model = T5ForConditionalGeneration.from_pretrained(model_name)
-tokenizer = T5Tokenizer.from_pretrained(model_name)
+model_name = 'alirezamsh/small100'
+model = M2M100ForConditionalGeneration.from_pretrained(model_name)
+tokenizer = SMALL100Tokenizer.from_pretrained(model_name)
 
 tts = pipeline("text-to-speech", model="Baghdad99/english_voice_tts")
 
@@ -44,22 +45,11 @@ def translate_speech(audio_file):
         print("The output does not contain 'text'")
         return
 
-    # Use the translation pipeline to translate the transcription
-    translated_text = translator(transcription, return_tensors="pt")
-    print(f"Translated text: {translated_text}")  # Print the translated text to see what it contains
-
-    # Check if the translated text contains 'generated_token_ids'
-    if 'generated_token_ids' in translated_text[0]:
-        # Decode the tokens into text
-        translated_text_str = translator.tokenizer.decode(translated_text[0]['generated_token_ids'])
-    else:
-        print("The translated text does not contain 'generated_token_ids'")
-        return
-
-    # Use the new translation model to translate the transcription
+    # Use the new translation model to translate the transcription
     text = "translate Hausa to English: " + transcription
-    input_ids = tokenizer.encode(text, return_tensors="pt")
-    outputs = model.generate(input_ids=input_ids)
+    tokenizer.tgt_lang = "en"
+    encoded_text = tokenizer(text, return_tensors="pt")
+    outputs = model.generate(**encoded_text)
 
     # Decode the tokens into text
     translated_text_str = tokenizer.decode(outputs[0], skip_special_tokens=True)
@@ -93,4 +83,3 @@ iface = gr.Interface(
 )
 
 iface.launch()
-
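
For reference, a minimal standalone sketch of the new translation path in isolation (not part of the commit). It assumes tokenization_small100.py from the alirezamsh/small100 model repo sits next to the script, since SMALL100Tokenizer is not bundled with transformers; the helper name and sample input are illustrative only.

# Minimal sketch: the new Hausa -> English path, as introduced by this commit.
# Assumes tokenization_small100.py from the alirezamsh/small100 repo is importable.
from transformers import M2M100ForConditionalGeneration
from tokenization_small100 import SMALL100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("alirezamsh/small100")
tokenizer = SMALL100Tokenizer.from_pretrained("alirezamsh/small100")

def translate_hausa_to_english(transcription: str) -> str:
    # SMALL-100 selects the output language via tokenizer.tgt_lang rather than a
    # text prompt; the prefix below is kept only to mirror what app.py does.
    text = "translate Hausa to English: " + transcription
    tokenizer.tgt_lang = "en"
    encoded_text = tokenizer(text, return_tensors="pt")
    outputs = model.generate(**encoded_text)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(translate_hausa_to_english("Ina kwana?"))  # illustrative sample input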