Update app.py
app.py
CHANGED
@@ -1,6 +1,6 @@
-from transformers import pipeline
-
-
+from transformers import pipeline
+
+tts = pipeline("text-to-speech", model="facebook/fastspeech2-en-ljspeech")
 
 # Initialize the translation pipeline for Russian to English
 translator = pipeline("translation_ru_to_en", model="Helsinki-NLP/opus-mt-ru-en")
@@ -20,14 +20,9 @@ summary = summarizer(translation, max_length=140, min_length=110, do_sample=Fals
 
 print("Summary: ", summary)
 
-
-model = TFAutoModelForCausalLM.from_pretrained("facebook/fastspeech2-en-ljspeech")
-
-inputs = tokenizer(summary, return_tensors="tf")
-
-# Generate speech
-with torch.no_grad():
-    logits = model.generate(**inputs)
+speech = tts(summary)
 
-#
-
+# The output is a list of PyTorch tensors containing the audio data
+# Let's save the first (and only) audio sample to a file
+with open("output1.wav", "wb") as f:
+    f.write(speech[0]["file"].read())
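For reference, a minimal sketch of how the output of a transformers text-to-speech pipeline is typically saved: recent versions of the pipeline return a dict with an "audio" NumPy array and a "sampling_rate", rather than the file-like objects that `speech[0]["file"].read()` expects, so the last line of the new code may need a similar adjustment. The checkpoint `suno/bark-small`, the `summary` placeholder, and the use of `scipy.io.wavfile` below are illustrative assumptions, not part of this commit; `facebook/fastspeech2-en-ljspeech` is a fairseq checkpoint and may not load through this pipeline.

from transformers import pipeline
from scipy.io import wavfile

# NOTE: assumed stand-in checkpoint known to work with pipeline("text-to-speech", ...);
# the FastSpeech2 model used in the diff is a fairseq checkpoint and may not load this way.
tts = pipeline("text-to-speech", model="suno/bark-small")

# Stand-in for the summarizer output produced earlier in app.py.
summary = "A short English summary to vocalize."

speech = tts(summary)

# The pipeline returns a dict with a NumPy waveform and its sampling rate,
# not file-like objects, so write the audio out with a WAV helper.
wavfile.write("output1.wav", rate=speech["sampling_rate"], data=speech["audio"].squeeze())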