Update app.py
app.py
CHANGED
@@ -96,8 +96,14 @@ from scipy.io.wavfile import write
 from io import BytesIO
 
 # Load the tokenizer and model
-tokenizer = SeamlessM4TTokenizer.from_pretrained("facebook/seamless-m4t-v2-large")
-model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
+# tokenizer = SeamlessM4TTokenizer.from_pretrained("facebook/seamless-m4t-v2-large")
+# model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
+
+# Load model directly
+from transformers import AutoProcessor, AutoModelForTextToSpectrogram
+
+processor = AutoProcessor.from_pretrained("Beehzod/speecht5_finetuned_uz_customData")
+model = AutoModelForTextToSpectrogram.from_pretrained("Beehzod/speecht5_finetuned_uz_customData")
 
 # Set the device (CUDA if available, else CPU)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
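For context, a minimal inference sketch of how the newly loaded checkpoint could be used, assuming it is a SpeechT5 text-to-speech model (which is what AutoModelForTextToSpectrogram typically resolves to). Since the model emits a mel spectrogram rather than audio, a vocoder is needed; the microsoft/speecht5_hifigan vocoder, the CMU ARCTIC x-vector dataset for speaker embeddings, the sample Uzbek text, and the output filename are illustrative assumptions and are not part of this commit.

# Hypothetical usage sketch, not part of the diff above.
import torch
from transformers import AutoProcessor, AutoModelForTextToSpectrogram, SpeechT5HifiGan
from datasets import load_dataset
from scipy.io.wavfile import write

processor = AutoProcessor.from_pretrained("Beehzod/speecht5_finetuned_uz_customData")
model = AutoModelForTextToSpectrogram.from_pretrained("Beehzod/speecht5_finetuned_uz_customData")
# Assumed vocoder: SpeechT5 outputs spectrograms, so a HiFi-GAN vocoder turns them into audio.
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
vocoder.to(device)

# SpeechT5 expects a 512-dim x-vector speaker embedding; the CMU ARCTIC set is a
# common placeholder here, the Space may ship its own embedding instead.
xvectors = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(xvectors[7306]["xvector"]).unsqueeze(0).to(device)

# Tokenize example text and synthesize speech; generate_speech applies the vocoder directly.
inputs = processor(text="Salom, dunyo!", return_tensors="pt").to(device)
with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

# SpeechT5 generates 16 kHz audio.
write("output.wav", rate=16000, data=speech.cpu().numpy())

Note that, unlike the removed SeamlessM4Tv2Model path, this route produces only a spectrogram from the model itself; whether the Space reuses the existing BytesIO/scipy write-out logic downstream is not shown in this hunk.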