Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -11,19 +11,27 @@ assert torch.cuda.is_available(), "Demo requires a GPU."
|
|
| 11 |
# Prefer the GPU when one is present; otherwise fall back to the CPU.
if torch.cuda.is_available():
    DEVICE = "cuda"
else:
    DEVICE = "cpu"
print(DEVICE)
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
|
| 22 |
-
# Audio sample rate in Hz for the generated output.
SAMPLE_RATE = 32_000
|
| 23 |
|
|
|
|
| 24 |
|
| 25 |
@spaces.GPU
|
| 26 |
def tts_stream(text, temperature, top_p, repetition_penalty, state):
|
|
|
|
|
|
|
| 27 |
if not text.strip():
|
| 28 |
yield None, state
|
| 29 |
return
|
|
|
|
| 11 |
# Run on CUDA when available, CPU otherwise.
DEVICE = ("cpu", "cuda")[torch.cuda.is_available()]
print(DEVICE)
|
| 13 |
|
| 14 |
+
# Shared TTS model, created lazily on the first request.
model = None


def load_model():
    """Return the process-wide SopranoTTS instance, building it on first use.

    Subsequent calls reuse the cached global instead of constructing a new
    model, so the (expensive) load happens at most once per process.
    """
    global model
    if model is not None:
        return model
    # First call: construct the model once and cache it globally.
    model = SopranoTTS(
        backend="auto",
        device=DEVICE,
        cache_size_mb=100,
        decoder_batch_size=1,
    )
    return model
|
| 27 |
|
|
|
|
| 28 |
|
| 29 |
# Audio sample rate in Hz for the generated output.
SAMPLE_RATE = 32_000
|
| 30 |
|
| 31 |
@spaces.GPU
|
| 32 |
def tts_stream(text, temperature, top_p, repetition_penalty, state):
|
| 33 |
+
model = load_model()
|
| 34 |
+
|
| 35 |
if not text.strip():
|
| 36 |
yield None, state
|
| 37 |
return
|