Create app.py
app.py
ADDED
@@ -0,0 +1,38 @@
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ import torch
+ import torchaudio
+ import ChatTTS
+
+ # Configure Torch
+ torch._dynamo.config.cache_size_limit = 64
+ torch._dynamo.config.suppress_errors = True
+ torch.set_float32_matmul_precision('high')
+
+ # Initialize FastAPI app and ChatTTS model
+ app = FastAPI()
+ chat = ChatTTS.Chat()
+ chat.load_models(compile=False)
+
+ # Define the request model
+ class TextRequest(BaseModel):
+     text: str
+
+ @app.post("/synthesize/")
+ async def synthesize_speech(request: TextRequest):
+     try:
+         # Perform inference
+         wavs = chat.infer([request.text])
+
+         # Save the generated audio
+         output_file = "output.wav"
+         torchaudio.save(output_file, torch.from_numpy(wavs[0]), 24000)
+
+         return {"message": "Speech synthesized successfully", "audio_file": output_file}
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+ # Root endpoint
+ @app.get("/")
+ def read_root():
+     return {"message": "Welcome to the ChatTTS API. Use the /synthesize/ endpoint to generate speech."}
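Usage note (not part of the commit): a minimal sketch of a client for the API above, assuming the server is started locally with the usual FastAPI workflow, e.g. `uvicorn app:app --host 0.0.0.0 --port 8000`, and that the `requests` package is available. The file name, base URL, and sample text below are illustrative assumptions.

client_example.py (illustrative)

import requests

BASE_URL = "http://localhost:8000"  # assumed local uvicorn deployment

# Confirm the service is up via the root endpoint
print(requests.get(f"{BASE_URL}/").json())

# Ask /synthesize/ to generate speech for a sample sentence
resp = requests.post(
    f"{BASE_URL}/synthesize/",
    json={"text": "Hello from ChatTTS."},
)
resp.raise_for_status()
payload = resp.json()

# The server writes output.wav next to app.py and returns its name in the response
print(payload["message"], "->", payload["audio_file"])

Note that the endpoint returns only the file name, not the audio bytes, so this client only works when it shares a filesystem with the server; returning the WAV itself would require changing the endpoint (for example to a FastAPI FileResponse), which the committed code does not do.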