import os
import time

from pydantic import BaseModel
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_google_genai import (
    ChatGoogleGenerativeAI,
    HarmBlockThreshold,
    HarmCategory,
)
from gradio_client import Client

from TextGen import app
from TextGen.suno import custom_generate_audio, get_audio_information
# External endpoints and tokens, read from the environment
song_base_api = os.environ["VERCEL_API"]
my_hf_token = os.environ["HF_TOKEN"]

# Gradio client pointing at the hosted XTTS text-to-speech Space
tts_client = Client("https://jofthomas-xtts.hf.space/", hf_token=my_hf_token)
class Generate(BaseModel):
    text: str
def generate_text(prompt: str):
    if prompt == "":
        return {"detail": "Please provide a prompt."}

    # Build the prompt template; the template string must contain a {Prompt} placeholder
    prompt_template = PromptTemplate(template=prompt, input_variables=["Prompt"])

    # Initialize the LLM with the dangerous-content filter disabled
    llm = ChatGoogleGenerativeAI(
        model="gemini-pro",
        safety_settings={
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        },
    )
    llmchain = LLMChain(prompt=prompt_template, llm=llm)

    # Pass the original prompt string as the {Prompt} value, not the
    # PromptTemplate object (the original code shadowed `prompt` here)
    llm_response = llmchain.run({"Prompt": prompt})
    return Generate(text=llm_response)
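
# Usage sketch (illustrative, not from the original source): the prompt string
# serves both as the PromptTemplate and as the value bound to its {Prompt}
# variable, so a template like the one below substitutes into itself.
#
#   result = generate_text("Continue this story: {Prompt}")
#   print(result.text)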
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# The route decorators below are assumptions: the scraped source did not
# include them, but FastAPI needs them to expose the handlers.
@app.get("/")
def api_home():
    return {"detail": "Welcome to FastAPI TextGen Tutorial!"}


@app.post("/generate")
def inference(input_prompt: str):
    return generate_text(prompt=input_prompt)
# Route path is an assumption; `text` and `language` arrive as query parameters.
@app.get("/generate_wav")
async def generate_wav(text: str, language: str = "en"):
    try:
        # Use the Gradio client to generate the wav file
        result = tts_client.predict(
            text,  # str in 'Text Prompt' Textbox component
            language,  # str in 'Language' Dropdown component
            "./blacksmith.mp3",  # str (filepath or URL) in 'Reference Audio' Audio component
            "./blacksmith.mp3",  # str (filepath or URL) in 'Use Microphone for Reference' Audio component
            False,  # bool in 'Use Microphone' Checkbox component
            False,  # bool in 'Cleanup Reference Voice' Checkbox component
            False,  # bool in 'Do not use language auto-detect' Checkbox component
            True,  # bool in 'Agree' Checkbox component
            fn_index=1,
        )

        # result[1] holds the path of the generated wav file
        wav_file_path = result[1]

        # Return the generated wav file as a response
        return FileResponse(wav_file_path, media_type="audio/wav", filename="output.wav")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
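
# Example request (a sketch; the /generate_wav path above is an assumption):
#
#   curl "http://localhost:8000/generate_wav?text=Hello+traveler&language=en" \
#        --output output.wav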
# Route path is an assumption; kicks off a Suno generation and polls until the
# two generated clips start streaming.
@app.get("/generate_song")
async def generate_song(text: str):
    try:
        data = custom_generate_audio({
            "prompt": f"{text}",
            "make_instrumental": False,
            "wait_audio": False,
        })
        ids = f"{data[0]['id']},{data[1]['id']}"
        print(f"ids: {ids}")

        # Poll every 5 s, for at most 60 * 5 = 300 s
        for _ in range(60):
            data = get_audio_information(ids)
            if data[0]["status"] == "streaming":
                print(f"{data[0]['id']} ==> {data[0]['audio_url']}")
                print(f"{data[1]['id']} ==> {data[1]['audio_url']}")
                return {"audio_urls": [data[0]["audio_url"], data[1]["audio_url"]]}
            time.sleep(5)
        raise HTTPException(status_code=504, detail="Song generation timed out.")
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))