Hugging Face Space — status: Sleeping
# --- Startup bootstrap (Hugging Face Spaces pattern) ---------------------
# Dependencies are installed at runtime with pip instead of a requirements
# file. NOTE(review): os.system('pip install ...') is fragile (no error
# checking, shell string); presumably deliberate for this Space — confirm
# before replacing with a requirements.txt.
import os
os.system('pip install ipython')
from IPython.display import clear_output
os.system('pip install python-dotenv pydub ffmpeg-python nltk gradio==3.48.0 OpenAI gradio_client emoji')
# Project helpers: text-to-speech and LLM utilities. The wildcard imports
# presumably provide SECRET_TOKEN, ROLES, get_sentence and
# generate_speech_from_history2, all used below — verify in utils/.
from utils.tts import *
from utils.llm import *
# Hide the noisy pip-install output from the notebook/Space log.
clear_output()
import gradio as gr
# Main function for the API endpoint: takes the input text and chatbot role.
def generate_story(secret_token, input_text, chatbot_role):
    """Generate a story for *input_text* in the voice of *chatbot_role*.

    Validates *secret_token* against the module-level SECRET_TOKEN, streams
    sentences from get_sentence(), concatenates them into one story string,
    and forwards the final history plus text to generate_speech_from_history2()
    for speech synthesis.

    Returns:
        The result of generate_speech_from_history2(...) on success, or an
        empty list when the generator yielded no history updates.

    Raises:
        gr.Error: if the secret token does not match.
    """
    if secret_token != SECRET_TOKEN:
        # Plain string (the original used an f-string with no placeholder);
        # deliberately does not echo the expected token back to the caller.
        raise gr.Error('Invalid secret token. Secret Token: secret')

    # History is a list of [user, assistant] pairs; seed it with the input.
    history = [[input_text, None]]
    story_sentences = get_sentence(history, chatbot_role)

    sentence_parts = []   # stripped sentence fragments, in order
    last_history = None   # most recent history state from the stream
    for sentence, updated_history in story_sentences:
        if sentence:
            sentence_parts.append(sentence.strip())
        last_history = updated_history  # keep track of the last history update

    # Join once instead of quadratic `+=` concatenation; every sentence is
    # followed by a space, matching the original trailing-space behavior.
    story_text = "".join(part + " " for part in sentence_parts)

    if last_history is None:
        return []
    # Convert the list-of-lists history into a list of tuples for downstream use.
    history_tuples = [tuple(entry) for entry in last_history]
    return generate_speech_from_history2(history_tuples, chatbot_role, story_text)
# Build a Gradio Interface around generate_story() with a raw JSON output,
# so the Space can be called programmatically as an API endpoint.
demo = gr.Interface(
    fn=generate_story,
    # Three inputs, matching generate_story's positional parameters:
    # secret token, user prompt text, and the chatbot role (from ROLES).
    inputs=[gr.Text(label='Secret Token'), gr.Textbox(placeholder="Enter your text here"), gr.Dropdown(choices=ROLES, label="Select Chatbot Role")],
    outputs="json"
)
# Enable request queuing, then launch the app with debug logging.
demo.queue()
demo.launch(debug=True)