import gradio as gr
from transformers import pipeline

# Sentiment-analysis pipeline used by the Sentiment Analysis tab below
classifier = pipeline('sentiment-analysis')

#################################################################
#1: Image to Text (OCR)
from OCR_Image_to_Text import get_OCR_demo

#################################################################
#2: Text to Speech
title = "Text to Speech Translation"

tts_examples = [
    "I love learning machine learning",
    "How do you do?",
]

tts_demo = gr.Interface.load(
    "huggingface/facebook/fastspeech2-en-ljspeech",
    title=title,
    examples=tts_examples,
    description="Give me something to say!",
)

#################################################################
#2_1: Text FILE to Speech
title = "Text FILE to Speech Translation"

from gtts import gTTS
import tempfile

def text_to_speech(file):
    # Read the uploaded text file
    with open(file.name, 'r') as f:
        text = f.read()
    # Create a gTTS (Google Text-to-Speech) object and write the speech to a temporary MP3 file
    tts = gTTS(text)
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
        tts.save(tmp.name)
        return tmp.name

inputs = gr.File(label="Upload a Text File")
outputs = gr.Audio(label="Audio", type="filepath")

interface_demo = gr.Interface(
    fn=text_to_speech,
    title=title,
    inputs=inputs,
    outputs=outputs,
    description="Give me something to say!",
)
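# A quick standalone sanity check for text_to_speech, kept commented out so it does not run on
# import; 'sample.txt' is a hypothetical file name used only for illustration.
# with open("sample.txt", "w") as f:
#     f.write("I love learning machine learning")
# print(text_to_speech(open("sample.txt")))  # prints the path of the generated MP3 file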
#################################################################
#3: Sentiment Analysis
def get_sentiment(input_text):
    return classifier(input_text)

sentiment_demo = gr.Interface(
    fn=get_sentiment,
    inputs='text',
    outputs=['text'],
    title='Sentiment Analysis',
    description="Enter a sentence and know about its sentiment",
    examples=[
        ["We are very happy to show you the 🤗 Transformers library."],
        ["I am happy with the performance of Indian Hockey team"],
        ["Pizza was not at all hot and restaurant was having the pathetic service"],
    ],
)
# sentiment_demo.launch(inline=False)

#################################################################
#4: POS Tagging
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')

def get_POS(input_text):
    # Tokenise the sentence and return a POS tag for every token
    tokens = nltk.word_tokenize(input_text)
    return nltk.pos_tag(tokens)

title = 'Know about Parts of Speech (POS) Tags'

examples = [
    ["We are very happy to show you the 🤗 Transformers library."],
    ["I am happy with the performance of Indian Hockey team"],
    ["Pizza was not at all hot and restaurant was having the pathetic service"],
    ["Scientist Dr. Evangeline Starlight of Technopolis announced a breakthrough in quantum computing at Nova University. "
     "Mayor Orion Pulsor commended her. The discovery will be shared at the Galactic Quantum Computing Symposium in Cosmos"],
]

POS_demo = gr.Interface(
    fn=get_POS,
    inputs='text',
    outputs=['text'],
    title=title,
    description='Get POS tags',
    examples=examples,
)
# POS_demo.launch(debug=True)

#################################################################
#5: Language Translation
from language_translation import translation_demo

#################################################################
#6: Gradio ASR
from SpeechTranscription_ASR import ST_ASR_demo

#################################################################
#7: YouTube to Text Script generation
from Text_Script_from_YouTube import textScript_demo

########################################################################################################
demo = gr.TabbedInterface(
    [get_OCR_demo, tts_demo, interface_demo, sentiment_demo, POS_demo,
     translation_demo, ST_ASR_demo, textScript_demo],
    ["Image to Text", "Text to Speech", "Text file to Speech", "Sentiment Analysis",
     "POS findings", "Language Translation", "ASR", "YouTube video to Text Script"],
)

if __name__ == "__main__":
    demo.launch()
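# Note: demo.launch() serves the app locally by default. If a shareable public URL is needed
# (e.g. when running in a hosted notebook), Gradio's share flag can be passed instead:
# demo.launch(share=True)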