# Imports import gradio as gr import whisper from pytube import YouTube from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration, AutoTokenizer, AutoModelForSeq2SeqLM import torch from wordcloud import WordCloud import re import os class GradioInference: def __init__(self): # OpenAI's Whisper model sizes self.sizes = list(whisper._MODELS.keys()) # Whisper's available languages for ASR self.langs = ["none"] + sorted(list(whisper.tokenizer.LANGUAGES.values())) # Default size self.current_size = "base" # Default model size self.loaded_model = whisper.load_model(self.current_size) # Initialize Pytube Object self.yt = None # Initialize summary model for English self.summarizer = pipeline("summarization", model="facebook/bart-large-cnn") # Initialize VoiceLabT5 model and tokenizer self.keyword_model = T5ForConditionalGeneration.from_pretrained( "Voicelab/vlt5-base-keywords" ) self.keyword_tokenizer = T5Tokenizer.from_pretrained( "Voicelab/vlt5-base-keywords" ) # Sentiment Classifier self.classifier = pipeline("text-classification", model="lxyuan/distilbert-base-multilingual-cased-sentiments-student", return_all_scores=False) # Initialize Multilingual summary model self.tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/mT5_multilingual_XLSum") self.model = AutoModelForSeq2SeqLM.from_pretrained("csebuetnlp/mT5_multilingual_XLSum") # self.llm_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct") # self.pipeline = pipeline( # "text-generation", #task # model="tiiuae/falcon-7b-instruct", # tokenizer=self.llm_tokenizer, # trust_remote_code=True, # do_sample=True, # top_k=10, # num_return_sequences=1, # eos_token_id=self.tokenizer.eos_token_id # ) def __call__(self, link, lang, size, progress=gr.Progress()): """ Call the Gradio Inference python class. This class gets access to a YouTube video using python's library Pytube and downloads its audio. Then it uses the Whisper model to perform Automatic Speech Recognition (i.e Speech-to-Text). Once the function has the transcription of the video it proccess it to obtain: - Summary: using Facebook's BART transformer. - KeyWords: using VoiceLabT5 keyword extractor. - Sentiment Analysis: using Hugging Face's default sentiment classifier - WordCloud: using the wordcloud python library. """ progress(0, desc="Starting analysis") if self.yt is None: self.yt = YouTube(link) # Pytube library to access to YouTube audio stream path = self.yt.streams.filter(only_audio=True)[0].download(filename="tmp.mp4") if lang == "none": lang = None if size != self.current_size: self.loaded_model = whisper.load_model(size) self.current_size = size progress(0.20, desc="Transcribing") # Transcribe the audio extracted from pytube results = self.loaded_model.transcribe(path, language=lang) progress(0.40, desc="Summarizing") # Perform summarization on the transcription transcription_summary = self.summarizer( results["text"], max_length=150, min_length=30, do_sample=False ) #### Prueba WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip())) input_ids_sum = self.tokenizer( [WHITESPACE_HANDLER(results["text"])], return_tensors="pt", padding="max_length", truncation=True, max_length=512 )["input_ids"] output_ids_sum = self.model.generate( input_ids=input_ids_sum, max_length=130, no_repeat_ngram_size=2, num_beams=4 )[0] summary = self.tokenizer.decode( output_ids_sum, skip_special_tokens=True, clean_up_tokenization_spaces=False ) #### Fin prueba progress(0.50, desc="Extracting Keywords") # Extract keywords using VoiceLabT5 task_prefix = "Keywords: " input_sequence = task_prefix + results["text"] input_ids = self.keyword_tokenizer( input_sequence, return_tensors="pt", truncation=False ).input_ids output = self.keyword_model.generate( input_ids, no_repeat_ngram_size=3, num_beams=4 ) predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True) keywords = [x.strip() for x in predicted.split(",") if x.strip()] formatted_keywords = "\n".join([f"β€’ {keyword}" for keyword in keywords]) progress(0.80, desc="Extracting Sentiment") # Define a dictionary to map labels to emojis sentiment_emojis = { "positive": "Positive πŸ‘πŸΌ", "negative": "Negative πŸ‘ŽπŸΌ", "neutral": "Neutral 😢", } # Sentiment label label = self.classifier(summary)[0]["label"] # Format the label with emojis formatted_sentiment = sentiment_emojis.get(label, label) progress(0.90, desc="Generating Wordcloud") # Generate WordCloud object wordcloud = WordCloud(colormap = "Oranges").generate(results["text"]) # WordCloud image to display wordcloud_image = wordcloud.to_image() if lang == "english": return ( results["text"], transcription_summary[0]["summary_text"], formatted_keywords, formatted_sentiment, wordcloud_image, ) else: return ( results["text"], summary, formatted_keywords, formatted_sentiment, wordcloud_image, ) def populate_metadata(self, link): """ Access to the YouTube video title and thumbnail image to further display it params: - link: a YouTube URL. """ if not link: return None, None self.yt = YouTube(link) return self.yt.thumbnail_url, self.yt.title def from_audio_input(self, lang, size, audio_file, progress=gr.Progress()): """ Call the Gradio Inference python class. Uses it directly the Whisper model to perform Automatic Speech Recognition (i.e Speech-to-Text). Once the function has the transcription of the video it proccess it to obtain: - Summary: using Facebook's BART transformer. - KeyWords: using VoiceLabT5 keyword extractor. - Sentiment Analysis: using Hugging Face's default sentiment classifier - WordCloud: using the wordcloud python library. """ progress(0, desc="Starting analysis") if lang == "none": lang = None if size != self.current_size: self.loaded_model = whisper.load_model(size) self.current_size = size progress(0.20, desc="Transcribing") results = self.loaded_model.transcribe(audio_file, language=lang) progress(0.40, desc="Summarizing") # Perform summarization on the transcription transcription_summary = self.summarizer( results["text"], max_length=150, min_length=30, do_sample=False ) ########################## PRUEBA LLM ################################# # from langchain import HuggingFacePipeline, PromptTemplate, LLMChain # llm = HuggingFacePipeline(pipeline = self.pipeline, model_kwargs = {'temperature':0}) # template = """ # Write a concise summary of the following text delimited by triple backquotes. # ```{text}``` # CONCISE SUMMARY: # """ # prompt = PromptTemplate(template=template, input_variables=["text"]) # llm_chain = LLMChain(prompt=prompt, llm=llm) # text = results["text"] # summ = llm_chain.run(text) ########################## FIN PRUEBA LLM ################################# #### Prueba WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip())) input_ids_sum = self.tokenizer( [WHITESPACE_HANDLER(results["text"])], return_tensors="pt", padding="max_length", truncation=True, max_length=512 )["input_ids"] output_ids_sum = self.model.generate( input_ids=input_ids_sum, max_length=130, no_repeat_ngram_size=2, num_beams=4 )[0] summary = self.tokenizer.decode( output_ids_sum, skip_special_tokens=True, clean_up_tokenization_spaces=False ) #### Fin prueba progress(0.50, desc="Extracting Keywords") # Extract keywords using VoiceLabT5 task_prefix = "Keywords: " input_sequence = task_prefix + results["text"] input_ids = self.keyword_tokenizer( input_sequence, return_tensors="pt", truncation=False ).input_ids output = self.keyword_model.generate( input_ids, no_repeat_ngram_size=3, num_beams=4 ) predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True) keywords = [x.strip() for x in predicted.split(",") if x.strip()] formatted_keywords = "\n".join([f"β€’ {keyword}" for keyword in keywords]) progress(0.80, desc="Extracting Sentiment") # Define a dictionary to map labels to emojis sentiment_emojis = { "positive": "Positive πŸ‘πŸΌ", "negative": "Negative πŸ‘ŽπŸΌ", "neutral": "Neutral 😢", } # Sentiment label label = self.classifier(summary)[0]["label"] # Format the label with emojis formatted_sentiment = sentiment_emojis.get(label, label) progress(0.90, desc="Generating Wordcloud") # WordCloud object wordcloud = WordCloud(colormap = "Oranges").generate( results["text"] ) wordcloud_image = wordcloud.to_image() if lang == "english": return ( results["text"], # summ, transcription_summary[0]["summary_text"], formatted_keywords, formatted_sentiment, wordcloud_image, ) else: return ( results["text"], # summ, summary, formatted_keywords, formatted_sentiment, wordcloud_image, ) gio = GradioInference() title = "YouTube Insights" description = "Your AI-powered video analytics tool" block = gr.Blocks() with block as demo: gr.HTML( """

YouTube Insights πŸ’‘

Your AI-powered video analytics tool ✨

""" ) with gr.Group(): with gr.Tab("From YouTube πŸ“Ή"): with gr.Box(): with gr.Row().style(equal_height=True): size = gr.Dropdown( label="Speech-to-text Model Size", choices=gio.sizes, value="base" ) lang = gr.Dropdown( label="Language (Optional)", choices=gio.langs, value="none" ) link = gr.Textbox( label="YouTube Link", placeholder="Enter YouTube link..." ) title = gr.Label(label="Video Title") with gr.Row().style(equal_height=True): img = gr.Image(label="Thumbnail") text = gr.Textbox( label="Transcription", placeholder="Transcription Output...", lines=10, ).style(show_copy_button=True, container=True) with gr.Row().style(equal_height=True): summary = gr.Textbox( label="Summary", placeholder="Summary Output...", lines=5 ).style(show_copy_button=True, container=True) keywords = gr.Textbox( label="Keywords", placeholder="Keywords Output...", lines=5 ).style(show_copy_button=True, container=True) label = gr.Label(label="Sentiment Analysis") wordcloud_image = gr.Image(label="WordCloud") with gr.Row().style(equal_height=True): clear = gr.ClearButton( [link, title, img, text, summary, keywords, label, wordcloud_image], scale=1, value="Clear πŸ—‘οΈ" ) btn = gr.Button("Get video insights πŸ”Ž", variant="primary", scale=1) btn.click( gio, inputs=[link, lang, size], outputs=[text, summary, keywords, label, wordcloud_image], ) link.change(gio.populate_metadata, inputs=[link], outputs=[img, title]) with gr.Tab("From Audio file πŸŽ™οΈ"): with gr.Box(): with gr.Row().style(equal_height=True): size = gr.Dropdown( label="Model Size", choices=gio.sizes, value="base" ) lang = gr.Dropdown( label="Language (Optional)", choices=gio.langs, value="none" ) audio_file = gr.Audio(type="filepath") with gr.Row().style(equal_height=True): text = gr.Textbox( label="Transcription", placeholder="Transcription Output...", lines=10, ).style(show_copy_button=True, container=False) with gr.Row().style(equal_height=True): summary = gr.Textbox( label="Summary", placeholder="Summary Output", lines=5 ) keywords = gr.Textbox( label="Keywords", placeholder="Keywords Output", lines=5 ) label = gr.Label(label="Sentiment Analysis") wordcloud_image = gr.Image(label="WordCloud") with gr.Row().style(equal_height=True): clear = gr.ClearButton([audio_file,text, summary, keywords, label, wordcloud_image], scale=1, value="Clear πŸ—‘οΈ") btn = gr.Button( "Get audio insights πŸ”Ž", variant="primary", scale=1 ) btn.click( gio.from_audio_input, inputs=[lang, size, audio_file], outputs=[text, summary, keywords, label, wordcloud_image], ) with block: gr.Markdown("### Video Examples") gr.Examples(["https://www.youtube.com/shorts/xDNzz8yAH7I","https://www.youtube.com/watch?v=kib6uXQsxBA&pp=ygURc3RldmUgam9icyBzcGVlY2g%3D"], inputs=link) gr.Markdown("### Audio Examples") gr.Examples( [[os.path.join(os.path.dirname(__file__),"audios/TED_lagrange_point.wav")],[os.path.join(os.path.dirname(__file__),"audios/TED_platon.wav")]], inputs=audio_file) gr.Markdown("### About the app:") with gr.Accordion("What is YouTube Insights?", open=False): gr.Markdown( "YouTube Insights is a tool developed for academic purposes that allows you to analyze YouTube videos or audio files. It provides features like transcription, summarization, keyword extraction, sentiment analysis, and word cloud generation for multimedia content." ) with gr.Accordion("How does YouTube Insights work?", open=False): gr.Markdown( "YouTube Insights leverages several powerful AI models and libraries. It uses OpenAI's Whisper for Automatic Speech Recognition (ASR) to transcribe audio content. It summarizes the transcribed text using Facebook's BART model, extracts keywords with VoiceLabT5, performs sentiment analysis with DistilBERT, and generates word clouds." ) with gr.Accordion("What languages are supported for the analysis?", open=False): gr.Markdown( "YouTube Insights supports multiple languages for transcription and analysis. You can select your preferred language from the available options when using the app." ) with gr.Accordion("Can I analyze audio files instead of YouTube videos?", open=False): gr.Markdown( "Yes, you can analyze audio files directly. Simply upload your audio file to the app, and it will provide the same transcription, summarization, keyword extraction, sentiment analysis, and word cloud generation features." ) with gr.Accordion("What are the different model sizes available for transcription?", open=False): gr.Markdown( "The app uses a Speech-to-text model that has different training sizes, from tiny to large. Hence, the bigger the model the accurate the transcription." ) with gr.Accordion("How long does it take to analyze a video or audio file?", open=False): gr.Markdown( "The time taken for analysis may vary based on the duration of the video or audio file and the selected model size. Shorter content will be processed more quickly." ) with gr.Accordion("Who developed YouTube Insights?" ,open=False): gr.Markdown( "YouTube Insights was developed by students as part of the 2022/23 Master's in Big Data & Data Science program at Universidad Complutense de Madrid for academic purposes (Trabajo de Fin de Master)." ) gr.HTML( """

Trabajo de Fin de MΓ‘ster - Grupo 3

2023 Master in Big Data & Data Science - Universidad Complutense de Madrid

""" ) demo.launch()