Spaces: Build error
RaviRaj988 committed · Commit 87074fa
Parent(s): 9d91371
first commit

Files changed:
- app.py +174 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,174 @@
import gradio as gr
from youtube_transcript_api import YouTubeTranscriptApi
from transformers import AutoTokenizer
from transformers import pipeline
from transformers import AutoModelForQuestionAnswering
import pandas as pd
from sentence_transformers import SentenceTransformer, util
import torch

# extractive QA model, plus the sentence-transformer used to map answers back to transcript segments
model_ckpt = "deepset/minilm-uncased-squad2"
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = AutoModelForQuestionAnswering.from_pretrained(model_ckpt)
modelST = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# input - video link, output - full transcript
def get_transcript(link):
    print("******** Inside get_transcript ********")
    print(f"link to be extracted is : {link}")
    video_id = link.split("=")[1]
    # handle additional query parameters such as timestamp, ...
    video_id = video_id.split("&")[0]
    print(f"video id extracted is : {video_id}")
    transcript = YouTubeTranscriptApi.get_transcript(video_id)
    FinalTranscript = ' '.join([i['text'] for i in transcript])
    return FinalTranscript, transcript, video_id
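
# A sketch of what get_transcript returns (hypothetical call; needs network
# access and English captions available for the video):
#   full_text, segments, vid = get_transcript("https://www.youtube.com/watch?v=smUHQndcmOY")
#   full_text -> the whole transcript joined into one string
#   segments  -> the raw list of dicts like {'text': ..., 'start': ..., 'duration': ...}
#   vid       -> the extracted video id, here "smUHQndcmOY"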


# input - question and transcript, output - answer timestamps
def get_answers_timestamp(question, final_transcript, transcript):
    print("******** Inside get_answers_timestamp ********")

    context = final_transcript
    print(f"Input Question is : {question}")
    print(f"Type of transcript is : {type(context)}, Length of transcript is : {len(context)}")
    # the transcript is far longer than the model's 512-token limit, so split it
    # into overflowing windows that overlap by 25 tokens
    inputs = tokenizer(question, context, return_overflowing_tokens=True, max_length=512, stride=25)

    # getting a list of contexts available after striding
    contx = []
    for window in inputs["input_ids"]:
        contx.append(tokenizer.decode(window).split('[SEP]')[1].strip())

    # answer the question once per window and collect every candidate
    lst = []
    pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
    for contexts in contx:
        lst.append(pipe(question=question, context=contexts))

    print(f"contx list is : {contx}")
    lst_scores = [dicts['score'] for dicts in lst]
    print(f"lst_scores is : {lst_scores}")
    # rank candidates by score and keep the best and second best
    # (sorting index positions avoids the index shift that removing the max
    # from lst_scores would cause when the runner-up comes after the best)
    ranked = sorted(range(len(lst_scores)), key=lambda i: lst_scores[i], reverse=True)
    idxmax, idxmax2 = ranked[0], ranked[1]

    sentence_for_timestamp = lst[idxmax]['answer']
    sentence_for_timestamp_secondbest = lst[idxmax2]['answer']

    dftranscript = pd.DataFrame(transcript)

    # embed every transcript segment and both candidate answers
    embedding_1 = modelST.encode(dftranscript.text, convert_to_tensor=True)
    embedding_2 = modelST.encode(sentence_for_timestamp, convert_to_tensor=True)
    embedding_3 = modelST.encode(sentence_for_timestamp_secondbest, convert_to_tensor=True)

    # the segment most similar to the answer carries the timestamp; step back
    # 3 segments so playback starts slightly before the answer
    similarity_tensor = util.pytorch_cos_sim(embedding_1, embedding_2)
    idx = torch.argmax(similarity_tensor)
    start_timestamp = dftranscript.iloc[[int(idx)-3]].start.values[0]
    start_timestamp = round(start_timestamp)

    similarity_tensor_secondbest = util.pytorch_cos_sim(embedding_1, embedding_3)
    idx_secondbest = torch.argmax(similarity_tensor_secondbest)
    start_timestamp_secondbest = dftranscript.iloc[[int(idx_secondbest)-3]].start.values[0]
    start_timestamp_secondbest = round(start_timestamp_secondbest)

    return start_timestamp, start_timestamp_secondbest
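
# A hedged end-to-end sketch using the two functions above (the sample question
# is one of the app's own; actual timestamps depend on the video's captions):
#   full_text, segments, vid = get_transcript("https://www.youtube.com/watch?v=smUHQndcmOY")
#   t1, t2 = get_answers_timestamp(
#       "when does the video talk about locked image tuning or lit?", full_text, segments)
# t1 and t2 are start offsets in whole seconds, ready for the ?start= embed
# parameter used in display_vid below.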


def display_vid(url, question, sample_question=None, example_video=None):
    print("******** display_vid ********")
    if question == '':
        question = sample_question

    # iframe markup for the initial input video
    html_in = "<iframe width='560' height='315' src=" + url + " frameborder='0' allowfullscreen></iframe>"

    if len(example_video) != 0:
        print(f"example_video is : {example_video}")
        url = example_video[0]
    # get transcript
    final_transcript, transcript, video_id = get_transcript(url)

    # get the best and second-best answer timestamps for the question
    ans_timestamp, ans_timestamp_secondbest = get_answers_timestamp(question, final_transcript, transcript)

    # embed the video twice, starting playback at each answer timestamp
    html_out = "<iframe width='730' height='400' src='https://www.youtube.com/embed/" + video_id + "?start=" + str(ans_timestamp) + "' title='YouTube video player' frameborder='0' allow='accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture' allowfullscreen></iframe>"
    print(f"html output is : {html_out}")
    html_out_secondbest = "<iframe width='730' height='400' src='https://www.youtube.com/embed/" + video_id + "?start=" + str(ans_timestamp_secondbest) + "' title='YouTube video player' frameborder='0' allow='accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture' allowfullscreen></iframe>"

    if question == '':
        print(f"Inside display_vid(), Sample_Question coming from Radio box is BEFORE : {sample_question}")
        sample_ques = set_example_question(sample_question)
        print(f"Inside display_vid(), Sample Question coming from Radio box is AFTER : {sample_ques}")
    else:
        sample_ques = question
    return html_out, html_out_secondbest, sample_ques, url
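
# The markup handed to each gr.HTML output looks like this (the start value
# shown is illustrative):
#   <iframe width='730' height='400'
#           src='https://www.youtube.com/embed/smUHQndcmOY?start=212' ...></iframe>
# The ?start= query parameter is what makes the embedded player begin at the
# answer's timestamp.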

def set_example_question(sample_question):
    print("******* Inside Sample Questions ********")
    print(f"Sample Question coming from Radio box is : {sample_question}")
    print(f"What is the Return value : {gr.Radio.update(value=sample_question)}")
    return gr.Radio.update(value=sample_question) #input_ques.update(example)

demo = gr.Blocks()

with demo:
    gr.Markdown("<h1><center>Ask a Question to a YouTube Video and get the Video played from the answer timestamp</center></h1>")
    gr.Markdown(
        """### How many times have you watched a long video or podcast on YouTube and wished it had 'explanatory' timestamps?
        **A Space by [Yuvraj Sharma](https://huggingface.co/ysharma). How to use this Space:** Either provide a new YouTube video link or use the sample video link provided, then enter a question about the content you would like to explore in the given video.
        The app will generate timestamps and play the video from those timestamps in the space provided. You will see two video displays, corresponding to the two best guesses by the underlying models. Both videos might start at the same timestamp, depending on the question and the content of the video, so please bear with it!
        Also, a couple of small caveats -
        - The app performs only as well as the available English transcripts for the given YouTube video. If there are no transcripts, the app will not work.
        - Please make sure the YouTube video links you paste here don't have trailing values like *&t=8077s*.
        - Lastly, once you have queried a video, you might have to refresh the page for the next query (will try and fix this).

        **Motivation behind building this app:** When we see a long video without timestamps, we often wonder 'if' the content we are looking for is in there, and 'where' in the video it is. The idea is that we might have questions like 'Is the xxxx thing covered in this video?', 'Does the host talk about the architecture of the xxxxx model?', or 'Does the host talk about an alien doorway on Mars?' and so on.

        **So this app could help you reach that timestamp in record time!**

        **Best part:** You don't even have to move away from the Space tab in your browser, as the YouTube video gets played within the given view.
        """
    )
    with gr.Row():
        input_url = gr.Textbox(label="Input a YouTube video link")
        input_ques = gr.Textbox(label="Ask a Question")

    with gr.Row():
        output_vid = gr.HTML(label="Video from timestamp 1", show_label=True)
        output_vid_secondbest = gr.HTML(label="Video from timestamp 2", show_label=True)

    with gr.Row():
        example_question = gr.Dropdown(
            ["Choose a sample question", "Does video talk about different modalities",
             "does the model uses perceiver architecture?",
             "when does the video talk about locked image tuning or lit?",
             "comparison between gpt3 and jurassic?",
             "Has flamingo passed turing test yet?",
             "Any funny examples in video?",
             "is it possible to download the stylegan model?",
             "what was very cool?",
             "what is the cool library?"], label="Choose a sample Question", value=None)
    with gr.Row():
        example_video = gr.CheckboxGroup(["https://www.youtube.com/watch?v=smUHQndcmOY"], label="Choose a sample YouTube video")

    b1 = gr.Button("Publish Video")

    b1.click(display_vid, inputs=[input_url, input_ques, example_question, example_video], outputs=[output_vid, output_vid_secondbest, input_ques, input_url])

    with gr.Row():
        gr.Markdown('''
        #### Model Credits
        1. [Question Answering](https://huggingface.co/deepset/minilm-uncased-squad2)
        1. [Sentence Transformer](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2)
        ''')

    with gr.Row():
        gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=gradio-blocks_ask_questions_to_youtube_videos)")

demo.launch(enable_queue=True, debug=True)
requirements.txt
ADDED
@@ -0,0 +1,5 @@
transformers
youtube-transcript-api
pandas
sentence-transformers
torch
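# Note: none of the packages above are version-pinned, and gradio itself is not
# listed because Hugging Face Spaces installs it from the Space's sdk metadata.
# Pinning known-good releases (e.g. matching transformers and
# sentence-transformers versions) is the usual first step when a Space shows
# "Build error".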