# importing necessary libraries
import tensorflow as tf
import gradio as gr
from transformers import (AutoTokenizer, TFAutoModelForQuestionAnswering,
                          ViltProcessor, ViltForQuestionAnswering, pipeline)
from youtube_transcript_api import YouTubeTranscriptApi

# load the extractive QA model (BERT fine-tuned on SQuAD) and its tokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = TFAutoModelForQuestionAnswering.from_pretrained(
    "bert-large-uncased-whole-word-masking-finetuned-squad", return_dict=False)
nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)
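
# quick smoke test: run one question through the pipeline before wiring up the UI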
context = "My name is Kanishka, I am a data scientist and machine learning engineer."
question = "What is my profession?"
result = nlp(question=question, context=context)
print(f"QUESTION: {question}")
print(f"ANSWER: {result['answer']}")
# creating the answering function for the text QA tab
def func(context, question):
    result = nlp(question=question, context=context)
    return result['answer']
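
# e.g. func(example_2, qst_2) should return the span the model extracts from the
# context (for qst_2, something like "break down and interpret human language")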
example_1 = "(1) Kanishka, Preeti, Hema and Shaksham are the team members. They are working on a machine learning project."
qst_1 = "Who are the team members?"
example_2 = "(2) Natural Language Processing (NLP) allows machines to break down and interpret human language. It's at the core of tools we use every day – from translation software, chatbots, spam filters, and search engines, to grammar correction software, voice assistants, and social media monitoring tools."
qst_2 = "What is NLP used for?"
# visual question answering with ViLT
def getResult(query, image):
    # prepare image + question (Gradio passes the image in as a PIL image)
    text = query
    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    # prepare inputs
    encoding = processor(image, text, return_tensors="pt")
    # forward pass
    outputs = model(**encoding)
    logits = outputs.logits
    idx = logits.argmax(-1).item()
    print("Predicted answer:", model.config.id2label[idx])
    return model.config.id2label[idx]
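
# note: re-instantiating the ViLT processor/model on every call adds latency
# (the weights are only downloaded once and then cached on disk); loading them at
# module level, as done for the BERT QA model above, would likely be faster per request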
# for youtube video summarization
def summarize(Youtube_Video_Link):
    # extract the video id from a link of the form https://www.youtube.com/watch?v=<id>
    video_id = Youtube_Video_Link.split("=")[1]
    try:
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
        summarizer = pipeline('summarization', model='facebook/bart-large-cnn')
        # join the caption snippets into one transcript string
        input_text = ""
        for i in transcript:
            input_text += ' ' + i['text']
        # summarize the transcript in 1000-character chunks
        num_iters = int(len(input_text) / 1000)
        summarized_text = []
        for i in range(0, num_iters + 1):
            start = i * 1000
            end = (i + 1) * 1000
            print("input text \n" + input_text[start:end])
            out = summarizer(input_text[start:end])[0]['summary_text']
            print("Summarized text\n" + out)
            summarized_text.append(out)
        output_text = ' '.join(summarized_text)
        return output_text
    except Exception:
        return "Some error has occurred: either the video link passed is invalid or no captions are present for this video."
title = "YouTube Live Video Summarization"
examples = [
    "https://www.youtube.com/watch?v=zKvd1JwJ4Po",
    "https://www.youtube.com/watch?v=9izcbNYmP8M",
]
description = "Get a YouTube video summarization. Just enter the YouTube video link below. Make sure the video has captions and is not very long, as model computation time will increase."
Youtube_Video_Link = gr.Textbox(placeholder="Input YouTube link here (Note: this will take time if passed a long video)", show_label=False)
App = gr.Interface(fn=summarize, inputs=Youtube_Video_Link, outputs="text", examples=examples, description=description, title=title)
## finished youtube video summarization
# creating the visual QA interface
iface = gr.Interface(fn=getResult, inputs=["text", gr.Image(type="pil")], outputs="text")
# creating the text QA interface
app = gr.Interface(
    fn=func,
    inputs=['textbox', 'text'],
    outputs=gr.Textbox(lines=10),
    title='Question Answering bot',
    description='Input context and question, then get answers!',
    examples=[[example_1, qst_1],
              [example_2, qst_2]],
    allow_flagging="manual",
    flagging_options=["incorrect", "ambiguous", "offensive", "other"],
).queue()
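
# queue() routes requests through a queue so long-running model calls
# aren't cut off by the default request timeout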
# launching the app
gr.TabbedInterface([iface, app, App], ["Visual QA", "Text QA", "Video Summarization"]).launch(
    auth=('user', 'teamwork'), auth_message="Check your login details sent to your email")