# subu4444's picture
# Create app.py
# f52b12a
# raw
# history blame
# 1.95 kB
import gradio as gr
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import json
# --- Model and shared-state setup -------------------------------------------

context_val = ''  # NOTE(review): currently unused in this file — confirm before removing

# Extractive question-answering model fine-tuned on SQuAD 2.0.
q_n_a_model_name = "deepset/roberta-base-squad2"
q_n_a_model = AutoModelForQuestionAnswering.from_pretrained(q_n_a_model_name)
tokenizer = AutoTokenizer.from_pretrained(q_n_a_model_name)

# Shared "context" input component.
# Fix: dropped the original `outputs="text"` kwarg — `outputs` is not a
# gr.Textbox parameter and raises a TypeError on current Gradio versions.
context = gr.Textbox(
    label="Add the Context (Paragraph or texts) for which you want to get insights",
    lines=10,
)
def q_n_a_fn(context, text):
    """Answer a question about the given context with the SQuAD2 model.

    Args:
        context: Paragraph(s) of text to search for the answer.
        text: The question to ask about *context*.

    Returns:
        The extracted answer string.
    """
    # Fix: build the QA pipeline once and cache it on the function —
    # the original re-created the pipeline on every request, which is
    # needlessly slow for a per-call loop-invariant.
    if not hasattr(q_n_a_fn, "_pipeline"):
        q_n_a_fn._pipeline = pipeline(
            'question-answering', model=q_n_a_model, tokenizer=tokenizer
        )
    result = q_n_a_fn._pipeline({'question': text, 'context': context})
    return result['answer']
def classification_fn(text):
    """Placeholder classifier: echo the input text.

    Fix: the original ignored *text* and returned the module-level
    ``context`` — a gr.Textbox *component object*, not a string — which is
    not a usable "label" output. Until a real classification pipeline is
    wired in, return the input unchanged so the UI shows something sane.

    Args:
        text: The text to classify.

    Returns:
        The input text (placeholder behavior).
    """
    # TODO(review): plug in an actual text-classification pipeline here.
    return text
def translate_fn(text):
    """Placeholder translator: echo the selected target language.

    Fix: the original ignored *text* and returned the module-level
    ``context`` gr.Textbox component object, which is not valid text
    output. Until a translation model is wired in, echo the input
    (the Radio selection: "French", "Hindi", or "Spanish").

    Args:
        text: The language selected in the Radio input.

    Returns:
        The input value (placeholder behavior).
    """
    # TODO(review): plug in an actual translation pipeline here.
    return text
def _context_box():
    """Return a fresh context textbox for one Interface.

    Fix: the original reused the single module-level ``context`` component
    across five gr.Interface()s; a Gradio component instance can only be
    rendered once, so each interface needs its own instance.
    """
    return gr.Textbox(
        label="Add the Context (Paragraph or texts) for which you want to get insights",
        lines=10,
    )


with gr.Blocks(theme='gradio/soft') as demo:
    gr.Markdown("<h1>Basic NLP Operations</h1>")
    gr.Markdown("Bringing basic NLP operations together.")

    with gr.Tab("Question and Answer"):
        with gr.Row():
            gr.Interface(
                fn=q_n_a_fn,
                inputs=[_context_box(), gr.Textbox(label="Ask question", lines=1)],
                outputs="text",
            )

    with gr.Tab("Classifier"):
        with gr.Row():
            gr.Interface(fn=classification_fn, inputs=[_context_box()], outputs="label")

    with gr.Tab("Translation"):
        with gr.Row():
            gr.Interface(
                fn=translate_fn,
                inputs=[gr.Radio(["French", "Hindi", "Spanish"], label="Languages", info="Select language")],
                outputs="text",
            )

    with gr.Tab("Summarization"):
        with gr.Row():
            # NOTE(review): placeholder — reuses classification_fn until a
            # summarization pipeline exists.
            gr.Interface(fn=classification_fn, inputs=[_context_box()], outputs="label")

    with gr.Tab("Text To Speech"):
        with gr.Row():
            # NOTE(review): placeholder — "audio" output needs a real TTS fn.
            gr.Interface(fn=classification_fn, inputs=[_context_box()], outputs="audio")

    with gr.Tab("Text To Text"):
        with gr.Row():
            gr.Interface(fn=classification_fn, inputs=[_context_box()], outputs="text")
# Launch the Gradio server only when this file is run directly
# (not when imported, e.g. by hosting tooling that manages the launch itself).
if __name__ == "__main__":
    demo.launch()