|
import gradio as gr |
|
import random |
|
import time |
|
from transformers import pipeline,AutoModelForSeq2SeqLM,AutoTokenizer |
|
|
|
# Load the FLAN-T5 base seq2seq model once at import time.
# NOTE: from_pretrained performs disk (and possibly network) I/O here.
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")

# Matching tokenizer for the model above.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")

# Text of the most recently uploaded file. Written by upload_file() (via
# `global context`) and read by generate_answer(); empty until an upload.
context=""
|
|
|
def generate_answer(question):
    """Answer *question* with the module-level FLAN-T5 model, grounded in
    the text currently held in the global ``context``.

    Parameters
    ----------
    question : str
        The user's question.

    Returns
    -------
    str
        The decoded model answer. Per the prompt instruction, the model is
        asked to say it doesn't know when the answer is absent from context.
    """
    prompt = (
        question
        + ". \nAnswer this question given context in next line if answer is present in context otherwise say I don't know about that. Context: \n "
        + context
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens lifts generate()'s short default cap (~20 tokens),
    # which would otherwise truncate longer answers mid-sentence.
    outputs = model.generate(**inputs, max_new_tokens=256)
    # Fix: batch_decode returns a list of strings; return the single decoded
    # answer so the chat reply doesn't render as "['...']".
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
|
|
|
def upload_file(file):
    """Read an uploaded file's text into the module-level ``context``.

    Parameters
    ----------
    file : object
        Gradio upload payload; only its ``.name`` attribute (a local
        filesystem path) is used.

    Returns
    -------
    str
        The uploaded file's path. Fix: this handler is wired with a
        ``gr.File`` output component, but previously returned None, so the
        file display never populated.
    """
    global context
    # NOTE(review): the UploadButton also accepts "pdf", but this reads the
    # file as UTF-8 text — a binary PDF will raise UnicodeDecodeError here.
    # Confirm whether PDF support was ever intended.
    with open(file.name, encoding="utf-8") as f:
        context = f.read()
    return file.name
|
|
|
# UI definition. Component creation order inside the Blocks context controls
# on-screen layout, so these statements must not be reordered.
with gr.Blocks() as demo:
    # Displays the uploaded file; populated by the upload handler below.
    file_output = gr.File()
    upload_button = gr.UploadButton("Click to Upload a File", file_types=["txt", "pdf"])
    # On upload, upload_file reads the file's text into the global `context`.
    upload_button.upload(upload_file, upload_button, file_output)

    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    # One-click reset of the textbox, chat history, and upload button state.
    clear = gr.ClearButton([msg, chatbot,upload_button])

    def respond(message, chat_history):
        """Chat callback: answer `message` and append the turn to history.

        Returns ("", updated_history) so the textbox is cleared on submit
        and the chatbot shows the new exchange.
        """
        ans=generate_answer(message)

        chat_history.append((message, f"\n {ans} "))
        return "", chat_history

    # Pressing Enter in the textbox sends (msg, chatbot) through respond().
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

    # NOTE(review): the "π" in the labels below looks like mojibake of
    # thumbs-up/thumbs-down emoji — confirm the intended glyphs. Also note
    # neither button has a click handler wired, so they currently do nothing.
    with gr.Row(visible=True) as button_row:
        upvote_btn = gr.Button(value="π Upvote", interactive=True)
        downvote_btn = gr.Button(value="π Downvote", interactive=True)
|
|
|
# Enable Gradio's request queue before launching (serializes/schedules
# concurrent requests to the event handlers).
demo.queue()

# Start the server; debug=True keeps the process in the foreground and
# prints handler errors to the console.
demo.launch(debug=True)