# (Removed web-scrape residue from the Hugging Face Spaces page header:
#  "Spaces:" / "Sleeping" / "Sleeping" — not part of the program.)
# -*- coding: utf-8 -*-
"""gradio_deploy.ipynb

Automatically generated by Colaboratory.
"""
import os
from timeit import default_timer as timer

import gradio
import numpy as np
import torch  # needed by generate_query_response (torch.ones_like)
from PIL import Image
from tensorflow import keras
#import transformers
from transformers import AutoModelWithLMHead, AutoTokenizer
from transformers import GPT2Tokenizer, GPT2LMHeadModel
# Load the pretrained GPT-2 model and tokenizer once at import time so every
# Gradio request reuses the same in-memory weights instead of reloading.
loaded_model = GPT2LMHeadModel.from_pretrained("gpt2")
loaded_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
def generate_query_response(prompt, max_length=200):
    """Generate a GPT-2 continuation of ``prompt``.

    Args:
        prompt: Input text to continue.
        max_length: Maximum total token length (prompt + generation).
            Gradio's Number component delivers a float (or ``None`` when the
            field is left blank), so the value is normalized to an int here.

    Returns:
        The decoded generated text, including the prompt prefix.
    """
    model = loaded_model
    tokenizer = loaded_tokenizer

    # Gradio Number inputs arrive as float/None; model.generate needs an int.
    max_length = int(max_length) if max_length else 200

    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Single un-padded sequence, so every position is a real token.
    attention_mask = torch.ones_like(input_ids)
    # GPT-2 has no dedicated pad token; the conventional choice is EOS.
    pad_token_id = tokenizer.eos_token_id

    output = model.generate(
        input_ids,
        max_length=max_length,
        num_return_sequences=1,
        attention_mask=attention_mask,
        pad_token_id=pad_token_id,
    )
    return tokenizer.decode(output[0])
# --- Gradio UI ---
# NOTE(review): the original used gradio.inputs.* / gradio.outputs.*, which
# were deprecated in Gradio 3.x and removed in 4.x; the top-level components
# below are the supported equivalents.

# Input from user
in_prompt = gradio.Textbox(lines=2, label='Enter the question')
in_max_length = gradio.Number(label='Enter the max length')

# Output response
out_response = gradio.Textbox(label='Answer')

# Gradio interface to generate UI link
iface = gradio.Interface(
    fn=generate_query_response,
    inputs=[in_prompt, in_max_length],
    outputs=out_response,
)
iface.launch(debug=True)