|
import gradio as gr |
|
import transformers |
|
from transformers import AutoModelForCausalLM, AutoModelWithLMHead, AutoTokenizer, pipeline |
|
from transformers import GPT2Tokenizer, GPT2Model |
|
|
|
|
|
# Load the GPT-2 causal language model once at import time and wrap it in a
# reusable Hugging Face text-generation pipeline.
# NOTE(review): model download/load happens at import; first run may be slow.
general_model = AutoModelForCausalLM.from_pretrained('gpt2')

general_generator = pipeline("text-generation", model=general_model, tokenizer="gpt2")

# Removed dead code: the original ran a 700-token generation here
# (general_result = general_generator("Today is ", max_length=700)) followed by
# a bare expression statement — a notebook leftover whose result was never used.
# It only made startup dramatically slower.
|
|
|
|
|
def generator(start_your_text='', max_length=None):
    """Generate a text continuation of *start_your_text* with GPT-2.

    Args:
        start_your_text: Prompt to continue; may be empty, in which case the
            model generates unconditionally from its start token.
        max_length: Optional cap on the total token length of the output.
            ``None`` (the default) keeps the pipeline's built-in default,
            preserving the original behavior for existing callers.

    Returns:
        The generated text (prompt included) as a single string.
    """
    # Only forward max_length when the caller supplied one, so the default
    # call is byte-identical in behavior to the original implementation.
    if max_length is None:
        result = general_generator(start_your_text)
    else:
        result = general_generator(start_your_text, max_length=max_length)
    # The pipeline returns a list of candidate dicts; take the first.
    return result[0]["generated_text"]
|
|
|
# Wire the generator function into a minimal Gradio web UI:
# one text box in, one text box out.
iface = gr.Interface(
    fn=generator,
    inputs="text",
    outputs="text",
)

# Start the local web server and block until it is closed.
iface.launch()