import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the instruction-tuned Falcon-7B model and its tokenizer from the
# Hugging Face Hub. trust_remote_code was required by early Falcon releases,
# which shipped custom modeling code on the Hub.
model_name = "tiiuae/falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
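# Optional (an assumption, not part of the original demo): on a GPU machine
# the model can be loaded in half precision (~14 GB instead of ~28 GB); this
# needs `import torch` and the accelerate package installed.
# model = AutoModelForCausalLM.from_pretrained(
#     model_name, torch_dtype=torch.bfloat16, device_map="auto"
# )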

def generate_text(prompt):
    # Tokenize the prompt, truncating anything beyond 200 tokens.
    inputs = tokenizer(prompt, return_tensors="pt", max_length=200, truncation=True)
    # Without max_new_tokens, generate() falls back to a very short default
    # (20 new tokens), so cap the reply length explicitly. Setting
    # pad_token_id silences the missing-pad-token warning Falcon emits.
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# The gr.inputs / gr.outputs namespaces and the interpretation and layout
# arguments were removed in current Gradio releases; components are now
# passed directly. live=True is also dropped, since re-running a 7B model
# on every keystroke is impractical.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Generated text"),
)

# Start the Gradio app (served locally on http://127.0.0.1:7860 by default).
iface.launch()
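# For a shareable demo link (an assumption about deployment, not part of the
# original script), Gradio can tunnel through a temporary public URL:
# iface.launch(share=True)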