|
import gradio as gr |
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
# BUG FIX: "DeepPavlov/rubert-base-cased" is a BERT (masked-LM) checkpoint.
# Loading it with AutoModelForCausalLM leaves the causal LM head randomly
# initialized, so generate() produces nonsense. Use a genuine Russian
# causal-LM checkpoint instead (GPT-style, trained for left-to-right
# generation).
model_name = "ai-forever/rugpt3small_based_on_gpt2"

# First call downloads and caches the tokenizer/weights; later runs hit the cache.
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)
# Inference-only service: switch off dropout / training-mode layers.
model.eval()
|
|
|
|
|
def generate_text(prompt: str) -> str:
    """Generate a continuation of *prompt* with the module-level model.

    Args:
        prompt: Seed text entered by the user (may be empty).

    Returns:
        The decoded model output — the prompt plus its generated
        continuation — with special tokens stripped.
    """
    # tokenizer(...) returns input_ids AND attention_mask; passing the mask
    # avoids the transformers "attention mask not set" warning and makes
    # padding behavior unambiguous (tokenizer.encode drops the mask).
    encoded = tokenizer(prompt, return_tensors="pt")

    # Inference only: no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        output = model.generate(
            **encoded,
            max_length=100,  # cap on total length (prompt + continuation)
        )

    # output has shape (1, seq_len); decode the single generated sequence.
    return tokenizer.decode(output[0], skip_special_tokens=True)
|
|
|
|
|
# BUG FIX: gr.inputs.Textbox / gr.outputs.Textbox were deprecated in
# Gradio 2.x and removed in 3.x+, so the original raised AttributeError on
# any current Gradio. Components are now constructed directly as gr.Textbox.
# User-facing strings are kept unchanged.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(placeholder="Введите текст для генерации"),
    outputs=gr.Textbox(label="Сгенерированный текст"),
)

# Start the local web UI (blocks until the server is stopped).
iface.launch()
|
|