|
import streamlit as st |
|
from transformers import pipeline |
|
|
|
|
|
@st.cache_resource
def load_model():
    """Build and cache the Hugging Face text-generation pipeline.

    Decorated with ``st.cache_resource`` so the model is loaded only once
    per Streamlit server process and shared across reruns/sessions.

    Returns:
        A ``transformers`` pipeline configured for the ``text-generation``
        task.
    """
    # NOTE(review): this checkpoint is a BERT encoder (masked LM); confirm
    # it is actually suitable for the "text-generation" task.
    checkpoint = "DeepPavlov/rubert-base-cased-conversational"
    return pipeline("text-generation", model=checkpoint)
|
|
|
|
|
def process_text_with_model(text):
    """Generate a model response for the user's *text*.

    Args:
        text: The user's question/prompt as a plain string.

    Returns:
        The generated text for the first (and only) returned sequence.
        Note that HF text-generation pipelines include the prompt in
        ``generated_text``. Returns ``""`` for empty input.
    """
    # Guard: don't invoke the model on an empty prompt.
    if not text:
        return ""
    generator = load_model()
    # Use max_new_tokens instead of max_length: max_length counts the
    # prompt tokens as well, so a long question could leave little or no
    # room for the generated answer. max_new_tokens bounds only the
    # continuation.
    outputs = generator(text, max_new_tokens=100, num_return_sequences=1)
    return outputs[0]["generated_text"]
|
|
|
|
|
# --- Streamlit UI: page title, question input, and model answer output ---
st.title("Чат-бот по \"Аленькому цветочку\"")

question = st.text_input("Введите ваш вопрос:")

# Only query the model once the user has typed something non-empty.
if question:
    st.write(process_text_with_model(question))