File size: 1,763 Bytes
60cb352
 
 
 
 
aa9fefe
 
 
 
 
 
 
 
60cb352
 
 
 
 
 
60011c0
60cb352
aa16dec
 
60cb352
 
 
 
 
 
aa16dec
60cb352
 
 
 
 
 
29fde9f
60cb352
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import streamlit as st
import transformers
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

@st.cache_resource
def load_model():
    """Load the fine-tuned Russian GPT-2 model and its tokenizer.

    Cached by Streamlit (``st.cache_resource``) so the model is built
    only once per server process, not on every script rerun.

    Returns:
        tuple: ``(model, tokenizer)`` — the GPT2LMHeadModel with the
        fine-tuned weights applied, and the matching GPT2Tokenizer.
    """
    borgesian = GPT2LMHeadModel.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2', output_attentions = False, output_hidden_states = False)
    # weights_only=True blocks arbitrary-code execution through the pickle
    # payload of the checkpoint file; a plain state_dict (tensors only)
    # loads fine under this restriction. Requires torch >= 1.13.
    borgesian.load_state_dict(torch.load('borgesian_weights.pt', map_location=torch.device('cpu'), weights_only=True))
    tokenizer = GPT2Tokenizer.from_pretrained("sberbank-ai/rugpt3small_based_on_gpt2")
    return borgesian, tokenizer

# One-time startup: fetch the cached model/tokenizer, pin inference to CPU,
# and switch to eval mode (disables dropout so generation is not perturbed
# by training-time stochastic layers).
borgesian, tokenizer = load_model()
borgesian.to('cpu')
borgesian.eval()

def generate_response(text, temperature, length, top_p):
    """Generate a continuation of *text* with the model and render it on the page.

    Args:
        text: prompt string (the model is trained on Russian text).
        temperature: sampling temperature; cast to float before use.
        length: maximum total sequence length in tokens (prompt included).
        top_p: nucleus-sampling probability mass; cast to float before use.

    Side effects:
        Writes the generated text to the Streamlit page via ``st.write``.
        Returns nothing.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt")
    # Inference only — no_grad avoids building the autograd graph.
    with torch.no_grad():
        out = borgesian.generate(input_ids, do_sample=True, num_beams=2, temperature=float(temperature), top_p=float(top_p), max_length=length)
    generated_text = list(map(tokenizer.decode, out))[0]
    # Trim the output at the last sentence boundary so it doesn't end
    # mid-sentence. If no '.' was generated at all, rfind returns -1 and
    # the slice [:0] would silently display nothing — fall back to the
    # untrimmed text in that case.
    last_full_stop_index = generated_text.rfind('.')
    if last_full_stop_index == -1:
        st.write(generated_text)
    else:
        st.write(generated_text[:last_full_stop_index + 1])

# --- Page header ---
st.title('Borgesian')
st.image('borges.jpg')
st.write('Write a prompt in Russian, and the GPT-based model will follow up with a Borgesian text.')
st.write('Define the parameters of generation:')

# --- Generation controls chosen by the user ---
temp_value = st.slider('Temperature', min_value=1.0, max_value=5.0, value=1.5, step=0.1)
max_len = st.slider('Length', min_value=20, max_value=250, value=50, step=1)
nucleus_p = st.slider('Top-p value', min_value=0.5, max_value=1.0, value=0.9, step=0.05)

prompt = st.text_area("Enter your text:")
if st.button("Send"):
    if not prompt:
        # Empty prompt: nothing to generate from.
        st.warning("Please enter some text.")
    else:
        generate_response(prompt, temp_value, max_len, nucleus_p)
        st.image('penrose_tiling.jpg')