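"""Minimal Streamlit demo: generate a completion for a user prompt with Meta's OPT-1.3B
(via the Hugging Face transformers pipeline) and display it with a typewriter effect."""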
import streamlit as st
import time
from transformers import pipeline
import torch
#from transformers import AutoModelForCausalLM, AutoTokenizer

#@st.cache(allow_output_mutation=True)
#def define_model():
#    model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b", torch_dtype=torch.float16).cuda()
#    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
#    return model, tokenizer
# Cache the pipeline so the 1.3B-parameter model is loaded once per session,
# not on every Streamlit rerun.
@st.cache(allow_output_mutation=True)
def load_generator():
    return pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)

generator = load_generator()



#@st.cache(allow_output_mutation=True)
#def opt_model(prompt, model, tokenizer, num_sequences = 1, max_length = 50):  
#    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
#    generated_ids = model.generate(input_ids, num_return_sequences=num_sequences, max_length=max_length)
#    answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
#    return answer


#model, tokenizer = define_model()

prompt = st.text_area('Your prompt here',
                      '''Hello, I am conscious and''')
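# Beam search with 10 beams plus no_repeat_ngram_size=3 discourages repetitive
# completions; max_length=100 is the total token budget, prompt included.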
answer = generator(prompt, max_length=100,no_repeat_ngram_size=3, early_stopping=True, num_beams=10)
 
#answer = opt_model(prompt, model, tokenizer,)
#answer = define_model(prompt)
text = answer[0]['generated_text']

# "Type out" the generated text one character at a time for a streaming effect.
t = st.empty()
for i in range(len(text)):
    t.markdown(" %s..." % text[:i + 1])
    time.sleep(0.04)