Spaces: Runtime error

matthew null committed · Commit 831b959
Parent(s): Duplicate from breadlicker45/gpt-ya-gen

Files changed:
- .gitattributes +27 -0
- README.md +13 -0
- app.py +57 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: gpt-ya text gen
+emoji: ⚡
+colorFrom: blue
+colorTo: white
+sdk: streamlit
+sdk_version: 1.9.0
+app_file: app.py
+pinned: true
+duplicated_from: breadlicker45/gpt-ya-gen
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py
ADDED
@@ -0,0 +1,57 @@
+import streamlit as st
+import time
+from transformers import pipeline
+import torch
+trust_remote_code=True
+st.markdown('## Text-generation gpt-ya from Breadlicker45')
+use_auth_token=True
+@st.cache(allow_output_mutation=True, suppress_st_warning =True, show_spinner=False)
+def get_model():
+    return pipeline('text-generation', model=model, do_sample=False)
+
+col1, col2 = st.columns([2,1])
+
+with st.sidebar:
+    st.markdown('## Model Parameters')
+
+    max_length = st.slider('Max text length', 0, 500, 80)
+
+    num_beams = st.slider('N° tree beams search', 1, 15, 2)
+
+    early_stopping = st.selectbox(
+        'Early stopping text generation',
+        ('True', 'False'), key={'True' : True, 'False': False}, index=0)
+
+    no_ngram_repeat = st.slider('Max repetition limit', 1, 5, 2)
+
+with col1:
+    prompt= st.text_area('Your prompt here',
+        '''What is the meaning of life?''')
+
+with col2:
+    select_model = st.radio(
+        "Select the model to use:",
+        ('gpt-ya', 'gpt-ya-1-1', 'gpt-ya-1-1-160M'), index = 2)
+
+    if select_model == 'gpt-ya':
+        model = 'breadlicker45/gpt-ya'
+    elif select_model == 'gpt-ya-1-1':
+        model = 'BreadAi/gpt-YA-1-1_70M'
+    elif select_model == 'gpt-ya-1-1-160M':
+        model = 'BreadAi/gpt-YA-1-1_160M'
+
+with st.spinner('Loading Model... (This may take a while)'):
+    generator = get_model()
+    st.success('Model loaded correctly!')
+
+gen = st.info('Generating text...')
+answer = generator(prompt, max_length=max_length, no_repeat_ngram_size=no_ngram_repeat,
+                   early_stopping=early_stopping, num_beams=num_beams, do_sample=False)
+gen.empty()
+
+lst = answer[0]['generated_text']
+
+t = st.empty()
+for i in range(len(lst)):
+    t.markdown("#### %s" % lst[0:i])
+    time.sleep(0.04)
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+streamlit
+transformers
+torch