Duplicate from togethercomputer/GPT-JT
Browse files

Co-authored-by: Jue Wang <juewang@users.noreply.huggingface.co>
- .gitattributes +34 -0
- README.md +15 -0
- app.py +291 -0
- requirements.txt +0 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: GPT-JT
|
3 |
+
emoji: 🚀
|
4 |
+
colorFrom: blue
|
5 |
+
colorTo: pink
|
6 |
+
sdk: streamlit
|
7 |
+
sdk_version: 1.10.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: true
|
10 |
+
duplicated_from: togethercomputer/GPT-JT
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
14 |
+
|
15 |
+
Join us on Discord at https://discord.gg/6ZVDU8tTD4
|
app.py
ADDED
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import requests
|
3 |
+
import time
|
4 |
+
from ast import literal_eval
|
5 |
+
from datetime import datetime
|
6 |
+
|
7 |
+
def to_md(text):
    """Convert plain text to markdown-safe HTML by replacing newlines with <br /> tags."""
    # Removed a commented-out duplicate of this line (dead code).
    return text.replace("\n", "<br />")
|
10 |
+
|
11 |
+
# @st.cache
|
12 |
+
def infer(
    prompt,
    model_name,
    max_new_tokens=10,
    temperature=0.1,
    top_p=1.0,
    top_k=40,
    num_completions=1,
    repetition_penalty=1.0,
    seed=42,
    stop="\n"
):
    """Call the Together inference API and return the generated completion text.

    Parameters arrive as strings from the Streamlit UI and are coerced to the
    proper numeric types here. ``stop`` is a ";"-separated list of stop words;
    the returned text is truncated at the first stop word encountered.

    Raises:
        ValueError: if any sampling parameter is outside its allowed range.
    """
    # UI-facing model names -> API model identifiers.
    model_name_map = {
        "GPT-JT-6B-v1": "Together-gpt-JT-6B-v1",
    }
    max_new_tokens = int(max_new_tokens)
    num_completions = int(num_completions)
    temperature = float(temperature)
    top_p = float(top_p)
    top_k = int(top_k)
    repetition_penalty = float(repetition_penalty)
    stop = stop.split(";")
    # NOTE: `seed` is accepted for interface compatibility but is not sent to
    # the API (same as the original implementation).

    # Validate ranges explicitly instead of with `assert`, which is stripped
    # when Python runs with -O.
    if not 1 <= max_new_tokens <= 256:
        raise ValueError("max_new_tokens must be in [1, 256]")
    if not 1 <= num_completions <= 5:
        raise ValueError("num_completions must be in [1, 5]")
    if not 0.0 <= temperature <= 10.0:
        raise ValueError("temperature must be in [0.0, 10.0]")
    if not 0.0 <= top_p <= 1.0:
        raise ValueError("top_p must be in [0.0, 1.0]")
    if not 1 <= top_k <= 1000:
        raise ValueError("top_k must be in [1, 1000]")
    if not 0.9 <= repetition_penalty <= 3.0:
        raise ValueError("repetition_penalty must be in [0.9, 3.0]")

    # The backend presumably rejects temperature == 0; nudge to near-greedy.
    if temperature == 0.0:
        temperature = 0.01
    # Send a single space instead of an empty prompt.
    if prompt == "":
        prompt = " "
    my_post_dict = {
        # Bug fix: honor the `model_name` argument instead of hard-coding the
        # API identifier (the map was previously built but never used).
        # Unknown names are passed through unchanged.
        "model": model_name_map.get(model_name, model_name),
        "prompt": prompt,
        "top_p": top_p,
        "top_k": top_k,
        "temperature": temperature,
        "max_tokens": max_new_tokens,
        "repetition_penalty": repetition_penalty,
        "stop": stop,
    }
    print(f"send: {datetime.now()}")
    headers = {
        'Authorization': "Bearer " + st.secrets["TOGETHER_API_KEY"],
        'User-Agent': 'GPT-JT HuggingFace Space'
    }
    response = requests.get("https://staging.together.xyz/api/inference", params=my_post_dict, headers=headers).json()
    generated_text = response['output']['choices'][0]['text']
    print(f"recv: {datetime.now()}")

    # Truncate at the first occurrence of any stop word.
    for stop_word in stop:
        if stop_word != '' and stop_word in generated_text:
            generated_text = generated_text[:generated_text.find(stop_word)]

    return generated_text
|
71 |
+
|
72 |
+
|
73 |
+
def set_preset():
    """Load the few-shot prompt and sampling parameters for the selected preset.

    Reads ``st.session_state.preset`` and writes ``prompt``, ``temperature``,
    ``top_p``, ``max_new_tokens``, and ``stop`` back into session state. All
    presets share ``top_p="1.0"`` and ``stop=r'\\n'``; only the prompt,
    temperature, and max_new_tokens differ, so the per-preset data lives in a
    single table instead of seven near-identical branches. Unknown preset
    names leave session state untouched (the original ``else: pass``).
    """
    # preset name -> (few-shot prompt, temperature, max_new_tokens);
    # all values are strings because the UI text inputs expect strings.
    presets = {
        "Question Answering": ('''
Please answer the following question:

Question: What is the capital of Canada?
Answer: Ottawa

Question: What is the currency of Switzerland?
Answer: Swiss franc

Question: In which country is Wisconsin located?
Answer:
'''.strip(), "0.0", "5"),
        "Sentiment Analysis": ('''
Label the tweets as either "positive", "negative", "mixed", or "neutral":

Tweet: I can say that there isn't anything I would change.
Label: positive

Tweet: I'm not sure about this.
Label: neutral

Tweet: I liked some parts but I didn't like other parts.
Label: mixed

Tweet: I think the background image could have been better.
Label: negative

Tweet: I really like it.
Label:
'''.strip(), "0.0", "2"),
        "Topic Classification": ('''
Given a news article, classify its topic.
Possible labels: 1. World 2. Sports 3. Business 4. Sci/Tech

Article: A nearby star thought to harbor comets and asteroids now appears to be home to planets, too.
Label: Sci/Tech

Article: Soaring crude prices plus worries about the economy and the outlook for earnings are expected to hang over the stock market next week during the depth of the summer doldrums.
Label: Business

Article: Murtagh a stickler for success Northeastern field hockey coach Cheryl Murtagh doesn't want the glare of the spotlight that shines on her to detract from a team that has been the America East champion for the past three years and has been to the NCAA tournament 13 times.
Label:
'''.strip(), "0.0", "5"),
        "Paraphrasing": ('''
Paraphrase the given sentence into a different sentence.

Input: Can you recommend some upscale restaurants in New York?
Output: What upscale restaurants do you recommend in New York?

Input: What are the famous places we should not miss in Paris?
Output: Recommend some of the best places to visit in Paris?

Input: Could you recommend some hotels that have cheap price in Zurich?
Output:
'''.strip(), "0.8", "20"),
        "Text Summarization": ('''
Given a review from Amazon's food products, the task is to generate a short summary of the given review in the input.

Input: I have bought several of the Vitality canned dog food products and have found them all to be of good quality. The product looks more like a stew than a processed meat and it smells better. My Labrador is finicky and she appreciates this product better than most.
Output: Good Quality Dog Food

Input: Product arrived labeled as Jumbo Salted Peanuts...the peanuts were actually small sized unsalted. Not sure if this was an error or if the vendor intended to represent the product as 'Jumbo'.
Output: Not as Advertised

Input: My toddler loves this game to a point where he asks for it. That's a big thing for me. Secondly, no glitching unlike one of their competitors (PlayShifu). Any tech I don’t have to reach out to support for help is a good tech for me. I even enjoy some of the games and activities in this. Overall, this is a product that shows that the developers took their time and made sure people would not be asking for refund. I’ve become bias regarding this product and honestly I look forward to buying more of this company’s stuff. Please keep up the great work.
Output:
'''.strip(), "0.0", "10"),
        "Word Sense Disambiguation": ('''
Identify which sense of a word is meant in a given context.

Context: The river overflowed the bank.
Word: bank
Sense: river bank

Context: A mouse takes much more room than a trackball.
Word: mouse
Sense: computer mouse

Context: The bank will not be accepting cash on Saturdays.
Word: bank
Sense: commercial (finance) banks

Context: Bill killed the project
Word: kill
Sense:
'''.strip(), "0.0", "10"),
        "Natural Language Inference": ('''
Given a pair of sentences, choose whether the two sentences agree (entailment)/disagree (contradiction) with each other.
Possible labels: 1. entailment 2. contradiction

Sentence 1: The skier was on the edge of the ramp. Sentence 2: The skier was dressed in winter clothes.
Label: entailment

Sentence 1: The boy skated down the staircase railing. Sentence 2: The boy is a newbie skater.
Label: contradiction

Sentence 1: Two middle-aged people stand by a golf hole. Sentence 2: A couple riding in a golf cart.
Label:
'''.strip(), "0.0", "2"),
    }

    config = presets.get(st.session_state.preset)
    if config is None:
        # Unrecognized preset: keep current session state (original behavior).
        return

    prompt, temperature, max_new_tokens = config
    st.session_state.prompt = prompt
    st.session_state.temperature = temperature
    st.session_state.top_p = "1.0"
    st.session_state.max_new_tokens = max_new_tokens
    st.session_state.stop = r'\n'
|
224 |
+
|
225 |
+
|
226 |
+
def main():
    """Render the GPT-JT Streamlit playground UI and dispatch inference requests."""

    # First run: seed session state with the default preset and the sampling
    # parameters that set_preset() does not manage (top_k, repetition_penalty).
    if 'preset' not in st.session_state:
        st.session_state.preset = "Sentiment Analysis"
        st.session_state.top_k = "40"
        st.session_state.repetition_penalty = "1.0"
        st.session_state.stop = r'\n'
        set_preset()

    st.title("GPT-JT")

    col1, col2 = st.columns([1, 2])

    with col1:
        # Only one model is exposed; the selectbox keeps the UI extensible.
        model_name = st.selectbox("Model", ["GPT-JT-6B-v1"])

    with col2:
        # Changing the example preset re-runs set_preset() via on_change,
        # which rewrites the prompt and sampling defaults in session state.
        preset = st.selectbox(
            label="Examples",
            options=('Question Answering', 'Sentiment Analysis',
                "Topic Classification", "Paraphrasing", "Text Summarization",
                "Word Sense Disambiguation", "Natural Language Inference"),
            on_change=set_preset,
            key="preset",
        )

    col3, col4 = st.columns([1, 5])

    with col3:
        # All parameters are kept as strings here; infer() performs the
        # int/float coercion and range validation.
        max_new_tokens = st.text_input('Max new tokens', st.session_state.max_new_tokens)
        temperature = st.text_input('temperature', st.session_state.temperature)
        top_k = st.text_input('top_k', st.session_state.top_k)
        top_p = st.text_input('top_p', st.session_state.top_p)
        # num_completions = st.text_input('num_completions (only the best one will be returend)', "1")
        num_completions = "1"
        repetition_penalty = st.text_input('repetition_penalty', st.session_state.repetition_penalty)
        stop = st.text_input('stop, split by;', st.session_state.stop)
        # seed = st.text_input('seed', "42")
        seed = "42"

    with col4:

        prompt_area = st.empty()
        prompt = prompt_area.text_area(
            "Prompt",
            value=st.session_state.prompt,
            max_chars=8000,
            height=500,
        )

        # Placeholder that is overwritten with the prompt + highlighted
        # completion once the user submits.
        generated_area = st.empty()
        generated_area.markdown("(Generate here)")

        button_submit = st.button("Submit")

        if button_submit:
            # Echo the prompt in bold while the request is in flight.
            generated_area.markdown("<b>" + to_md(prompt) + "</b>", unsafe_allow_html=True)
            # NOTE(review): literal_eval on a triple-quoted wrapping of the raw
            # user-supplied stop string decodes escape sequences such as \n,
            # but a stop value containing ''' (or a trailing backslash) will
            # raise SyntaxError here — consider a dedicated escape decoder;
            # TODO confirm intended behavior for such inputs.
            report_text = infer(
                prompt, model_name=model_name, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k,
                num_completions=num_completions, repetition_penalty=repetition_penalty,
                seed=seed, stop=literal_eval("'''"+stop+"'''"),
            )
            # Re-render: bold prompt followed by the completion highlighted in green.
            generated_area.markdown("<b>" + to_md(prompt) + "</b><mark style='background-color: #cbeacd'>" + to_md(report_text)+"</mark>", unsafe_allow_html=True)

if __name__ == '__main__':
    main()
|
requirements.txt
ADDED
File without changes
|