BigSalmon committed on
Commit
2c25e3b
1 Parent(s): ca4635f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +207 -45
app.py CHANGED
@@ -5,84 +5,246 @@ import os
5
  import torch
6
  import torch.nn as nn
7
  from transformers.activations import get_activation
8
- from transformers import AutoTokenizer, AutoModelWithLMHead, AutoModelForCausalLM, GPTJForCausalLM
9
- from transformers import XGLMTokenizer, XGLMForCausalLM
10
- st.title('GPT2: To see all prompt outlines: https://huggingface.co/BigSalmon/InformalToFormalLincoln46')
11
- #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
- number_of_outputs = st.sidebar.slider("Number of Outputs", 50, 350)
 
13
 
14
  @st.cache(allow_output_mutation=True)
15
  def get_model():
16
- #model = #AutoModelWithLMHead.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln6")
17
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln5")
18
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln4")
19
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln3")
20
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln2")
21
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln")
22
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln24")
23
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln25")
24
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln26")
25
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln27")
26
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln28")
27
- #model = AutoModelWithLMHead.from_pretrained("BigSalmon/InformalToFormalLincoln29")
28
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln30")
29
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln31")
30
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln32")
31
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln33")
32
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln34")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln35")
34
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln39")
35
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln40")
36
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln44")
37
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln46")
38
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln52")
39
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/Points4")
40
- #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln61Paraphrase")
41
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln61Paraphrase")
42
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln66Paraphrase")
43
- tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln65Paraphrase")
44
- model = AutoModelForCausalLM.from_pretrained("BigSalmon/PointsToParagraphNeo1.3B", low_cpu_mem_usage=True)
 
 
45
  return model, tokenizer
46
 
47
  model, tokenizer = get_model()
 
48
  g = """informal english: garage band has made people who know nothing about music good at creating music.
49
  Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
50
- informal english: chrome extensions can make doing regular tasks much easier to get done.
51
-
52
- ***
53
 
 
54
  Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
 
55
  informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
56
  Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
57
 
58
- ***
59
-
60
  informal english: google translate has made talking to people who do not share your language easier.
61
  Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
62
 
63
- ***
64
-
65
  informal english: corn fields are all across illinois, visible once you leave chicago.
66
  Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
67
 
68
- ***
69
-
70
  informal english: """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  with st.form(key='my_form'):
72
  prompt = st.text_area(label='Enter sentence', value=g)
73
  submit_button = st.form_submit_button(label='Submit')
 
 
 
 
74
  if submit_button:
75
  with torch.no_grad():
76
  text = tokenizer.encode(prompt)
77
  myinput, past_key_values = torch.tensor([text]), None
78
  myinput = myinput
79
- #myinput= myinput
80
  logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
81
  logits = logits[0,-1]
82
  probabilities = torch.nn.functional.softmax(logits)
83
- best_logits, best_indices = logits.topk(number_of_outputs)
84
  best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
85
  text.append(best_indices[0].item())
86
  best_probabilities = probabilities[best_indices].tolist()
87
  words = []
88
- st.write(best_words)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  import torch
6
  import torch.nn as nn
7
  from transformers.activations import get_activation
8
+ from transformers import AutoTokenizer, AutoModelForCausalLM
9
+
10
+
11
+ st.title('GPT2: To see all prompt outlines: https://huggingface.co/BigSalmon/InformalToFormalLincoln64Paraphrase')
12
+
13
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
 
15
@st.cache(allow_output_mutation=True)
def get_model():
    """Load and cache the tokenizer/model pair used by the app.

    Cached by Streamlit (`allow_output_mutation=True`) so the large
    checkpoint is downloaded and instantiated only once per session.

    Returns:
        tuple: ``(model, tokenizer)`` ready for inference.
    """
    # Single source of truth for the checkpoint name so the tokenizer and
    # model can never drift apart. Swap this string to experiment with the
    # other BigSalmon informal-to-formal variants (Lincoln60Paraphrase,
    # GPTNeo1.3BInformalToFormal, InformalToFormalLincoln55/51/49/43/41/38/
    # 37/36/35/31/21, MediumInformalToFormalLincoln, PointsOneSent,
    # PointsToSentence).
    checkpoint = "BigSalmon/InformalToFormalLincoln69Paraphrase"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint)
    return model, tokenizer
70
 
71
  model, tokenizer = get_model()
72
+
73
  g = """informal english: garage band has made people who know nothing about music good at creating music.
74
  Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
 
 
 
75
 
76
+ informal english: chrome extensions can make doing regular tasks much easier to get done.
77
  Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
78
+
79
  informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
80
  Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
81
 
 
 
82
  informal english: google translate has made talking to people who do not share your language easier.
83
  Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
84
 
 
 
85
  informal english: corn fields are all across illinois, visible once you leave chicago.
86
  Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
87
 
 
 
88
  informal english: """
89
+
90
+ number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 20)
91
+
92
def BestProbs(prompt):
    """Display the top-10 next-token candidates for *prompt*, and for each
    candidate show its own top-20 continuations via :func:`BestProbs2`.

    Args:
        prompt: text to continue; surrounding whitespace is stripped.

    Returns:
        str: the display string of the last candidate (kept for backward
        compatibility with callers that use the return value).
    """
    prompt = prompt.strip()
    token_ids = tokenizer.encode(prompt)
    input_tensor = torch.tensor([token_ids])
    # return_dict=False yields (logits, past_key_values) as a tuple.
    logits, _ = model(input_tensor, past_key_values=None, return_dict=False)
    # Logits for the token that would follow the final position.
    next_token_logits = logits[0, -1]
    _, best_indices = next_token_logits.topk(10)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    f = ""  # defined up front so the return is safe even for an empty loop
    for word in best_words:
        print("_______")
        f = f"${word} $\n"
        st.write(f)
        # One level of lookahead: likely continuations of prompt + word.
        BestProbs2(prompt + word)
    return f
109
+
110
def BestProbs2(prompt):
    """Print and display the top-20 next-token candidates for *prompt*.

    Args:
        prompt: text to continue; surrounding whitespace is stripped.
    """
    prompt = prompt.strip()
    input_tensor = torch.tensor([tokenizer.encode(prompt)])
    logits, _ = model(input_tensor, past_key_values=None, return_dict=False)
    # Logits for the token that would follow the final position.
    next_token_logits = logits[0, -1]
    _, best_indices = next_token_logits.topk(20)
    for idx in best_indices:
        word = tokenizer.decode([idx.item()])
        print(word)
        st.write(word)
123
+
124
def LogProbs(prompt):
    """Build a 10x20 lookahead table for *prompt*.

    For each of the 10 most likely next tokens, list the 20 tokens most
    likely to follow it. NOTE(review): as in the original code, each
    candidate is appended to the *running* prompt (the prompt accumulates
    across iterations rather than resetting to the original) — confirm
    this is the intended behavior.

    Args:
        prompt: text to continue; surrounding whitespace is stripped.

    Returns:
        pandas.DataFrame: one column per first-level candidate, each
        holding that candidate's 20 follow-up tokens.
    """
    # BUG FIX: `pd` was used but pandas was never imported anywhere in the
    # file, so this function raised NameError. Local import keeps the fix
    # self-contained.
    import pandas as pd

    prompt = prompt.strip()
    first_logits, _ = model(
        torch.tensor([tokenizer.encode(prompt)]),
        past_key_values=None,
        return_dict=False,
    )
    _, first_indices = first_logits[0, -1].topk(10)

    col1 = []  # first-level candidate tokens (table columns)
    col2 = []  # flat list of follow-ups, 20 per first-level candidate
    for idx in first_indices:
        word = tokenizer.decode([idx.item()])
        print("_______")
        col1.append(word)
        # Candidate is appended to the running prompt (cumulative; see note).
        prompt = (prompt + word).strip()
        follow_logits, _ = model(
            torch.tensor([tokenizer.encode(prompt)]),
            past_key_values=None,
            return_dict=False,
        )
        _, follow_indices = follow_logits[0, -1].topk(20)
        col2.extend(tokenizer.decode([j.item()]) for j in follow_indices)

    # Slicing replaces the original 200-entry hand-written dict literal.
    # Duplicate candidate tokens collapse (later wins), exactly as a dict
    # literal with repeated keys would.
    d = {col1[k]: col2[20 * k:20 * (k + 1)] for k in range(len(col1))}
    df = pd.DataFrame(data=d)
    print(df)
    st.write(df)
    return df
170
+
171
def BestProbs5(prompt):
    """For each of the top-``number_of_outputs`` next tokens, display the
    extended prompt and three sampled 5-token continuations of it.

    Args:
        prompt: text to continue; surrounding whitespace is stripped.
    """
    prompt = prompt.strip()
    input_tensor = torch.tensor([tokenizer.encode(prompt)])
    logits, _ = model(input_tensor, past_key_values=None, return_dict=False)
    # Logits for the token that would follow the final position.
    _, best_indices = logits[0, -1].topk(number_of_outputs)
    for idx in best_indices:
        word = tokenizer.decode([idx.item()])
        print("\n")
        extended = prompt + word
        st.write(extended)
        # "hey" is banned from the sampled continuations — presumably an
        # unwanted filler word; TODO confirm.
        continuations = run_generate(extended, "hey")
        st.write(continuations)
188
+
189
def run_generate(text, bad_words):
    """Sample three 5-token continuations of *text*.

    Args:
        text: prompt to continue.
        bad_words: whitespace-separated words to ban; each is banned with a
            leading space, i.e. as the start of a new word.

    Returns:
        list[str]: three decoded continuations with the prompt text removed.
    """
    input_ids = tokenizer.encode(text, return_tensors='pt')
    # Reuse the encoded tensor for the length instead of encoding twice.
    prompt_len = input_ids.shape[1]
    # [7829] and [40940] are hard-coded banned token ids — presumably
    # unwanted artifacts for this tokenizer; TODO confirm what they decode to.
    bad_word_ids = [[7829], [40940]]
    for bad_word in bad_words.split():
        # Leading space so the ban matches the word at a word boundary.
        bad_word_ids.append(tokenizer(" " + bad_word).input_ids)
    sample_outputs = model.generate(
        input_ids,
        do_sample=True,
        max_length=prompt_len + 5,
        min_length=prompt_len + 5,
        top_k=50,
        temperature=1.0,
        num_return_sequences=3,
        bad_words_ids=bad_word_ids,
    )
    # Strip the echoed prompt from each decoded sample.
    continuations = [tokenizer.decode(out).replace(text, "") for out in sample_outputs]
    print(continuations)
    return continuations
215
+
216
with st.form(key='my_form'):
    prompt = st.text_area(label='Enter sentence', value=g)
    submit_button = st.form_submit_button(label='Submit')
    submit_button2 = st.form_submit_button(label='Fast Forward')
    submit_button3 = st.form_submit_button(label='Fast Forward 2.0')
    submit_button4 = st.form_submit_button(label='Get Top')

if submit_button:
    # Submit: show the 250 most likely next tokens for the prompt.
    with torch.no_grad():
        text = tokenizer.encode(prompt)
        # NOTE(review): the input is moved to `device` but the model is
        # never moved off CPU — on a CUDA machine this will mismatch;
        # confirm before enabling GPU.
        myinput = torch.tensor([text]).to(device)
        logits, _ = model(myinput, past_key_values=None, return_dict=False)
        logits = logits[0, -1]
        # Explicit dim: softmax without `dim` is deprecated in torch.
        probabilities = torch.nn.functional.softmax(logits, dim=-1)
        best_logits, best_indices = logits.topk(250)
        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
        text.append(best_indices[0].item())
        best_probabilities = probabilities[best_indices].tolist()
        st.write(best_words)

if submit_button2:
    # Fast Forward: 10x20 lookahead table.
    print("----")
    st.write("___")
    m = LogProbs(prompt)
    st.write("___")
    st.write(m)
    st.write("___")

if submit_button3:
    # Fast Forward 2.0: recursive top-10 / top-20 expansion.
    print("----")
    st.write("___")
    # BUG FIX: the original passed the function object itself to st.write
    # (`st.write(BestProbs)`); call it on the prompt like the other handlers.
    BestProbs(prompt)

if submit_button4:
    # Get Top: sampled continuations for each top candidate.
    BestProbs5(prompt)