Update app.py
app.py
CHANGED
@@ -1,194 +1,85 @@
 import streamlit as st
-import
-import pandas as pd
-import os
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
 import torch
-import torch.nn as nn
-from transformers.activations import get_activation
-from transformers import AutoTokenizer, AutoModelForCausalLM
-from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast

-
-st.title('GPT2: To see all prompt outlines: https://huggingface.co/BigSalmon/InformalToFormalLincoln64Paraphrase')
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+first = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\n\ninformal english: """

 @st.cache(allow_output_mutation=True)
 def get_model():
-
-
-    model = AutoModelForCausalLM.from_pretrained("BigSalmon/
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln84Paraphrase")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln84Paraphrase")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln76ParaphraseXL")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln76ParaphraseXL", low_cpu_mem_usage=True)
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln77Paraphrase")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln77Paraphrase")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln75Paraphrase")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln75Paraphrase")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln71Paraphrase")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln71Paraphrase")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln70Paraphrase")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln70Paraphrase")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln69Paraphrase")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln69Paraphrase")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln60Paraphrase")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln60Paraphrase")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/GPTNeo1.3BInformalToFormal")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo1.3BInformalToFormal")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln55")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln55")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln51")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln51")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln45")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln49")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln43")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln43")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln41")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln2")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln21")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln40")
     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln41")
-
-    #
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/
-
-    #
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln37")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln36")
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln36")
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln41")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln49")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPT2InformalToFormalLincoln42")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/Points3")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo1.3BPointsLincolnFormalInformal")
     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")
-
-    #
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/
-
-    #
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/
-
-    #
-    #model = AutoModelForCausalLM.from_pretrained("
-
-    #
-    #
-
-    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/
-    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/
-
-
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln7")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincolnConciseWordy")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln2")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln3")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln4")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln50")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPT2Neo1.3BPoints2")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPT2Neo1.3BPoints3")
+    #model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
+    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln63Paraphrase")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln68Paraphrase")
+    #model2 = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln63Paraphrase")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln63Paraphrase")
+    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln73Paraphrase")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln73Paraphrase")
+    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln76Paraphrase")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln76Paraphrase")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln78Paraphrase")
+    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln78Paraphrase")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln80Paraphrase")
+    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln80Paraphrase")
+    #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln82Paraphrase")
+    #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln82Paraphrase")
+    model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln85Paraphrase")
+    tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln85Paraphrase")
+    tokenizer2 = AutoTokenizer.from_pretrained("gpt2")
+    model2 = AutoModelForCausalLM.from_pretrained("gpt2")
+    return model, model2, tokenizer, tokenizer2

-model, tokenizer = get_model()
-
-g = """informal english: garage band has made people who know nothing about music good at creating music.
-Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
-
-informal english: chrome extensions can make doing regular tasks much easier to get done.
-Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
-
-informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
-Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
-
-informal english: google translate has made talking to people who do not share your language easier.
-Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
-
-informal english: corn fields are all across illinois, visible once you leave chicago.
-Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
-
-informal english: """
+model, model2, tokenizer, tokenizer2 = get_model()

-
+st.text('''For Prompt Templates: https://huggingface.co/BigSalmon/InformalToFormalLincoln82Paraphrase''')

-
-
-
-
-
-    logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
-    logits = logits[0,-1]
-    probabilities = torch.nn.functional.softmax(logits)
-    best_logits, best_indices = logits.topk(10)
-    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
-    for i in best_words[0:10]:
-        print("_______")
-        st.write(f"${i} $\n")
-        f = (f"${i} $\n")
-        m = (prompt + f"{i}")
-        BestProbs2(m)
-    return f
+temp = st.sidebar.slider("Temperature", 0.7, 1.5)
+number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)
+lengths = st.sidebar.slider("Length", 3, 500)
+bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")
+logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)

-def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
-    for i in best_words[0:9]:
-        print("_______")
-        f = i
-        col1.append(f)
-        m = (prompt + f"{i}")
-        #print("^^" + f + " ^^")
-        prompt = m.strip()
-        text = tokenizer.encode(prompt)
-        myinput, past_key_values = torch.tensor([text]), None
-        myinput = myinput
-        logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
-        logits = logits[0,-1]
-        probabilities = torch.nn.functional.softmax(logits)
-        best_logits, best_indices = logits.topk(20)
-        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
-        for i in best_words[0:20]:
-            #print(i)
-            col2.append(i)
-    #print(col1)
-    #print(col2)
-    d = {col1[0]: [col2[0], col2[1], col2[2], col2[3], col2[4], col2[5], col2[6], col2[7], col2[8], col2[9], col2[10], col2[11], col2[12], col2[13], col2[14], col2[15], col2[16], col2[17], col2[18], col2[19]],
-         col1[1]: [col2[20], col2[21], col2[22], col2[23], col2[24], col2[25], col2[26], col2[27], col2[28], col2[29], col2[30], col2[31], col2[32], col2[33], col2[34], col2[35], col2[36], col2[37], col2[38], col2[39]],
-         col1[2]: [col2[40], col2[41], col2[42], col2[43], col2[44], col2[45], col2[46], col2[47], col2[48], col2[49], col2[50], col2[51], col2[52], col2[53], col2[54], col2[55], col2[56], col2[57], col2[58], col2[59]],
-         col1[3]: [col2[60], col2[61], col2[62], col2[63], col2[64], col2[65], col2[66], col2[67], col2[68], col2[69], col2[70], col2[71], col2[72], col2[73], col2[74], col2[75], col2[76], col2[77], col2[78], col2[79]],
-         col1[4]: [col2[80], col2[81], col2[82], col2[83], col2[84], col2[85], col2[86], col2[87], col2[88], col2[89], col2[90], col2[91], col2[92], col2[93], col2[94], col2[95], col2[96], col2[97], col2[98], col2[99]],
-         col1[5]: [col2[100], col2[101], col2[102], col2[103], col2[104], col2[105], col2[106], col2[107], col2[108], col2[109], col2[110], col2[111], col2[112], col2[113], col2[114], col2[115], col2[116], col2[117], col2[118], col2[119]],
-         col1[6]: [col2[120], col2[121], col2[122], col2[123], col2[124], col2[125], col2[126], col2[127], col2[128], col2[129], col2[130], col2[131], col2[132], col2[133], col2[134], col2[135], col2[136], col2[137], col2[138], col2[139]],
-         col1[7]: [col2[140], col2[141], col2[142], col2[143], col2[144], col2[145], col2[146], col2[147], col2[148], col2[149], col2[150], col2[151], col2[152], col2[153], col2[154], col2[155], col2[156], col2[157], col2[158], col2[159]],
-         col1[8]: [col2[160], col2[161], col2[162], col2[163], col2[164], col2[165], col2[166], col2[167], col2[168], col2[169], col2[170], col2[171], col2[172], col2[173], col2[174], col2[175], col2[176], col2[177], col2[178], col2[179]],
-         col1[9]: [col2[180], col2[181], col2[182], col2[183], col2[184], col2[185], col2[186], col2[187], col2[188], col2[189], col2[190], col2[191], col2[192], col2[193], col2[194], col2[195], col2[196], col2[197], col2[198], col2[199]]}
-    df = pd.DataFrame(data=d)
-    print(df)
-    st.write(df)
-    return df
+def run_generate(text, bad_words):
+    yo = []
+    input_ids = tokenizer.encode(text, return_tensors='pt')
+    res = len(tokenizer.encode(text))
+    bad_words = bad_words.split()
+    bad_word_ids = []
+    for bad_word in bad_words:
+        bad_word = " " + bad_word
+        ids = tokenizer(bad_word).input_ids
+        bad_word_ids.append(ids)
+    sample_outputs = model.generate(
+        input_ids,
+        do_sample=True,
+        max_length= res + lengths,
+        min_length = res + lengths,
+        top_k=50,
+        temperature=temp,
+        num_return_sequences=number_of_outputs,
+        bad_words_ids=bad_word_ids
+    )
+    for i in range(number_of_outputs):
+        e = tokenizer.decode(sample_outputs[i])
+        e = e.replace(text, "")
+        yo.append(e)
+    return yo

 def BestProbs5(prompt):
     prompt = prompt.strip()
@@ -207,88 +98,88 @@ def BestProbs5(prompt):
     st.write(g)
     l = run_generate(g, "hey")
     st.write(l)
-
-def
-    prompt = prompt.strip()
-    prompt = prompt.split(word)[0]
-    prompt = "Translated into the Style of Abraham Lincoln: " + prompt + "( " + word + " /"
-    #prompt = prompt.replace("/ ", "/")
-    print(prompt)
-    text = tokenizer.encode(prompt)
-    myinput, past_key_values = torch.tensor([text]), None
-    myinput = myinput
-    logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
-    logits = logits[0,-1]
-    probabilities = torch.nn.functional.softmax(logits)
-    best_logits, best_indices = logits.topk(100)
-    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
-    for i in best_words[0:100]:
-        print(prompt)
-        st.write(i)
-
-def run_generate(text, bad_words):
+
+def run_generate2(text, bad_words):
     yo = []
-    input_ids =
-    res = len(
+    input_ids = tokenizer2.encode(text, return_tensors='pt')
+    res = len(tokenizer2.encode(text))
     bad_words = bad_words.split()
-    bad_word_ids = [
+    bad_word_ids = []
     for bad_word in bad_words:
         bad_word = " " + bad_word
-        ids =
+        ids = tokenizer2(bad_word).input_ids
         bad_word_ids.append(ids)
-    sample_outputs =
+    sample_outputs = model2.generate(
         input_ids,
         do_sample=True,
-        max_length= res +
-        min_length = res +
+        max_length= res + lengths,
+        min_length = res + lengths,
        top_k=50,
-        temperature=
-        num_return_sequences=
+        temperature=temp,
+        num_return_sequences=number_of_outputs,
        bad_words_ids=bad_word_ids
     )
-    for i in range(
-        e =
+    for i in range(number_of_outputs):
+        e = tokenizer2.decode(sample_outputs[i])
        e = e.replace(text, "")
        yo.append(e)
-    print(yo)
     return yo
-
+
+def prefix_format(sentence):
+    words = sentence.split()
+    if "[MASK]" in sentence:
+        words2 = words.index("[MASK]")
+        #print(words2)
+        output = ("<Prefix> " + ' '.join(words[:words2]) + " <Prefix> " + "<Suffix> " + ' '.join(words[words2+1:]) + " <Suffix>" + " <Middle>")
+        st.write(output)
+    else:
+        st.write("Add [MASK] to sentence")
+
 with st.form(key='my_form'):
-
+    text = st.text_area(label='Enter sentence', value=first)
     submit_button = st.form_submit_button(label='Submit')
-    submit_button2 = st.form_submit_button(label='
-
-
-
-
+    submit_button2 = st.form_submit_button(label='Submit Log Probs')
+
+    submit_button3 = st.form_submit_button(label='Submit Other Model')
+    submit_button4 = st.form_submit_button(label='Submit Log Probs Other Model')
+
+    submit_button5 = st.form_submit_button(label='Most Prob')
+
+    submit_button6 = st.form_submit_button(label='Turn Sentence with [MASK] into <Prefix> Format')
+
 if submit_button:
+    translated_text = run_generate(text, bad_words)
+    st.write(translated_text if translated_text else "No translation found")
+if submit_button2:
     with torch.no_grad():
-
-
+        text2 = str(text)
+        print(text2)
+        text3 = tokenizer.encode(text2)
+        myinput, past_key_values = torch.tensor([text3]), None
         myinput = myinput
-        myinput= myinput.to(device)
         logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
         logits = logits[0,-1]
         probabilities = torch.nn.functional.softmax(logits)
-        best_logits, best_indices = logits.topk(
-        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
-        text.append(best_indices[0].item())
-        best_probabilities = probabilities[best_indices].tolist()
-        words = []
         st.write(best_words)
-if submit_button2:
-    print("----")
-    st.write("___")
-    m = LogProbs(prompt)
-    st.write("___")
-    st.write(m)
-    st.write("___")
+        best_logits, best_indices = logits.topk(logs_outputs)
+        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
 if submit_button3:
-
-
-    st.write(BestProbs)
+    translated_text = run_generate2(text, bad_words)
+    st.write(translated_text if translated_text else "No translation found")
 if submit_button4:
-
+    text2 = str(text)
+    print(text2)
+    text3 = tokenizer2.encode(text2)
+    myinput, past_key_values = torch.tensor([text3]), None
+    myinput = myinput
+    logits, past_key_values = model2(myinput, past_key_values = past_key_values, return_dict=False)
+    logits = logits[0,-1]
+    probabilities = torch.nn.functional.softmax(logits)
+    best_logits, best_indices = logits.topk(logs_outputs)
+    best_words = [tokenizer2.decode([idx.item()]) for idx in best_indices]
+    st.write(best_words)
 if submit_button5:
-
-
+    BestProbs5(text)
+if submit_button6:
+    text2 = str(text)
+    prefix_format(text2)
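
Note on the generation path this commit lands: run_generate pins both max_length and min_length to the prompt length plus the "Length" slider, samples with top_k=50, and tokenizes every banned word with a leading space so the ban matches the mid-sentence form of the word. Below is a minimal standalone sketch of the same pattern; the slider values are hard-coded stand-ins, and gpt2 (the app's second model) stands in for the Lincoln checkpoint to keep it small:

from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in for BigSalmon/InformalToFormalLincoln85Paraphrase
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "informal english: "
input_ids = tokenizer.encode(prompt, return_tensors="pt")
res = input_ids.shape[1]  # prompt length in tokens, as in the app's len(tokenizer.encode(text))

# Tokenize each banned word with a leading space, mirroring the app.
bad_word_ids = [tokenizer(" " + w).input_ids for w in "core lemon height time".split()]

sample_outputs = model.generate(
    input_ids,
    do_sample=True,
    max_length=res + 20,     # "Length" slider stand-in
    min_length=res + 20,
    top_k=50,
    temperature=0.9,         # "Temperature" slider stand-in
    num_return_sequences=3,  # "Number of Outputs" slider stand-in
    bad_words_ids=bad_word_ids,
)
for seq in sample_outputs:
    # Strip the prompt from each continuation, as run_generate does.
    print(tokenizer.decode(seq).replace(prompt, ""))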
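
The two "Submit Log Probs" branches score only the next token: one forward pass, the logits at the last position, then a top-k decode. The same step in isolation, again on the gpt2 stand-in; softmax gets an explicit dim=-1 here, which the app's call leaves to the default:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")

text = "informal english: "
myinput = torch.tensor([tokenizer.encode(text)])
with torch.no_grad():
    # return_dict=False yields a (logits, past_key_values) tuple, as in the app
    logits, past_key_values = model(myinput, past_key_values=None, return_dict=False)
logits = logits[0, -1]  # scores for the next token only
probabilities = torch.nn.functional.softmax(logits, dim=-1)
best_logits, best_indices = logits.topk(50)  # "Logit Outputs" slider stand-in
best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
print(best_words)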
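
The new prefix_format helper rewrites a sentence containing [MASK] into a <Prefix> / <Suffix> / <Middle> fill-in-the-middle string. The same transformation outside Streamlit, returning the string instead of writing it to the page, with a worked example:

def prefix_format(sentence):
    # Everything before [MASK] becomes the <Prefix> span,
    # everything after it the <Suffix> span; the model fills <Middle>.
    words = sentence.split()
    words2 = words.index("[MASK]")
    return ("<Prefix> " + ' '.join(words[:words2]) + " <Prefix> "
            + "<Suffix> " + ' '.join(words[words2 + 1:]) + " <Suffix>" + " <Middle>")

print(prefix_format("the senator gave a [MASK] speech"))
# -> <Prefix> the senator gave a <Prefix> <Suffix> speech <Suffix> <Middle>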