BigSalmon committed

Commit 65e10d8
0 Parent(s)

Duplicate from BigSalmon/AbstractTwst
Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +295 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
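
Each pattern above routes matching files through the Git LFS filter, so large binaries (model weights, archives, serialized arrays) are stored as LFS pointers rather than in the Git history. This is the stock rule set Hugging Face generates for new repositories; an additional pattern can be registered with git lfs track "<pattern>", which appends the corresponding line to this file.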
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: AbstractTwst
+ emoji: 🐨
+ colorFrom: yellow
+ colorTo: green
+ sdk: streamlit
+ sdk_version: 1.21.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: BigSalmon/AbstractTwst
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
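
To try the Space outside Hugging Face, the standard Streamlit workflow applies: install the two packages from requirements.txt (pip install -r requirements.txt), then start the app with streamlit run app.py. The YAML block above is Space metadata read by the Hub, not by Streamlit itself.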
app.py ADDED
@@ -0,0 +1,295 @@
+ import streamlit as st
+ import pandas as pd
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+
+ # (Unused below; kept from earlier revisions of the app.)
+ first = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\n\ninformal english: """
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Load the model once per session. st.cache(allow_output_mutation=True) is
+ # deprecated; st.cache_resource (available in the pinned Streamlit 1.21) is
+ # its replacement for unserializable objects such as models.
+ @st.cache_resource
+ def get_model():
+     # Checkpoints tried in earlier revisions (swap in by editing the two
+     # load calls below):
+     #   BigSalmon/InformalToFormalLincoln{21,31,35,36,37,38,41,43,45,49,51,55}
+     #   BigSalmon/InformalToFormalLincoln{60,64,72,74,79,82,86,89,95,99,101,103,105,106}Paraphrase
+     #   BigSalmon/MediumInformalToFormalLincoln, BigSalmon/InformalToFormalLincolnMedium
+     #   BigSalmon/GPTNeo1.3BInformalToFormal, BigSalmon/PointsOneSent, BigSalmon/PointsToSentence
+     #   BigSalmon/DefinitionsSynonyms1, BigSalmon/DefinitionsSynonyms2
+     #   BigSalmon/AbstractTest, BigSalmon/AbstractGen
+     tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln109Paraphrase")
+     model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln109Paraphrase")
+     # Put the model on the same device the input tensors are moved to below;
+     # otherwise a CUDA input tensor would be fed to a CPU model and raise.
+     model.to(device)
+     return model, tokenizer
+
+ model, tokenizer = get_model()
+
+ # Few-shot prompt shown in the text box: informal sentences paired with
+ # "Style of Abraham Lincoln" rewrites, ending with an open slot for the user.
+ g = """informal english: garage band has made people who know nothing about music good at creating music.
+ Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
+ informal english: chrome extensions can make doing regular tasks much easier to get done.
+ Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
+ informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
+ Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
+ informal english: google translate has made talking to people who do not share your language easier.
+ Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
+ informal english: corn fields are all across illinois, visible once you leave chicago.
+ Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
+ informal english: """
+
+ # Sidebar controls: slider(label, min, max) defaults to the minimum value.
+ number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 100)
+ log_nums = st.sidebar.slider("How Many Log Outputs?", 50, 1000)
+
+ def BestProbs(prompt):
+     # For each of the ten most likely next tokens, show the token and then
+     # (via BestProbs2) the twenty most likely tokens that would follow it.
+     prompt = prompt.strip()
+     text = tokenizer.encode(prompt)
+     myinput = torch.tensor([text]).to(device)
+     with torch.no_grad():
+         logits, _ = model(myinput, past_key_values=None, return_dict=False)
+     logits = logits[0, -1]
+     best_logits, best_indices = logits.topk(10)
+     best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+     for i in best_words:
+         print("_______")
+         st.write(f"${i} $\n")
+         f = f"${i} $\n"
+         BestProbs2(prompt + i)
+     return f
+
+ def BestProbs2(prompt):
+     # Helper for BestProbs: list the twenty most likely next tokens.
+     prompt = prompt.strip()
+     text = tokenizer.encode(prompt)
+     myinput = torch.tensor([text]).to(device)
+     with torch.no_grad():
+         logits, _ = model(myinput, past_key_values=None, return_dict=False)
+     logits = logits[0, -1]
+     best_logits, best_indices = logits.topk(20)
+     best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+     for i in best_words:
+         print(i)
+         st.write(i)
+
+ def LogProbs(prompt):
+     # Build a 20x10 table: one column per top-10 next token, each column
+     # holding the top-20 tokens that would follow that choice.
+     prompt = prompt.strip()
+     text = tokenizer.encode(prompt)
+     myinput = torch.tensor([text]).to(device)
+     with torch.no_grad():
+         logits, _ = model(myinput, past_key_values=None, return_dict=False)
+     logits = logits[0, -1]
+     best_logits, best_indices = logits.topk(10)
+     col1 = [tokenizer.decode([idx.item()]) for idx in best_indices]
+     col2 = []
+     for i in col1:
+         # Branch every candidate from the same base prompt so the columns
+         # are comparable (matching the BestProbs flow).
+         text = tokenizer.encode((prompt + i).strip())
+         myinput = torch.tensor([text]).to(device)
+         with torch.no_grad():
+             logits, _ = model(myinput, past_key_values=None, return_dict=False)
+         logits = logits[0, -1]
+         best_logits, best_indices = logits.topk(20)
+         col2.extend(tokenizer.decode([idx.item()]) for idx in best_indices)
+     # One column of 20 follow-ups per candidate token. Note: if two of the
+     # top-10 tokens decode to the same string, the dict keys collide.
+     d = {col1[n]: col2[n * 20:(n + 1) * 20] for n in range(10)}
+     df = pd.DataFrame(data=d)
+     print(df)
+     st.write(df)
+     return df
+
+ def BestProbs5(prompt):
+     # For each of the top-N next tokens, append it to the prompt and
+     # sample three short continuations with run_generate.
+     prompt = prompt.strip()
+     text = tokenizer.encode(prompt)
+     myinput = torch.tensor([text]).to(device)
+     with torch.no_grad():
+         logits, _ = model(myinput, past_key_values=None, return_dict=False)
+     logits = logits[0, -1]
+     best_logits, best_indices = logits.topk(number_of_outputs)
+     best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+     for i in best_words:
+         print("\n")
+         g = prompt + i
+         st.write(g)
+         l = run_generate(g, "hey")
+         st.write(l)
+
+ def run_generate(text, bad_words):
+     # Sample three 5-token continuations of `text`, blocking the
+     # space-prefixed token ids of every word in `bad_words`.
+     yo = []
+     input_ids = tokenizer.encode(text, return_tensors='pt').to(device)
+     res = len(tokenizer.encode(text))
+     # Two hard-coded banned token ids carried over from earlier revisions.
+     bad_word_ids = [[7829], [40940]]
+     for bad_word in bad_words.split():
+         ids = tokenizer(" " + bad_word).input_ids
+         bad_word_ids.append(ids)
+     sample_outputs = model.generate(
+         input_ids,
+         do_sample=True,
+         max_length=res + 5,
+         min_length=res + 5,
+         top_k=50,
+         temperature=1.0,
+         num_return_sequences=3,
+         bad_words_ids=bad_word_ids,
+     )
+     for i in range(3):
+         e = tokenizer.decode(sample_outputs[i])
+         e = e.replace(text, "")
+         yo.append(e)
+     print(yo)
+     return yo
+
+ with st.form(key='my_form'):
+     prompt = st.text_area(label='Enter sentence', value=g, height=500)
+     submit_button = st.form_submit_button(label='Submit')
+     submit_button2 = st.form_submit_button(label='Fast Forward')
+     submit_button3 = st.form_submit_button(label='Fast Forward 2.0')
+     submit_button4 = st.form_submit_button(label='Get Top')
+
+ if submit_button:
+     # Show the `log_nums` most likely next tokens for the prompt.
+     with torch.no_grad():
+         text = tokenizer.encode(prompt)
+         myinput = torch.tensor([text]).to(device)
+         logits, _ = model(myinput, past_key_values=None, return_dict=False)
+         logits = logits[0, -1]
+         best_logits, best_indices = logits.topk(log_nums)
+         best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+         st.write(best_words)
+ if submit_button2:
+     print("----")
+     st.write("___")
+     m = LogProbs(prompt)
+     st.write("___")
+     st.write(m)
+     st.write("___")
+ if submit_button3:
+     print("----")
+     st.write("___")
+     # Call BestProbs on the prompt (it renders its own output).
+     BestProbs(prompt)
+ if submit_button4:
+     BestProbs5(prompt)
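
Every button above reduces to the same primitive: run one forward pass over the encoded prompt, take the logits at the final position, and rank them. A minimal standalone sketch of that primitive (same checkpoint as the app, though any causal LM would do):

    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    tok = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln109Paraphrase")
    lm = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln109Paraphrase")

    ids = tok("informal english: ", return_tensors="pt").input_ids
    with torch.no_grad():
        logits = lm(ids).logits[0, -1]        # next-token logits
    probs = torch.softmax(logits, dim=-1)
    top_probs, top_ids = probs.topk(10)
    for p, t in zip(top_probs, top_ids):
        print(f"{tok.decode([t.item()])!r} {p.item():.3f}")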
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ torch
+ transformers
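
Both dependencies are left unpinned, so each Space rebuild resolves whatever versions are current. If reproducibility matters, pinning known-good versions (for example torch==2.0.1 and transformers==4.30.2, illustrative versions rather than ones taken from this Space) would freeze the build.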