BigSalmon committed on
Commit f0bf491
Parent(s): d54493e

Update app.py

Files changed (1): app.py (+218 −135)
app.py CHANGED
@@ -1,89 +1,194 @@
  import streamlit as st
- from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
  import torch

- first = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\n\ninformal english: """

  @st.cache(allow_output_mutation=True)
  def get_model():
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln2")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln21")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln40")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln41")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln41")
      #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln49")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPT2InformalToFormalLincoln42")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/Points3")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo1.3BPointsLincolnFormalInformal")
      #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo350MInformalToFormalLincoln7")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincolnConciseWordy")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln2")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln3")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln4")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln50")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPT2Neo1.3BPoints2")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPT2Neo1.3BPoints3")
-     #model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln63Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln68Paraphrase")
-     #model2 = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln63Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln63Paraphrase")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln73Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln73Paraphrase")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln76Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln76Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln78Paraphrase")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln78Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln80Paraphrase")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln80Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln82Paraphrase")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln82Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln85Paraphrase")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln85Paraphrase")
-     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln88Paraphrase")
-     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln88Paraphrase")
-     model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln90Paraphrase")
-     tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln90Paraphrase")
-     tokenizer2 = AutoTokenizer.from_pretrained("gpt2")
-     model2 = AutoModelForCausalLM.from_pretrained("gpt2")
-     return model, model2, tokenizer, tokenizer2

- model, model2, tokenizer, tokenizer2 = get_model()

- st.text('''For Prompt Templates: https://huggingface.co/BigSalmon/InformalToFormalLincoln82Paraphrase''')

- temp = st.sidebar.slider("Temperature", 0.7, 1.5)
- number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 50)
- lengths = st.sidebar.slider("Length", 3, 500)
- bad_words = st.text_input("Words You Do Not Want Generated", " core lemon height time ")
- logs_outputs = st.sidebar.slider("Logit Outputs", 50, 300)

- def run_generate(text, bad_words):
-     yo = []
-     input_ids = tokenizer.encode(text, return_tensors='pt')
-     res = len(tokenizer.encode(text))
-     bad_words = bad_words.split()
-     bad_word_ids = []
-     for bad_word in bad_words:
-         bad_word = " " + bad_word
-         ids = tokenizer(bad_word).input_ids
-         bad_word_ids.append(ids)
-     sample_outputs = model.generate(
-         input_ids,
-         do_sample=True,
-         max_length=res + lengths,
-         min_length=res + lengths,
-         top_k=50,
-         temperature=temp,
-         num_return_sequences=number_of_outputs,
-         bad_words_ids=bad_word_ids
-     )
-     for i in range(number_of_outputs):
-         e = tokenizer.decode(sample_outputs[i])
-         e = e.replace(text, "")
-         yo.append(e)
-     return yo

  def BestProbs5(prompt):
      prompt = prompt.strip()
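
Review note on the removed run_generate above: it blocks user-supplied words by tokenizing each one with a leading space (so the ids match the word's mid-sentence form) and passing the collected id lists to generate via bad_words_ids. A minimal standalone sketch of the same pattern; gpt2 stands in for the app's checkpoint and build_bad_word_ids is an illustrative helper, not part of app.py:

from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

def build_bad_word_ids(words):
    # The leading space yields the word-boundary token ids,
    # mirroring the bad_word = " " + bad_word step above.
    return [tokenizer(" " + w).input_ids for w in words]

input_ids = tokenizer.encode("informal english: ", return_tensors="pt")
outputs = model.generate(
    input_ids,
    do_sample=True,
    max_length=input_ids.shape[1] + 20,
    bad_words_ids=build_bad_word_ids("core lemon height time".split()),
)
print(tokenizer.decode(outputs[0]))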
@@ -102,88 +207,66 @@ def BestProbs5(prompt):
    st.write(g)
    l = run_generate(g, "hey")
    st.write(l)
-
- def run_generate2(text, bad_words):
      yo = []
-     input_ids = tokenizer2.encode(text, return_tensors='pt')
-     res = len(tokenizer2.encode(text))
      bad_words = bad_words.split()
-     bad_word_ids = []
      for bad_word in bad_words:
          bad_word = " " + bad_word
-         ids = tokenizer2(bad_word).input_ids
          bad_word_ids.append(ids)
-     sample_outputs = model2.generate(
          input_ids,
          do_sample=True,
-         max_length=res + lengths,
-         min_length=res + lengths,
          top_k=50,
-         temperature=temp,
-         num_return_sequences=number_of_outputs,
          bad_words_ids=bad_word_ids
      )
-     for i in range(number_of_outputs):
-         e = tokenizer2.decode(sample_outputs[i])
          e = e.replace(text, "")
          yo.append(e)
      return yo
-
- def prefix_format(sentence):
-     words = sentence.split()
-     if "[MASK]" in sentence:
-         words2 = words.index("[MASK]")
-         #print(words2)
-         output = ("<Prefix> " + ' '.join(words[:words2]) + " <Prefix> " + "<Suffix> " + ' '.join(words[words2+1:]) + " <Suffix>" + " <Middle>")
-         st.write(output)
-     else:
-         st.write("Add [MASK] to sentence")
-
  with st.form(key='my_form'):
-     text = st.text_area(label='Enter sentence', value=first)
      submit_button = st.form_submit_button(label='Submit')
-     submit_button2 = st.form_submit_button(label='Submit Log Probs')
-
-     submit_button3 = st.form_submit_button(label='Submit Other Model')
-     submit_button4 = st.form_submit_button(label='Submit Log Probs Other Model')
-
-     submit_button5 = st.form_submit_button(label='Most Prob')
-
-     submit_button6 = st.form_submit_button(label='Turn Sentence with [MASK] into <Prefix> Format')
-
  if submit_button:
-     translated_text = run_generate(text, bad_words)
-     st.write(translated_text if translated_text else "No translation found")
- if submit_button2:
      with torch.no_grad():
-         text2 = str(text)
-         print(text2)
-         text3 = tokenizer.encode(text2)
-         myinput, past_key_values = torch.tensor([text3]), None
          myinput = myinput
          logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
          logits = logits[0,-1]
          probabilities = torch.nn.functional.softmax(logits)
-         best_logits, best_indices = logits.topk(logs_outputs)
-         best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
          st.write(best_words)
  if submit_button3:
-     translated_text = run_generate2(text, bad_words)
-     st.write(translated_text if translated_text else "No translation found")
  if submit_button4:
-     text2 = str(text)
-     print(text2)
-     text3 = tokenizer2.encode(text2)
-     myinput, past_key_values = torch.tensor([text3]), None
-     myinput = myinput
-     logits, past_key_values = model2(myinput, past_key_values=past_key_values, return_dict=False)
-     logits = logits[0,-1]
-     probabilities = torch.nn.functional.softmax(logits)
-     best_logits, best_indices = logits.topk(logs_outputs)
-     best_words = [tokenizer2.decode([idx.item()]) for idx in best_indices]
-     st.write(best_words)
- if submit_button5:
-     BestProbs5(text)
- if submit_button6:
-     text2 = str(text)
-     prefix_format(text2)
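Review note on the removed prefix_format: it turns a sentence containing [MASK] into a <Prefix> ... <Suffix> ... <Middle> fill-in-the-middle prompt. A self-contained sketch of the transformation; unlike the original's substring test, the guard here checks for [MASK] as a standalone word, so words.index cannot fail:

def prefix_format(sentence):
    # Split on whitespace and locate the [MASK] placeholder.
    words = sentence.split()
    if "[MASK]" not in words:
        return "Add [MASK] to sentence"
    i = words.index("[MASK]")
    # Everything before [MASK] becomes the prefix, everything after the suffix.
    return ("<Prefix> " + " ".join(words[:i]) + " <Prefix> "
            + "<Suffix> " + " ".join(words[i + 1:]) + " <Suffix> <Middle>")

print(prefix_format("corn fields [MASK] the state of illinois"))
# -> <Prefix> corn fields <Prefix> <Suffix> the state of illinois <Suffix> <Middle>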
  import streamlit as st
+ import numpy as np
+ import pandas as pd
+ import os
  import torch
+ import torch.nn as nn
+ from transformers.activations import get_activation
+ from transformers import AutoTokenizer, AutoModelForCausalLM

+
+ st.title('GPT2: To see all prompt outlines: https://huggingface.co/BigSalmon/InformalToFormalLincoln64Paraphrase')
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

  @st.cache(allow_output_mutation=True)
  def get_model():
+
+     tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln90Paraphrase")
+     model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln90Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln88Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln88Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln86Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln86Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln82Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln82Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln79Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln79Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln74Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln74Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln72Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln72Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln64Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln64Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln60Paraphrase")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln60Paraphrase")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/GPTNeo1.3BInformalToFormal")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo1.3BInformalToFormal")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln55")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln55")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln51")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln51")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln45")
      #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln49")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln43")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln43")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln41")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln41")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln38")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln38")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln37")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln37")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln36")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln36")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")
      #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MediumInformalToFormalLincoln")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln35")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln35")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln31")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln31")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln21")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln21")
+
+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/PointsOneSent")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/PointsOneSent")

+     #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/PointsToSentence")
+     #model = AutoModelForCausalLM.from_pretrained("BigSalmon/PointsToSentence")
+
+     return model, tokenizer
+
+ model, tokenizer = get_model()

+ g = """informal english: garage band has made people who know nothing about music good at creating music.
+ Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
+
+ informal english: chrome extensions can make doing regular tasks much easier to get done.
+ Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
+
+ informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
+ Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
+
+ informal english: google translate has made talking to people who do not share your language easier.
+ Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
+
+ informal english: corn fields are all across illinois, visible once you leave chicago.
+ Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
+
+ informal english: """
+
+ number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 100)
+ log_nums = st.sidebar.slider("How Many Log Outputs?", 50, 600)
+
+ def BestProbs(prompt):
+     prompt = prompt.strip()
+     text = tokenizer.encode(prompt)
+     myinput, past_key_values = torch.tensor([text]), None
+     myinput = myinput
+     logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
+     logits = logits[0,-1]
+     probabilities = torch.nn.functional.softmax(logits)
+     best_logits, best_indices = logits.topk(10)
+     best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+     for i in best_words[0:10]:
+         print("_______")
+         st.write(f"${i} $\n")
+         f = (f"${i} $\n")
+         m = (prompt + f"{i}")
+         BestProbs2(m)
+     return f
+
+ def BestProbs2(prompt):
+     prompt = prompt.strip()
+     text = tokenizer.encode(prompt)
+     myinput, past_key_values = torch.tensor([text]), None
+     myinput = myinput
+     logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
+     logits = logits[0,-1]
+     probabilities = torch.nn.functional.softmax(logits)
+     best_logits, best_indices = logits.topk(20)
+     best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+     for i in best_words[0:20]:
+         print(i)
+         st.write(i)
+
+ def LogProbs(prompt):
+     col1 = []
+     col2 = []
+     prompt = prompt.strip()
+     text = tokenizer.encode(prompt)
+     myinput, past_key_values = torch.tensor([text]), None
+     myinput = myinput
+     logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
+     logits = logits[0,-1]
+     probabilities = torch.nn.functional.softmax(logits)
+     best_logits, best_indices = logits.topk(10)
+     best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+     for i in best_words[0:10]:
+         print("_______")
+         f = i
+         col1.append(f)
+         m = (prompt + f"{i}")
+         #print("^^" + f + " ^^")
+         prompt = m.strip()
+         text = tokenizer.encode(prompt)
+         myinput, past_key_values = torch.tensor([text]), None
+         myinput = myinput
+         logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
+         logits = logits[0,-1]
+         probabilities = torch.nn.functional.softmax(logits)
+         best_logits, best_indices = logits.topk(20)
+         best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+         for i in best_words[0:20]:
+             #print(i)
+             col2.append(i)
+     #print(col1)
+     #print(col2)
+     d = {col1[0]: [col2[0], col2[1], col2[2], col2[3], col2[4], col2[5], col2[6], col2[7], col2[8], col2[9], col2[10], col2[11], col2[12], col2[13], col2[14], col2[15], col2[16], col2[17], col2[18], col2[19]],
+          col1[1]: [col2[20], col2[21], col2[22], col2[23], col2[24], col2[25], col2[26], col2[27], col2[28], col2[29], col2[30], col2[31], col2[32], col2[33], col2[34], col2[35], col2[36], col2[37], col2[38], col2[39]],
+          col1[2]: [col2[40], col2[41], col2[42], col2[43], col2[44], col2[45], col2[46], col2[47], col2[48], col2[49], col2[50], col2[51], col2[52], col2[53], col2[54], col2[55], col2[56], col2[57], col2[58], col2[59]],
+          col1[3]: [col2[60], col2[61], col2[62], col2[63], col2[64], col2[65], col2[66], col2[67], col2[68], col2[69], col2[70], col2[71], col2[72], col2[73], col2[74], col2[75], col2[76], col2[77], col2[78], col2[79]],
+          col1[4]: [col2[80], col2[81], col2[82], col2[83], col2[84], col2[85], col2[86], col2[87], col2[88], col2[89], col2[90], col2[91], col2[92], col2[93], col2[94], col2[95], col2[96], col2[97], col2[98], col2[99]],
+          col1[5]: [col2[100], col2[101], col2[102], col2[103], col2[104], col2[105], col2[106], col2[107], col2[108], col2[109], col2[110], col2[111], col2[112], col2[113], col2[114], col2[115], col2[116], col2[117], col2[118], col2[119]],
+          col1[6]: [col2[120], col2[121], col2[122], col2[123], col2[124], col2[125], col2[126], col2[127], col2[128], col2[129], col2[130], col2[131], col2[132], col2[133], col2[134], col2[135], col2[136], col2[137], col2[138], col2[139]],
+          col1[7]: [col2[140], col2[141], col2[142], col2[143], col2[144], col2[145], col2[146], col2[147], col2[148], col2[149], col2[150], col2[151], col2[152], col2[153], col2[154], col2[155], col2[156], col2[157], col2[158], col2[159]],
+          col1[8]: [col2[160], col2[161], col2[162], col2[163], col2[164], col2[165], col2[166], col2[167], col2[168], col2[169], col2[170], col2[171], col2[172], col2[173], col2[174], col2[175], col2[176], col2[177], col2[178], col2[179]],
+          col1[9]: [col2[180], col2[181], col2[182], col2[183], col2[184], col2[185], col2[186], col2[187], col2[188], col2[189], col2[190], col2[191], col2[192], col2[193], col2[194], col2[195], col2[196], col2[197], col2[198], col2[199]]}
+     df = pd.DataFrame(data=d)
+     print(df)
+     st.write(df)
+     return df

  def BestProbs5(prompt):
      prompt = prompt.strip()

    st.write(g)
    l = run_generate(g, "hey")
    st.write(l)
+
+ def run_generate(text, bad_words):
      yo = []
+     input_ids = tokenizer.encode(text, return_tensors='pt')
+     res = len(tokenizer.encode(text))
      bad_words = bad_words.split()
+     bad_word_ids = [[7829], [40940]]
      for bad_word in bad_words:
          bad_word = " " + bad_word
+         ids = tokenizer(bad_word).input_ids
          bad_word_ids.append(ids)
+     sample_outputs = model.generate(
          input_ids,
          do_sample=True,
+         max_length=res + 5,
+         min_length=res + 5,
          top_k=50,
+         temperature=1.0,
+         num_return_sequences=3,
          bad_words_ids=bad_word_ids
      )
+     for i in range(3):
+         e = tokenizer.decode(sample_outputs[i])
          e = e.replace(text, "")
          yo.append(e)
+     print(yo)
      return yo
+

  with st.form(key='my_form'):
+     prompt = st.text_area(label='Enter sentence', value=g, height=500)
      submit_button = st.form_submit_button(label='Submit')
+     submit_button2 = st.form_submit_button(label='Fast Forward')
+     submit_button3 = st.form_submit_button(label='Fast Forward 2.0')
+     submit_button4 = st.form_submit_button(label='Get Top')
+
  if submit_button:
      with torch.no_grad():
+         text = tokenizer.encode(prompt)
+         myinput, past_key_values = torch.tensor([text]), None
          myinput = myinput
+         myinput = myinput.to(device)
          logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
          logits = logits[0,-1]
          probabilities = torch.nn.functional.softmax(logits)
+         best_logits, best_indices = logits.topk(log_nums)
+         best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
+         text.append(best_indices[0].item())
+         best_probabilities = probabilities[best_indices].tolist()
+         words = []
          st.write(best_words)
+ if submit_button2:
+     print("----")
+     st.write("___")
+     m = LogProbs(prompt)
+     st.write("___")
+     st.write(m)
+     st.write("___")
  if submit_button3:
+     print("----")
+     st.write("___")
+     st.write(BestProbs)
  if submit_button4:
+     BestProbs5(prompt)
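
Review note on the new handlers: BestProbs, BestProbs2, LogProbs, and the Submit branch all share one pattern: encode the prompt, run a forward pass, take the last position's logits, softmax them, and inspect the top-k candidate next tokens. A minimal sketch of that pattern; gpt2 stands in for the Lincoln checkpoint, top_next_tokens is illustrative rather than a function in app.py, and dim is passed explicitly where the app relies on softmax's implicit (deprecated) default:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

def top_next_tokens(prompt, k=10):
    ids = tokenizer.encode(prompt, return_tensors="pt")
    with torch.no_grad():
        logits = model(ids).logits[0, -1]  # scores for the next token only
    probs = torch.nn.functional.softmax(logits, dim=-1)
    best_probs, best_indices = probs.topk(k)
    # Decode each candidate id back to its surface token.
    return [(tokenizer.decode([i.item()]), p.item())
            for i, p in zip(best_indices, best_probs)]

print(top_next_tokens("informal english: the weather is"))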