Commit 47bd072 by parquet-converter (1 parent: ef06b5d)

Update parquet files

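This conversion replaces the script-based loader with Parquet shards under `default/` and `mini/`. A minimal usage sketch, assuming the repo id and config names taken from the deleted `python-state-changes.py` loading script:

```python
# Hypothetical usage sketch, not part of this commit: after the conversion,
# `datasets` reads the Parquet shards directly, with no loading script needed.
from datasets import load_dataset

ds = load_dataset("Fraser/python-state-changes", "mini")  # or "default"
print(ds["train"][0])  # {'start': ..., 'code': ..., 'end': ...}
```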
.gitignore DELETED
@@ -1,3 +0,0 @@
- *.pyc
- .DS_Store
- fails
 
README.md DELETED
@@ -1,20 +0,0 @@
- ---
- language:
- - code
- ---
-
- # Python State Changes
-
- State changes from the execution of single lines of Python code.
- All code was taken from Python HackerRank solutions.
-
- Scraped from my dataset of traced HackerRank solutions: https://www.kaggle.com/frasergreenlee/ran-hackerrank-solutions
-
- ```json
- {"start": "g = 100; i = 1; l = [100, 100, 0, 0, -100, -100]", "code": "g += l[i]", "end": "g = 200; i = 1; l = [100, 100, 0, 0, -100, -100]"}
- {"start": "a = 1; b = 2; d = 4; i = 3; j = 2", "code": "i, j = a + (j - b), b + (d - (i - a))", "end": "a = 1; b = 2; d = 4; i = 1; j = 4"}
- {"start": "b = 15", "code": "b = b // 2", "end": "b = 7"}
- ```
-
- ## Get an overview of the dataset by exploring the frequency of different ASTs
- 👉 https://observablehq.com/@frasergreenlee/python-lines-dataset#chart
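Each record in the deleted README's example block is directly checkable: exec the `start` state, run `code`, and re-serialize the resulting state. A minimal sketch mirroring `trace_code`/`state_dict_to_str` from the notebook deleted below (illustrative only, not part of the repo):

```python
import json

# One record from the deleted README's example block.
record = json.loads('{"start": "b = 15", "code": "b = b // 2", "end": "b = 7"}')

state = {}
exec(record["start"], {}, state)  # build the starting variable state
exec(record["code"], {}, state)   # apply the single line of code
end = "; ".join(f"{k} = {v}" for k, v in sorted(state.items()))
assert end == record["end"]       # -> "b = 7"
```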
 
data.jsonl → default/python-state-changes-train-00000-of-00002.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:969d4e10b40ee18e1f4f7fce42d3b96cac796995bc420d2b64b6120f8ef22faf
- size 1192717294
+ oid sha256:b14311fdf8f45efb7d554ab39bafc158e51974170993b5aa2179791d5427d943
+ size 291148930
default/python-state-changes-train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2262f8da0ca00be5c62112f8d4238bda52b9253ae3421b566d3d6e51b497a86
+ size 272613945
make_variations/generate_with_codeT5.ipynb DELETED
@@ -1,615 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "{user.username}\n"
- ]
- }
- ],
- "source": [
- "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n",
- "\n",
- "tokenizer = AutoTokenizer.from_pretrained(\"Salesforce/codet5-base\")\n",
- "model = AutoModelForSeq2SeqLM.from_pretrained(\"Salesforce/codet5-base\")\n",
- "\n",
- "text = \"def greet(user): print(f'hello <extra_id_0>!')\"\n",
- "input_ids = tokenizer(text, return_tensors=\"pt\").input_ids\n",
- "\n",
- "# simply generate a single sequence\n",
- "generated_ids = model.generate(input_ids, max_length=8)\n",
- "print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))\n",
- "# this prints \"{user.username}\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 58,
- "metadata": {},
- "outputs": [],
- "source": [
- "import ast\n",
- "\n",
- "def filter_codes(codes):\n",
- "    codes = list(set(codes))\n",
- "    new_codes = []\n",
- "    for code in codes:\n",
- "        if ';' in code:\n",
- "            code = code[code.index(';'):]\n",
- "        try:\n",
- "            ast.parse(code)\n",
- "        except Exception:\n",
- "            continue\n",
- "        new_codes.append(code)\n",
- "    return new_codes"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 59,
- "metadata": {},
- "outputs": [],
- "source": [
- "def temp_value(value):\n",
- "    if value[0] == '[' and value[-1] == ']':\n",
- "        return '[<extra_id_0>]'\n",
- "    if value[0] == '\"' and value[-1] == '\"':\n",
- "        return '\"<extra_id_0>\"'\n",
- "    if value[0] == \"'\" and value[-1] == \"'\":\n",
- "        return \"'<extra_id_0>'\"\n",
- "    if value[0] == '{' and value[-1] == '}':\n",
- "        return '{<extra_id_0>}'\n",
- "    return '<extra_id_0>'\n",
- "\n",
- "def temp_var(var):\n",
- "    value = var[4:]\n",
- "    return var[:4] + temp_value(value)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 60,
- "metadata": {},
- "outputs": [],
- "source": [
- "def make_code(start, code):\n",
- "    return f'def main(): {\"; \".join(start)}; {code}; return {\", \".join([v.split()[0] for v in start])}'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 61,
- "metadata": {},
- "outputs": [],
- "source": [
- "import ast\n",
- "\n",
- "def filter_codes(codes):\n",
- "    codes = list(set(codes))\n",
- "    new_codes = []\n",
- "    for code in codes:\n",
- "        if ';' in code:\n",
- "            code = code[code.index(';'):]\n",
- "        try:\n",
- "            ast.parse(code)\n",
- "        except Exception:\n",
- "            continue\n",
- "        new_codes.append(code)\n",
- "    return new_codes"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 62,
- "metadata": {},
- "outputs": [],
- "source": [
- "def alt_from_code(code):\n",
- "    input_ids = tokenizer(code, return_tensors=\"pt\").input_ids\n",
- "    generated_ids = model.generate(input_ids, num_return_sequences=100, max_length=20, do_sample=True, temperature=1.0)\n",
- "    return filter_codes(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 63,
- "metadata": {},
- "outputs": [],
- "source": [
- "import errno\n",
- "import os\n",
- "import signal\n",
- "import functools\n",
- "\n",
- "class TimeoutError(Exception):\n",
- "    pass\n",
- "\n",
- "def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):\n",
- "    def decorator(func):\n",
- "        def _handle_timeout(signum, frame):\n",
- "            raise TimeoutError(error_message)\n",
- "\n",
- "        @functools.wraps(func)\n",
- "        def wrapper(*args, **kwargs):\n",
- "            signal.signal(signal.SIGALRM, _handle_timeout)\n",
- "            signal.alarm(seconds)\n",
- "            try:\n",
- "                result = func(*args, **kwargs)\n",
- "            finally:\n",
- "                signal.alarm(0)\n",
- "            return result\n",
- "\n",
- "        return wrapper\n",
- "\n",
- "    return decorator"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 64,
- "metadata": {},
- "outputs": [],
- "source": [
- "def state_dict_to_str(state):\n",
- "    vals = []\n",
- "    for k, v in state.items():\n",
- "        vals.append(\n",
- "            f'{k} = {v}'\n",
- "        )\n",
- "    vals = sorted(vals)\n",
- "    return '; '.join(vals)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 65,
- "metadata": {},
- "outputs": [],
- "source": [
- "@timeout(seconds=3)\n",
- "def trace_code(start_state: str, code: str):\n",
- "    state = {}\n",
- "    try:\n",
- "        exec(start_state, {}, state)\n",
- "    except Exception:\n",
- "        return\n",
- "    start_state = dict(state)\n",
- "    try:\n",
- "        exec(code, {}, state)\n",
- "    except Exception:\n",
- "        return\n",
- "    return state_dict_to_str(start_state), code, state_dict_to_str(state)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 66,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "[{'start': 'g = 100; i = 1; l = [1, 100, 1]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 200; i = 1; l = [1, 100, 1]'},\n",
- " {'start': 'g = 100; i = 1; l = [1, 1]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 101; i = 1; l = [1, 1]'},\n",
- " {'start': 'g = 100; i = 1; l = [1, 1, 1]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 101; i = 1; l = [1, 1, 1]'},\n",
- " {'start': 'g = 100; i = 1; l = [100, 100]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 200; i = 1; l = [100, 100]'},\n",
- " {'start': 'g = 100; i = 1; l = [50, 50, 50, 40]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 150; i = 1; l = [50, 50, 50, 40]'},\n",
- " {'start': 'g = 100; i = 1; l = [0, 10]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 110; i = 1; l = [0, 10]'},\n",
- " {'start': 'g = 100; i = 1; l = [100, 900, 10, 10]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 1000; i = 1; l = [100, 900, 10, 10]'},\n",
- " {'start': 'g = 100; i = 1; l = [1, 1, 2]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 101; i = 1; l = [1, 1, 2]'},\n",
- " {'start': 'g = 100; i = 1; l = [100, 100, 100, 0, 0]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 200; i = 1; l = [100, 100, 100, 0, 0]'}]"
- ]
- },
- "execution_count": 66,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "def get_working_alts(other_vars, var_alts, code):\n",
- "    rows = []\n",
- "    for alt in var_alts:\n",
- "        start = other_vars + [alt]\n",
- "        result = trace_code('; '.join(start), code)\n",
- "        if result:\n",
- "            rows.append({'start': result[0], 'code': result[1], 'end': result[2]})\n",
- "    return rows\n",
- "\n",
- "test_alt_vars = [\n",
- "    'l = [1, 100, 1]',\n",
- "    'l = [1, 1]',\n",
- "    'l = [f]',\n",
- "    'l = [1, 1, 1,]',\n",
- "    'l = [i = 10]',\n",
- "    'l = [100, 100]',\n",
- "    'l = [l[i].max(), l[i].min()]',\n",
- "    'l = [1]',\n",
- "    'l = [50, 50, 50, 40]',\n",
- "    'l = [0, 10]',\n",
- "    'l = [100, 900, 10, 10]',\n",
- "    'l = [i, 1, 2]',\n",
- "    'l = [100, 100, 100, 0, 0]'\n",
- "]\n",
- "get_working_alts(['g = 100', 'i = 1'], test_alt_vars, 'g += l[i]')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 67,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(['g = 100', 'i = 1'],\n",
- " ['l = [1, 2]',\n",
- " 'l = [g, i, j]',\n",
- " 'l = [i,g]',\n",
- " 'l = [k, j, k2]',\n",
- " 'l = [1.0, 0.01, 0.01, 0.01]',\n",
- " 'l = [k, j]',\n",
- " 'l = [j]',\n",
- " 'l = [r, t, d]',\n",
- " 'l = [g, i, l]',\n",
- " 'l = [1]',\n",
- " 'l = [l]',\n",
- " 'l = [i, 1]',\n",
- " 'l = [g + h*g + i*i]',\n",
- " 'l = [g, i, 1]',\n",
- " 'l = [b[i], b [ j ]]',\n",
- " 'l = [2, 3, 3,]',\n",
- " 'l = [a[g, e, c]]',\n",
- " 'l = [b [ a ] [b[3]]]',\n",
- " 'l = [g - 1, i]',\n",
- " 'l = [2]',\n",
- " 'l = [5]',\n",
- " 'l = [6, 5, 3, 2]',\n",
- " 'l = [b[g], b[i], b[g]]',\n",
- " 'l = [b[i][j]]',\n",
- " 'l = [c[j ], c[j+1 ]]',\n",
- " 'l = [i, g * g]',\n",
- " 'l = [g]',\n",
- " 'l = [g, i, f]',\n",
- " 'l = [a [ i ]]',\n",
- " 'l = [1, 1, 1]',\n",
- " 'l = [1, 4, 4]',\n",
- " 'l = [b [j ]]',\n",
- " 'l = [g, i]',\n",
- " 'l = [1, 0, 0]',\n",
- " 'l = [i, l]',\n",
- " 'l = [0.0]',\n",
- " 'l = [i]',\n",
- " 'l = [g, i, 0]',\n",
- " 'l = [{ i }]',\n",
- " 'l = [i, v[0], v[1],l]',\n",
- " 'l = [c[j ],]',\n",
- " 'l = [0]',\n",
- " 'l = [a [ 0 ]]',\n",
- " 'l = [d, g, i]',\n",
- " 'l = [g, g, i]',\n",
- " 'l = [b[j ]]'])"
- ]
- },
- "execution_count": 67,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "def get_alts_for_var(start_vars, alt_i, code):\n",
- "    start_vars[alt_i] = temp_var(start_vars[alt_i])\n",
- "    code = make_code(start_vars, row['code'])\n",
- "    var_alts = alt_from_code(code)\n",
- "    alt_var_temp = start_vars[alt_i]\n",
- "    del start_vars[alt_i]\n",
- "    return start_vars, [alt_var_temp.replace('<extra_id_0>', alt) for alt in var_alts]\n",
- "\n",
- "alt_start_vars, var_alts = get_alts_for_var(\n",
- "    ['g = 100', 'i = 1', 'l = [100, 100, 0, 0, -100, -100]'], 2, 'g += l[i]'\n",
- ")\n",
- "alt_start_vars, var_alts"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 68,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "(29,\n",
- " [{'start': 'g = 50; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 150; i = 1; l = [100, 100, 0, 0, -100, -100]'},\n",
- " {'start': 'g = 10; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 110; i = 1; l = [100, 100, 0, 0, -100, -100]'},\n",
- " {'start': 'g = -3; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
- " 'code': 'g += l[i]',\n",
- " 'end': 'g = 97; i = 1; l = [100, 100, 0, 0, -100, -100]'}])"
- ]
- },
- "execution_count": 68,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "def make_alternatives(row):\n",
- "    start_vars = row['start'].split('; ')\n",
- "\n",
- "    alts = []\n",
- "    for i in range(len(start_vars)):\n",
- "        alt_start_vars, var_alts = get_alts_for_var(list(start_vars), i, row['code'])\n",
- "        alts += get_working_alts(alt_start_vars, var_alts, row['code'])\n",
- "\n",
- "    return alts\n",
- "\n",
- "alts = make_alternatives(\n",
- "    {'start': 'g = 100; i = 1; l = [100, 100, 0, 0, -100, -100]',\n",
- "     'code': 'g += l[i]',\n",
- "     'end': 'g = 200; i = 1; l = [100, 100, 0, 0, -100, -100]'}\n",
- ")\n",
- "len(alts), alts[:3]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 69,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 0%| | 1/8968897 [00:09<24001:13:52, 9.63s/it]<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
- "<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
- "<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
- " 0%| | 22/8968897 [02:45<14831:12:33, 5.95s/it]<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
- "<string>:1: SyntaxWarning: 'int' object is not subscriptable; perhaps you missed a comma?\n",
- "<string>:1: SyntaxWarning: 'int' object is not subscriptable; perhaps you missed a comma?\n",
- " 0%| | 34/8968897 [04:26<26565:33:36, 10.66s/it]<string>:1: SyntaxWarning: 'int' object is not subscriptable; perhaps you missed a comma?\n",
- "<string>:1: SyntaxWarning: 'int' object is not callable; perhaps you missed a comma?\n",
- " 0%| | 44/8968897 [10:01<34031:25:54, 13.66s/it] \n"
- ]
- },
- {
- "ename": "KeyboardInterrupt",
- "evalue": "",
- "output_type": "error",
- "traceback": [
407
- "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
408
- "Input \u001b[0;32mIn [69]\u001b[0m, in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m id_, line \u001b[38;5;129;01min\u001b[39;00m tqdm(\u001b[38;5;28menumerate\u001b[39m(f), total\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m8968897\u001b[39m):\n\u001b[1;32m 19\u001b[0m row \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(line)\n\u001b[0;32m---> 20\u001b[0m alts \u001b[38;5;241m=\u001b[39m \u001b[43mmake_alternatives\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrow\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 21\u001b[0m new_rows \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m [row] \u001b[38;5;241m+\u001b[39m alts\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_rows \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(new_rows) \u001b[38;5;241m%\u001b[39m \u001b[38;5;241m10_000\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
409
- "Input \u001b[0;32mIn [68]\u001b[0m, in \u001b[0;36mmake_alternatives\u001b[0;34m(row)\u001b[0m\n\u001b[1;32m 4\u001b[0m alts \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(start_vars)):\n\u001b[0;32m----> 6\u001b[0m alt_start_vars, var_alts \u001b[38;5;241m=\u001b[39m \u001b[43mget_alts_for_var\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mlist\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mstart_vars\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mi\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrow\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mcode\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 7\u001b[0m alts \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m get_working_alts(alt_start_vars, var_alts, row[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcode\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m alts\n",
410
- "Input \u001b[0;32mIn [67]\u001b[0m, in \u001b[0;36mget_alts_for_var\u001b[0;34m(start_vars, alt_i, code)\u001b[0m\n\u001b[1;32m 2\u001b[0m start_vars[alt_i] \u001b[38;5;241m=\u001b[39m temp_var(start_vars[alt_i])\n\u001b[1;32m 3\u001b[0m code \u001b[38;5;241m=\u001b[39m make_code(start_vars, row[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcode\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[0;32m----> 4\u001b[0m var_alts \u001b[38;5;241m=\u001b[39m \u001b[43malt_from_code\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcode\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m alt_var_temp \u001b[38;5;241m=\u001b[39m start_vars[alt_i]\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m start_vars[alt_i]\n",
411
- "Input \u001b[0;32mIn [62]\u001b[0m, in \u001b[0;36malt_from_code\u001b[0;34m(code)\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21malt_from_code\u001b[39m(code):\n\u001b[1;32m 2\u001b[0m input_ids \u001b[38;5;241m=\u001b[39m tokenizer(code, return_tensors\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpt\u001b[39m\u001b[38;5;124m\"\u001b[39m)\u001b[38;5;241m.\u001b[39minput_ids\n\u001b[0;32m----> 3\u001b[0m generated_ids \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_return_sequences\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m100\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m20\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdo_sample\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtemperature\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1.0\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m filter_codes(tokenizer\u001b[38;5;241m.\u001b[39mbatch_decode(generated_ids, skip_special_tokens\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m))\n",
412
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/autograd/grad_mode.py:28\u001b[0m, in \u001b[0;36m_DecoratorContextManager.__call__.<locals>.decorate_context\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[1;32m 26\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdecorate_context\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m 27\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m():\n\u001b[0;32m---> 28\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
413
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/generation_utils.py:1200\u001b[0m, in \u001b[0;36mGenerationMixin.generate\u001b[0;34m(self, inputs, max_length, min_length, do_sample, early_stopping, num_beams, temperature, top_k, top_p, repetition_penalty, bad_words_ids, bos_token_id, pad_token_id, eos_token_id, length_penalty, no_repeat_ngram_size, encoder_no_repeat_ngram_size, num_return_sequences, max_time, max_new_tokens, decoder_start_token_id, use_cache, num_beam_groups, diversity_penalty, prefix_allowed_tokens_fn, logits_processor, stopping_criteria, output_attentions, output_hidden_states, output_scores, return_dict_in_generate, forced_bos_token_id, forced_eos_token_id, remove_invalid_values, synced_gpus, **model_kwargs)\u001b[0m\n\u001b[1;32m 1192\u001b[0m input_ids, model_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_expand_inputs_for_generation(\n\u001b[1;32m 1193\u001b[0m input_ids,\n\u001b[1;32m 1194\u001b[0m expand_size\u001b[38;5;241m=\u001b[39mnum_return_sequences,\n\u001b[1;32m 1195\u001b[0m is_encoder_decoder\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconfig\u001b[38;5;241m.\u001b[39mis_encoder_decoder,\n\u001b[1;32m 1196\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs,\n\u001b[1;32m 1197\u001b[0m )\n\u001b[1;32m 1199\u001b[0m \u001b[38;5;66;03m# 12. run sample\u001b[39;00m\n\u001b[0;32m-> 1200\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msample\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1201\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1202\u001b[0m \u001b[43m \u001b[49m\u001b[43mlogits_processor\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlogits_processor\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1203\u001b[0m \u001b[43m \u001b[49m\u001b[43mlogits_warper\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlogits_warper\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1204\u001b[0m \u001b[43m \u001b[49m\u001b[43mstopping_criteria\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstopping_criteria\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1205\u001b[0m \u001b[43m \u001b[49m\u001b[43mpad_token_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpad_token_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1206\u001b[0m \u001b[43m \u001b[49m\u001b[43meos_token_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43meos_token_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1207\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_scores\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_scores\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1208\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict_in_generate\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict_in_generate\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1209\u001b[0m \u001b[43m \u001b[49m\u001b[43msynced_gpus\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msynced_gpus\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1210\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1211\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1213\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m is_beam_gen_mode:\n\u001b[1;32m 1214\u001b[0m 
\u001b[38;5;28;01mif\u001b[39;00m num_return_sequences \u001b[38;5;241m>\u001b[39m num_beams:\n",
414
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/generation_utils.py:1710\u001b[0m, in \u001b[0;36mGenerationMixin.sample\u001b[0;34m(self, input_ids, logits_processor, stopping_criteria, logits_warper, max_length, pad_token_id, eos_token_id, output_attentions, output_hidden_states, output_scores, return_dict_in_generate, synced_gpus, **model_kwargs)\u001b[0m\n\u001b[1;32m 1707\u001b[0m model_inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprepare_inputs_for_generation(input_ids, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodel_kwargs)\n\u001b[1;32m 1709\u001b[0m \u001b[38;5;66;03m# forward pass to get next token\u001b[39;00m\n\u001b[0;32m-> 1710\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1711\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mmodel_inputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1712\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 1713\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1714\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1715\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1717\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m synced_gpus \u001b[38;5;129;01mand\u001b[39;00m this_peer_finished:\n\u001b[1;32m 1718\u001b[0m cur_len \u001b[38;5;241m=\u001b[39m cur_len \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m\n",
415
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
416
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:1616\u001b[0m, in \u001b[0;36mT5ForConditionalGeneration.forward\u001b[0;34m(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, head_mask, decoder_head_mask, cross_attn_head_mask, encoder_outputs, past_key_values, inputs_embeds, decoder_inputs_embeds, labels, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 1613\u001b[0m decoder_attention_mask \u001b[38;5;241m=\u001b[39m decoder_attention_mask\u001b[38;5;241m.\u001b[39mto(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdecoder\u001b[38;5;241m.\u001b[39mfirst_device)\n\u001b[1;32m 1615\u001b[0m \u001b[38;5;66;03m# Decode\u001b[39;00m\n\u001b[0;32m-> 1616\u001b[0m decoder_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdecoder\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1617\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_input_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1618\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1619\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs_embeds\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_inputs_embeds\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1620\u001b[0m \u001b[43m \u001b[49m\u001b[43mpast_key_values\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_values\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1621\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1622\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1623\u001b[0m \u001b[43m \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdecoder_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1624\u001b[0m \u001b[43m \u001b[49m\u001b[43mcross_attn_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1625\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1626\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1627\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1628\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_dict\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1629\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1631\u001b[0m sequence_output \u001b[38;5;241m=\u001b[39m decoder_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 1633\u001b[0m \u001b[38;5;66;03m# Set device for model parallelism\u001b[39;00m\n",
417
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
418
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:1011\u001b[0m, in \u001b[0;36mT5Stack.forward\u001b[0;34m(self, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, inputs_embeds, head_mask, cross_attn_head_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[1;32m 998\u001b[0m layer_outputs \u001b[38;5;241m=\u001b[39m checkpoint(\n\u001b[1;32m 999\u001b[0m create_custom_forward(layer_module),\n\u001b[1;32m 1000\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1008\u001b[0m \u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;66;03m# past_key_value is always None with gradient checkpointing\u001b[39;00m\n\u001b[1;32m 1009\u001b[0m )\n\u001b[1;32m 1010\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1011\u001b[0m layer_outputs \u001b[38;5;241m=\u001b[39m \u001b[43mlayer_module\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1012\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1013\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mextended_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1014\u001b[0m \u001b[43m \u001b[49m\u001b[43mposition_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1015\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1016\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_extended_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1017\u001b[0m \u001b[43m \u001b[49m\u001b[43mencoder_decoder_position_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_decoder_position_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1018\u001b[0m \u001b[43m \u001b[49m\u001b[43mlayer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlayer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1019\u001b[0m \u001b[43m \u001b[49m\u001b[43mcross_attn_layer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_layer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1020\u001b[0m \u001b[43m \u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1021\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1022\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1023\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1025\u001b[0m \u001b[38;5;66;03m# layer_outputs is a tuple with:\u001b[39;00m\n\u001b[1;32m 1026\u001b[0m \u001b[38;5;66;03m# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)\u001b[39;00m\n\u001b[1;32m 1027\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m use_cache \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mFalse\u001b[39;00m:\n",
419
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
420
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:672\u001b[0m, in \u001b[0;36mT5Block.forward\u001b[0;34m(self, hidden_states, attention_mask, position_bias, encoder_hidden_states, encoder_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, past_key_value, use_cache, output_attentions, return_dict)\u001b[0m\n\u001b[1;32m 669\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 670\u001b[0m query_length \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m--> 672\u001b[0m cross_attention_outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlayer\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 673\u001b[0m \u001b[43m \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 674\u001b[0m \u001b[43m \u001b[49m\u001b[43mkey_value_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 675\u001b[0m \u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 676\u001b[0m \u001b[43m \u001b[49m\u001b[43mposition_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoder_decoder_position_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 677\u001b[0m \u001b[43m \u001b[49m\u001b[43mlayer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_layer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 678\u001b[0m \u001b[43m \u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcross_attn_past_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 679\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 680\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 681\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 682\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 683\u001b[0m hidden_states \u001b[38;5;241m=\u001b[39m cross_attention_outputs[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 685\u001b[0m \u001b[38;5;66;03m# clamp inf values to enable fp16 training\u001b[39;00m\n",
421
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
422
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:587\u001b[0m, in \u001b[0;36mT5LayerCrossAttention.forward\u001b[0;34m(self, hidden_states, key_value_states, attention_mask, position_bias, layer_head_mask, past_key_value, use_cache, query_length, output_attentions)\u001b[0m\n\u001b[1;32m 574\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\n\u001b[1;32m 575\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 576\u001b[0m hidden_states,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 584\u001b[0m output_attentions\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[1;32m 585\u001b[0m ):\n\u001b[1;32m 586\u001b[0m normed_hidden_states \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlayer_norm(hidden_states)\n\u001b[0;32m--> 587\u001b[0m attention_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mEncDecAttention\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 588\u001b[0m \u001b[43m \u001b[49m\u001b[43mnormed_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 589\u001b[0m \u001b[43m \u001b[49m\u001b[43mmask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 590\u001b[0m \u001b[43m \u001b[49m\u001b[43mkey_value_states\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkey_value_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 591\u001b[0m \u001b[43m \u001b[49m\u001b[43mposition_bias\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mposition_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 592\u001b[0m \u001b[43m \u001b[49m\u001b[43mlayer_head_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlayer_head_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 593\u001b[0m \u001b[43m \u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpast_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 594\u001b[0m \u001b[43m \u001b[49m\u001b[43muse_cache\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muse_cache\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 595\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mquery_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 596\u001b[0m \u001b[43m \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 597\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 598\u001b[0m layer_output \u001b[38;5;241m=\u001b[39m hidden_states \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdropout(attention_output[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m 599\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (layer_output,) \u001b[38;5;241m+\u001b[39m attention_output[\u001b[38;5;241m1\u001b[39m:] \u001b[38;5;66;03m# add attentions if we output them\u001b[39;00m\n",
423
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
424
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/transformers/models/t5/modeling_t5.py:525\u001b[0m, in \u001b[0;36mT5Attention.forward\u001b[0;34m(self, hidden_states, mask, key_value_states, position_bias, past_key_value, layer_head_mask, query_length, use_cache, output_attentions)\u001b[0m\n\u001b[1;32m 522\u001b[0m attn_weights \u001b[38;5;241m=\u001b[39m attn_weights \u001b[38;5;241m*\u001b[39m layer_head_mask\n\u001b[1;32m 524\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m unshape(torch\u001b[38;5;241m.\u001b[39mmatmul(attn_weights, value_states)) \u001b[38;5;66;03m# (batch_size, seq_length, dim)\u001b[39;00m\n\u001b[0;32m--> 525\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mo\u001b[49m\u001b[43m(\u001b[49m\u001b[43mattn_output\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 527\u001b[0m present_key_value_state \u001b[38;5;241m=\u001b[39m (key_states, value_states) \u001b[38;5;28;01mif\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mis_decoder \u001b[38;5;129;01mand\u001b[39;00m use_cache) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 528\u001b[0m outputs \u001b[38;5;241m=\u001b[39m (attn_output,) \u001b[38;5;241m+\u001b[39m (present_key_value_state,) \u001b[38;5;241m+\u001b[39m (position_bias,)\n",
425
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1102\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1098\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1099\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1100\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1102\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1103\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1104\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
426
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/modules/linear.py:103\u001b[0m, in \u001b[0;36mLinear.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 103\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlinear\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
427
- "File \u001b[0;32m~/.pyenv/versions/3.9.9/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/torch/nn/functional.py:1848\u001b[0m, in \u001b[0;36mlinear\u001b[0;34m(input, weight, bias)\u001b[0m\n\u001b[1;32m 1846\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_variadic(\u001b[38;5;28minput\u001b[39m, weight, bias):\n\u001b[1;32m 1847\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(linear, (\u001b[38;5;28minput\u001b[39m, weight, bias), \u001b[38;5;28minput\u001b[39m, weight, bias\u001b[38;5;241m=\u001b[39mbias)\n\u001b[0;32m-> 1848\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_C\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_nn\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlinear\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
428
- "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
429
- ]
- }
- ],
- "source": [
- "import json, gzip\n",
- "from tqdm import tqdm\n",
- "\n",
- "\n",
- "with open('data.single_start_alts.jsonl.gz', 'w') as f:\n",
- "    f.write('')\n",
- "\n",
- "\n",
- "def write_rows_compressed(rows):\n",
- "    rows = [json.dumps(r) for r in rows]\n",
- "    with gzip.open('data.alts.jsonl.gz', 'ab') as f:\n",
- "        f.write('\\n'.join(rows).encode() + b'\\n')\n",
- "\n",
- "\n",
- "# currently takes ~10 seconds per iteration for 8,968,897 samples, so ~1k days\n",
- "with open('../data.jsonl', 'r', encoding=\"utf-8\") as f:\n",
- "    new_rows = []\n",
- "    for id_, line in tqdm(enumerate(f), total=8968897):\n",
- "        row = json.loads(line)\n",
- "        alts = make_alternatives(row)\n",
- "        new_rows += [row] + alts\n",
- "        if new_rows and len(new_rows) % 10_000 == 0:\n",
- "            write_rows_compressed(new_rows)\n",
- "            new_rows = []\n",
- "            break\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "['1, 2',\n",
- " '1, 0',\n",
- " '1, 1, 1, 1',\n",
- " '1, 1',\n",
- " '\"ab\",i,2',\n",
- " '0, 1',\n",
- " '8',\n",
- " '\"s\", \"m\", \"v\", \"r \"',\n",
- " 'g, - p',\n",
- " '1, 1, 1,',\n",
- " '7, 5, 6',\n",
- " 'g, i, l',\n",
- " '1',\n",
- " '1,1,2,3',\n",
- " '1, 2, 2',\n",
- " '\"ab\", \"aa\", \"ab\", \"aa\"',\n",
- " '1, 2, 3, 4',\n",
- " '\"ab\",\"ace\",\"ae\",\"ad\"',\n",
- " 'i, i',\n",
- " '\"ab\", \"a\", \"e\"',\n",
- " '100, 100, 100',\n",
- " '1,3,3,4,5,6,7,9,0',\n",
- " '\" a\"',\n",
- " '0, 1, 2',\n",
- " '0, 1, 1, 1, 0',\n",
- " '\"ab\", \"bal,ca\"',\n",
- " 'g,i, l [ i ]',\n",
- " '1, 3,4, 6',\n",
- " 'a',\n",
- " '1, 2, 3',\n",
- " '9, 9',\n",
- " '( 1)',\n",
- " '2, - 1, - 1',\n",
- " '0 | 1 | 0|0',\n",
- " '{ 1 }',\n",
- " 'i - 1',\n",
- " 'o, l1, o2, l',\n",
- " '\"ab\"',\n",
- " '1, 1, 2',\n",
- " 'g, i',\n",
- " '0, 0',\n",
- " '\"a\"',\n",
- " 'i, l',\n",
- " 'i',\n",
- " '0,0',\n",
- " '- l [ i ]',\n",
- " '1, 2, 3, 1',\n",
- " 'l[ i - 1 ]',\n",
- " '\"1\",\"2\", \"3\",\"4\", \"5\"',\n",
- " 'g, g, i']"
- ]
- },
- "execution_count": 3,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "code = 'def main(): g = \"ab\"; i = 1; l = [<extra_id_0>]; g += l[i]; return g, i, l'\n",
- "\n",
- "input_ids = tokenizer(code, return_tensors=\"pt\").input_ids\n",
- "generated_ids = model.generate(input_ids, num_return_sequences=100, max_length=20, do_sample=True, temperature=1.0)\n",
- "filter_codes(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))\n",
- "\n",
- "# 100 samples -> ~8 valid alternatives, 3.1s on macOS CPU"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 54,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "['<pad><s><extra_id_0>5<extra_id_1>g i l [ 0</s><pad><pad>',\n",
- " '<pad><s><extra_id_0>0<extra_id_1>0, 0, 0</s><pad><pad>',\n",
- " '<pad><s><extra_id_0>0<extra_id_1>1 1 2, 1</s><pad><pad>',\n",
- " \"<pad><s><extra_id_0>'<extra_id_1>i</s><pad><pad><pad><pad><pad><pad>\",\n",
- " '<pad><s><extra_id_0>0<extra_id_1>a t</s><pad><pad><pad><pad><pad>',\n",
- " '<pad><s><extra_id_0>0.0<extra_id_1>e. f_i</s>',\n",
- " '<pad><s><extra_id_0>\" \"<extra_id_1>1</s><pad><pad><pad><pad><pad>',\n",
- " '<pad><s><extra_id_0>0<extra_id_1>n = 1 l =</s><pad><pad>',\n",
- " '<pad><s><extra_id_0>0, 0, 1<extra_id_1>1</s><pad><pad>',\n",
- " '<pad><s><extra_id_0>1<extra_id_1>k y y x z</s><pad><pad>']"
- ]
- },
- "execution_count": 54,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "code = 'def main(): g = <extra_id_0>; i = 1; l = [<extra_id_1>]; g += l[i]; return g, i, l'\n",
- "\n",
- "input_ids = tokenizer(code, return_tensors=\"pt\").input_ids\n",
- "generated_ids = model.generate(input_ids, num_return_sequences=10, max_length=20, do_sample=True, temperature=1.0)\n",
- "tokenizer.batch_decode(generated_ids)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "interpreter": {
- "hash": "ced6a873299cbeeefe969ab88294103b352f8c83b6537b9e08e8739795321d60"
- },
- "kernelspec": {
- "display_name": "Python 3.9.9 64-bit ('3.9.9': pyenv)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.9"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
- }
 
data.mini.jsonl → mini/python-state-changes-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4d0f744caffd600ab879e729bdf99ee99d74f8fae0512421416bc0cc1e1fe9c1
- size 13299950
+ oid sha256:66dce26698701d2e9a983193ce7a281b53971cfd8a641615178acc8dab3ed01d
+ size 6340539
python-state-changes.py DELETED
@@ -1,55 +0,0 @@
- """Python State Changes"""
- from random import choice, shuffle
- from sys import maxsize
- import datasets
- import json
-
-
- _DESCRIPTION = """\
- Python state changes from a single line of code.
- """
- _FEATURES = datasets.Features(
-     {
-         "start": datasets.Value("string"),
-         "code": datasets.Value("string"),
-         "end": datasets.Value("string"),
-     }
- )
- _DATA_URL = {
-     'default': "https://huggingface.co/datasets/Fraser/python-state-changes/resolve/main/data.jsonl",
-     'mini': "https://huggingface.co/datasets/Fraser/python-state-changes/resolve/main/data.mini.jsonl"
- }
- _LICENSE = "MIT License"
-
-
- class PythonStateChanges(datasets.GeneratorBasedBuilder):
-     """Python state changes from single lines of code."""
-
-     VERSION = datasets.Version("1.1.0")
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(version=VERSION),
-         datasets.BuilderConfig(
-             name="mini", version=VERSION, description="100k subset of the dataset."
-         ),
-     ]
-     DEFAULT_CONFIG_NAME = "default"
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=_FEATURES,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_path = dl_manager.download(_DATA_URL[self.config.name])
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={'path': data_path}
-             ),
-         ]
-
-     def _generate_examples(self, path):
-         with open(path, 'r', encoding="utf-8") as f:
-             for id_, line in enumerate(f):
-                 yield id_, json.loads(line)
 
tests/test.py DELETED
@@ -1,8 +0,0 @@
- import datasets
-
-
- data = datasets.load_dataset('Fraser/python-state-changes', 'mini', streaming=True)
-
- for row in data['train']:
-     print(row)
-     break