OZ1150 committed
Commit 2abae32
1 Parent(s): 9efac10

Upload 8 files

adapter_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "base_model_name_or_path": "/workspace/WizardLM-7b",
+   "bias": "all",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "lora_alpha": 64,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "q_proj",
+     "k_proj",
+     "v_proj",
+     "out_proj",
+     "fc_in",
+     "fc_out",
+     "wte"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
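This is a PEFT LoRA config: rank-8 adapters with alpha 64 and dropout 0.1, trained against the local WizardLM-7B checkout. Note that `target_modules` mixes in GPT-NeoX/GPT-J-style names (`out_proj`, `fc_in`, `fc_out`, `wte`) that do not exist on a LLaMA architecture, so only `q_proj`, `k_proj` and `v_proj` actually receive adapters — the notebook's printed trainable-parameter count (6,291,456 = 3 projections × 32 layers × 8 × 8192) confirms this. A minimal loading sketch, not code from this commit; the paths are assumptions (`base_dir` must match `base_model_name_or_path`, `adapter_dir` is wherever this repo is cloned):

```python
# Sketch: attach this repo's LoRA adapter to the base model with peft.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_dir = "/workspace/WizardLM-7b"  # assumed: matches base_model_name_or_path above
adapter_dir = "."                    # assumed: directory with adapter_config.json + adapter_model.bin

model = AutoModelForCausalLM.from_pretrained(base_dir, torch_dtype=torch.float16, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_dir)  # reads this adapter_config.json
tokenizer = AutoTokenizer.from_pretrained(adapter_dir)
```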
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7e509a37773ddff40c0df2df2979f2da94c3e92374b6ef8903962172a871397
+ size 25234701
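The adapter weights live in Git LFS, so the diff shows only the pointer file. After fetching the real object (e.g. `git lfs pull`), it can be checked against the pointer's oid and size; a sketch, assuming the file sits in the current directory:

```python
# Sketch: verify a downloaded LFS object against the pointer's sha256 oid and size.
import hashlib
from pathlib import Path

blob = Path("adapter_model.bin").read_bytes()  # assumed download location
assert len(blob) == 25234701, "size mismatch"
assert hashlib.sha256(blob).hexdigest() == (
    "d7e509a37773ddff40c0df2df2979f2da94c3e92374b6ef8903962172a871397"
), "oid mismatch"
print("adapter_model.bin matches its LFS pointer")
```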
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[PAD]": 32000
+ }
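`[PAD]` at id 32000 extends the 32,000-entry LLaMA vocabulary by one token. The notebook below never uses it (it pads with the EOS token instead), but anyone registering it against the bare base model must also grow the embedding matrix; a sketch with assumed paths:

```python
# Sketch: add [PAD] as id 32000 and resize embeddings to match.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("/workspace/WizardLM-7b")  # path as used in the notebook
model = AutoModelForCausalLM.from_pretrained("/workspace/WizardLM-7b")

tokenizer.add_special_tokens({"pad_token": "[PAD]"})  # becomes id 32000, as in added_tokens.json
model.resize_token_embeddings(len(tokenizer))         # embedding table grows to 32001 rows
```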
finetune.ipynb ADDED
@@ -0,0 +1,1130 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "id": "3139c31d-2ca2-472e-8e29-c404032e4a55",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from pathlib import Path\n",
+     "\n",
+     "#!git clone https://github.com/IlyaGusev/rulm.git\n",
+     "#!git clone https://github.com/ggerganov/llama.cpp.git\n",
+     "\n",
+     "self_instruct_dir = Path('rulm/self_instruct').resolve()\n",
+     "\n",
+     "#!cd {self_instruct_dir} && pip install -r ../requirements.txt"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "id": "6f04525b-5567-4335-98b2-7bb60d4be6ca",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from huggingface_hub import snapshot_download\n",
+     "content_dir = Path('.').resolve()\n",
+     "\n",
+     "model_dir = content_dir / \"WizardLM-7b\"\n",
+     "base_model = \"ehartford/WizardLM-7B-V1.0-Uncensored\" #@param {type:\"string\"}\n",
+     "#snapshot_download(repo_id=base_model, local_dir=model_dir, ignore_patterns=[\"LICENSE\", \"README.md\", \".gitattributes\"])\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "id": "5321ea58-116a-4501-b6c3-3521ac5e6e7f",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Overriding torch_dtype=None with `torch_dtype=torch.float16` due to requirements of `bitsandbytes` to enable model loading in mixed int8. Either pass torch_dtype=torch.float16 or don't pass this argument at all to remove this warning.\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "\n",
+       "===================================BUG REPORT===================================\n",
+       "Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n",
+       "================================================================================\n",
+       "CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching /usr/local/cuda/lib64...\n",
+       "CUDA SETUP: CUDA runtime path found: /usr/local/cuda/lib64/libcudart.so\n",
+       "CUDA SETUP: Highest compute capability among GPUs detected: 8.0\n",
+       "CUDA SETUP: Detected CUDA version 117\n",
+       "CUDA SETUP: Loading binary /usr/local/lib/python3.10/dist-packages/bitsandbytes/libbitsandbytes_cuda117.so...\n"
+      ]
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py:136: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/usr/local/nvidia/lib64'), PosixPath('/usr/local/nvidia/lib')}\n",
+       "  warn(msg)\n",
+       "/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py:136: UserWarning: /usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/lib/x86_64-linux-gnu did not contain libcudart.so as expected! Searching further paths...\n",
+       "  warn(msg)\n",
+       "/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py:136: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('yCIZccJum+9/ZLm7c7rDHLQYnn/OPUl4MSQ9SgINSks'), PosixPath('SHA256')}\n",
+       "  warn(msg)\n",
+       "/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py:136: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('noninteractive SHELL=/bin/bash')}\n",
+       "  warn(msg)\n",
+       "/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py:136: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/workspace/Untitled.ipynb')}\n",
+       "  warn(msg)\n",
+       "/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py:136: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('//matplotlib_inline.backend_inline'), PosixPath('module')}\n",
+       "  warn(msg)\n"
+      ]
+     },
+     {
+      "data": {
+       "application/vnd.jupyter.widget-view+json": {
+        "model_id": "5861e63248924916a4e574e3b559d5f9",
+        "version_major": 2,
+        "version_minor": 0
+       },
+       "text/plain": [
+        "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "import torch\n",
+     "from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n",
+     "\n",
+     "model_id = \"/workspace/WizardLM-7b\"\n",
+     "bnb_config = BitsAndBytesConfig(\n",
+     "    load_in_8bit=True,\n",
+     "    bnb_8bit_use_double_quant=True,\n",
+     "    bnb_8bit_quant_type=\"nf4\",\n",
+     "    bnb_8bit_compute_dtype=torch.float32,\n",
+     "    llm_int8_enable_fp32_cpu_offload=True,\n",
+     "    load_in_8bit_fp32_cpu_offload=True\n",
+     ")\n",
+     "\n",
+     "\n",
+     "\n",
+     "\n",
+     "tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
+     "model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map=\"auto\", max_memory={0:'8GiB', 'cpu':'50GiB'})"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "id": "6e575c22-66de-457c-82b2-da57db38ead8",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from peft import prepare_model_for_int8_training\n",
+     "\n",
+     "model.gradient_checkpointing_enable()\n",
+     "model = prepare_model_for_int8_training(model)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "id": "16fd3d91-6468-4268-bc48-3b1c5bcf39cc",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def print_trainable_parameters(model):\n",
+     "    \"\"\"\n",
+     "    Prints the number of trainable parameters in the model.\n",
+     "    \"\"\"\n",
+     "    trainable_params = 0\n",
+     "    all_param = 0\n",
+     "    for _, param in model.named_parameters():\n",
+     "        all_param += param.numel()\n",
+     "        if param.requires_grad:\n",
+     "            trainable_params += param.numel()\n",
+     "    print(\n",
+     "        f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n",
+     "    )"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "id": "79566f41-ee3f-4662-bbcc-0c8ebc684b8d",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "trainable params: 6291456 || all params: 6744707072 || trainable%: 0.09327989982127426\n"
+      ]
+     }
+    ],
+    "source": [
+     "from peft import LoraConfig, get_peft_model\n",
+     "\n",
+     "\n",
+     "config = LoraConfig(\n",
+     "    r=8,\n",
+     "    lora_alpha=64,\n",
+     "    target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\", \"fc_in\", \"fc_out\", \"wte\"],\n",
+     "    lora_dropout=0.1,\n",
+     "    bias=\"all\",\n",
+     "    task_type=\"CAUSAL_LM\"\n",
+     ")\n",
+     "\n",
+     "model = get_peft_model(model, config)\n",
+     "print_trainable_parameters(model)\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 7,
+    "id": "68ca2e1a-eeca-41a0-a220-f27971309477",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Found cached dataset json (/root/.cache/huggingface/datasets/json/default-6d1f9620cbe5783f/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96)\n",
+       "Loading cached processed dataset at /root/.cache/huggingface/datasets/json/default-6d1f9620cbe5783f/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-6b78e4d017496646.arrow\n"
+      ]
+     }
+    ],
+    "source": [
+     "from datasets import load_dataset\n",
+     "\n",
+     "# Define a function to tokenize the dialogues\n",
+     "def tokenize_function(examples):\n",
+     "    return tokenizer(examples[\"dialogue\"], truncation=True, padding='max_length', max_length=128)\n",
+     "\n",
+     "# Load the new dataset\n",
+     "dataset = load_dataset('json', data_files='/workspace/new_output.jsonl', split='train')\n",
+     "\n",
+     "# Tokenize the dataset\n",
+     "dataset = dataset.map(tokenize_function, batched=True)\n",
+     "\n",
+     "# Split the dataset into training and evaluation sets\n",
+     "dataset = dataset.train_test_split(test_size=0.1)\n",
+     "\n",
+     "# Get the training and evaluation datasets\n",
+     "train_dataset = dataset['train']\n",
+     "eval_dataset = dataset['test']"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "0ba14b1b-26ec-420c-bb41-6dacfaa5093a",
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mdj1150277539\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n"
+      ]
+     },
+     {
+      "data": {
+       "text/html": [
+        "Tracking run with wandb version 0.15.4"
+       ],
+       "text/plain": [
+        "<IPython.core.display.HTML object>"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "text/html": [
+        "Run data is saved locally in <code>/workspace/wandb/run-20230704_050553-7zgg0kn5</code>"
+       ],
+       "text/plain": [
+        "<IPython.core.display.HTML object>"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "text/html": [
+        "Syncing run <strong><a href='https://wandb.ai/dj1150277539/huggingface/runs/7zgg0kn5' target=\"_blank\">distinctive-feather-33</a></strong> to <a href='https://wandb.ai/dj1150277539/huggingface' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/run' target=\"_blank\">docs</a>)<br/>"
+       ],
+       "text/plain": [
+        "<IPython.core.display.HTML object>"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "text/html": [
+        " View project at <a href='https://wandb.ai/dj1150277539/huggingface' target=\"_blank\">https://wandb.ai/dj1150277539/huggingface</a>"
+       ],
+       "text/plain": [
+        "<IPython.core.display.HTML object>"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "text/html": [
+        " View run at <a href='https://wandb.ai/dj1150277539/huggingface/runs/7zgg0kn5' target=\"_blank\">https://wandb.ai/dj1150277539/huggingface/runs/7zgg0kn5</a>"
+       ],
+       "text/plain": [
+        "<IPython.core.display.HTML object>"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "You're using a LlamaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n"
+      ]
+     },
+     {
+      "data": {
+       "text/html": [
+        "\n",
+        "    <div>\n",
+        "      \n",
+        "      <progress value='292' max='300' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
+        "      [292/300 13:37:21 < 22:32, 0.01 it/s, Epoch 166/300]\n",
+        "    </div>\n",
+        "    <table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        " <tr style=\"text-align: left;\">\n",
+        "      <th>Step</th>\n",
+        "      <th>Training Loss</th>\n",
+        "      <th>Validation Loss</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <td>2</td>\n",
+        "      <td>3.336500</td>\n",
+        "      <td>14.149993</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>4</td>\n",
+        "      <td>6.517100</td>\n",
+        "      <td>13.468672</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>6</td>\n",
+        "      <td>9.206500</td>\n",
+        "      <td>12.650669</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>8</td>\n",
+        "      <td>9.328000</td>\n",
+        "      <td>11.601873</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>10</td>\n",
+        "      <td>6.599700</td>\n",
+        "      <td>10.404304</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>12</td>\n",
+        "      <td>3.456600</td>\n",
+        "      <td>8.535113</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>14</td>\n",
+        "      <td>0.919500</td>\n",
+        "      <td>6.562298</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>16</td>\n",
+        "      <td>0.691900</td>\n",
+        "      <td>6.245927</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>18</td>\n",
+        "      <td>1.991800</td>\n",
+        "      <td>5.460762</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>20</td>\n",
+        "      <td>2.966800</td>\n",
+        "      <td>4.838455</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>22</td>\n",
+        "      <td>3.540500</td>\n",
+        "      <td>4.411865</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>24</td>\n",
+        "      <td>3.101700</td>\n",
+        "      <td>4.204891</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>26</td>\n",
+        "      <td>1.990200</td>\n",
+        "      <td>4.001011</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>28</td>\n",
+        "      <td>0.980100</td>\n",
+        "      <td>3.855425</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>30</td>\n",
+        "      <td>0.058200</td>\n",
+        "      <td>3.714956</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>32</td>\n",
+        "      <td>0.800900</td>\n",
+        "      <td>3.611897</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>34</td>\n",
+        "      <td>1.613400</td>\n",
+        "      <td>3.506055</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>36</td>\n",
+        "      <td>2.387300</td>\n",
+        "      <td>3.416133</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>38</td>\n",
+        "      <td>2.598900</td>\n",
+        "      <td>3.339494</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>40</td>\n",
+        "      <td>2.018800</td>\n",
+        "      <td>3.274912</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>42</td>\n",
+        "      <td>1.217900</td>\n",
+        "      <td>3.217013</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>44</td>\n",
+        "      <td>0.448600</td>\n",
+        "      <td>3.172593</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>46</td>\n",
+        "      <td>0.293700</td>\n",
+        "      <td>3.141093</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>48</td>\n",
+        "      <td>1.022200</td>\n",
+        "      <td>3.091443</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>50</td>\n",
+        "      <td>1.731100</td>\n",
+        "      <td>3.060865</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>52</td>\n",
+        "      <td>2.338500</td>\n",
+        "      <td>3.032907</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>54</td>\n",
+        "      <td>2.226600</td>\n",
+        "      <td>3.012452</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>56</td>\n",
+        "      <td>1.500500</td>\n",
+        "      <td>2.992773</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>58</td>\n",
+        "      <td>0.795400</td>\n",
+        "      <td>2.976812</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>60</td>\n",
+        "      <td>0.092900</td>\n",
+        "      <td>2.965430</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>62</td>\n",
+        "      <td>0.597500</td>\n",
+        "      <td>2.950096</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>64</td>\n",
+        "      <td>1.287900</td>\n",
+        "      <td>2.940890</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>66</td>\n",
+        "      <td>1.970100</td>\n",
+        "      <td>2.930588</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>68</td>\n",
+        "      <td>2.240600</td>\n",
+        "      <td>2.923621</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>70</td>\n",
+        "      <td>1.825200</td>\n",
+        "      <td>2.917476</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>72</td>\n",
+        "      <td>1.137100</td>\n",
+        "      <td>2.911416</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>74</td>\n",
+        "      <td>0.455000</td>\n",
+        "      <td>2.906709</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>76</td>\n",
+        "      <td>0.225000</td>\n",
+        "      <td>2.902960</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>78</td>\n",
+        "      <td>0.907000</td>\n",
+        "      <td>2.898351</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>80</td>\n",
+        "      <td>1.582700</td>\n",
+        "      <td>2.894657</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>82</td>\n",
+        "      <td>2.208000</td>\n",
+        "      <td>2.891902</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>84</td>\n",
+        "      <td>2.161300</td>\n",
+        "      <td>2.889836</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>86</td>\n",
+        "      <td>1.485300</td>\n",
+        "      <td>2.886881</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>88</td>\n",
+        "      <td>0.808700</td>\n",
+        "      <td>2.888359</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>90</td>\n",
+        "      <td>0.134600</td>\n",
+        "      <td>2.898380</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>92</td>\n",
+        "      <td>0.540100</td>\n",
+        "      <td>2.904707</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>94</td>\n",
+        "      <td>1.212500</td>\n",
+        "      <td>2.884937</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>96</td>\n",
+        "      <td>1.882300</td>\n",
+        "      <td>2.886901</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>98</td>\n",
+        "      <td>2.190800</td>\n",
+        "      <td>2.881487</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>100</td>\n",
+        "      <td>1.831900</td>\n",
+        "      <td>2.881773</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>102</td>\n",
+        "      <td>1.164300</td>\n",
+        "      <td>2.879683</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>104</td>\n",
+        "      <td>0.490800</td>\n",
+        "      <td>2.878468</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>106</td>\n",
+        "      <td>0.178100</td>\n",
+        "      <td>2.877084</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>108</td>\n",
+        "      <td>0.841800</td>\n",
+        "      <td>2.875591</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>110</td>\n",
+        "      <td>1.500900</td>\n",
+        "      <td>2.874607</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>112</td>\n",
+        "      <td>2.174100</td>\n",
+        "      <td>2.873750</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>114</td>\n",
+        "      <td>0.664800</td>\n",
+        "      <td>2.875177</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>116</td>\n",
+        "      <td>1.330800</td>\n",
+        "      <td>2.873417</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>118</td>\n",
+        "      <td>1.988000</td>\n",
+        "      <td>2.872393</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>120</td>\n",
+        "      <td>2.162500</td>\n",
+        "      <td>2.873486</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>122</td>\n",
+        "      <td>1.675900</td>\n",
+        "      <td>2.873488</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>124</td>\n",
+        "      <td>1.014600</td>\n",
+        "      <td>2.874382</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>126</td>\n",
+        "      <td>0.350600</td>\n",
+        "      <td>2.878257</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>128</td>\n",
+        "      <td>0.307900</td>\n",
+        "      <td>2.882156</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>130</td>\n",
+        "      <td>0.963200</td>\n",
+        "      <td>2.874237</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>132</td>\n",
+        "      <td>1.624300</td>\n",
+        "      <td>2.872886</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>134</td>\n",
+        "      <td>2.146800</td>\n",
+        "      <td>2.873395</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>136</td>\n",
+        "      <td>2.014700</td>\n",
+        "      <td>2.875467</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>138</td>\n",
+        "      <td>1.351300</td>\n",
+        "      <td>2.876896</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>140</td>\n",
+        "      <td>0.699800</td>\n",
+        "      <td>2.879110</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>142</td>\n",
+        "      <td>0.044100</td>\n",
+        "      <td>2.881222</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>144</td>\n",
+        "      <td>0.610900</td>\n",
+        "      <td>2.880388</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>146</td>\n",
+        "      <td>1.260600</td>\n",
+        "      <td>2.882942</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>148</td>\n",
+        "      <td>1.908200</td>\n",
+        "      <td>2.885541</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>150</td>\n",
+        "      <td>2.125100</td>\n",
+        "      <td>2.886549</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>152</td>\n",
+        "      <td>1.685200</td>\n",
+        "      <td>2.889215</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>154</td>\n",
+        "      <td>1.042300</td>\n",
+        "      <td>2.893012</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>156</td>\n",
+        "      <td>0.386300</td>\n",
+        "      <td>2.897541</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>158</td>\n",
+        "      <td>0.258700</td>\n",
+        "      <td>2.897153</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>160</td>\n",
+        "      <td>0.901400</td>\n",
+        "      <td>2.905147</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>162</td>\n",
+        "      <td>1.545900</td>\n",
+        "      <td>2.902808</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>164</td>\n",
+        "      <td>2.106000</td>\n",
+        "      <td>2.907741</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>166</td>\n",
+        "      <td>2.025800</td>\n",
+        "      <td>2.897594</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>168</td>\n",
+        "      <td>1.384000</td>\n",
+        "      <td>2.900589</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>170</td>\n",
+        "      <td>0.730600</td>\n",
+        "      <td>2.906770</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>172</td>\n",
+        "      <td>0.087300</td>\n",
+        "      <td>2.923102</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>174</td>\n",
+        "      <td>0.556200</td>\n",
+        "      <td>2.896429</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>176</td>\n",
+        "      <td>1.199200</td>\n",
+        "      <td>2.916603</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>178</td>\n",
+        "      <td>1.837500</td>\n",
+        "      <td>2.915208</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>180</td>\n",
+        "      <td>2.091800</td>\n",
+        "      <td>2.918897</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>182</td>\n",
+        "      <td>1.706300</td>\n",
+        "      <td>2.923554</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>184</td>\n",
+        "      <td>1.060900</td>\n",
+        "      <td>2.924343</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>186</td>\n",
+        "      <td>0.426200</td>\n",
+        "      <td>2.937875</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>188</td>\n",
+        "      <td>0.213300</td>\n",
+        "      <td>2.928014</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>190</td>\n",
+        "      <td>0.846600</td>\n",
+        "      <td>2.946512</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>192</td>\n",
+        "      <td>1.483100</td>\n",
+        "      <td>2.939318</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>194</td>\n",
+        "      <td>2.072100</td>\n",
+        "      <td>2.945714</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>196</td>\n",
+        "      <td>2.029300</td>\n",
+        "      <td>2.949480</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>198</td>\n",
+        "      <td>1.394200</td>\n",
+        "      <td>2.952962</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>200</td>\n",
+        "      <td>0.761500</td>\n",
+        "      <td>2.967859</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>202</td>\n",
+        "      <td>0.123500</td>\n",
+        "      <td>2.950326</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>204</td>\n",
+        "      <td>0.505700</td>\n",
+        "      <td>2.971651</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>206</td>\n",
+        "      <td>1.131400</td>\n",
+        "      <td>2.973131</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>208</td>\n",
+        "      <td>1.762000</td>\n",
+        "      <td>2.972790</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>210</td>\n",
+        "      <td>2.054900</td>\n",
+        "      <td>2.977710</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>212</td>\n",
+        "      <td>1.715500</td>\n",
+        "      <td>2.984576</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>214</td>\n",
+        "      <td>1.087700</td>\n",
+        "      <td>2.982883</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>216</td>\n",
+        "      <td>0.456900</td>\n",
+        "      <td>2.988919</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>218</td>\n",
+        "      <td>0.166900</td>\n",
+        "      <td>2.996212</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>220</td>\n",
+        "      <td>0.794400</td>\n",
+        "      <td>2.994239</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>222</td>\n",
+        "      <td>1.415700</td>\n",
+        "      <td>3.001529</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>224</td>\n",
+        "      <td>2.044000</td>\n",
+        "      <td>3.003330</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>226</td>\n",
+        "      <td>0.622300</td>\n",
+        "      <td>3.006619</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>228</td>\n",
+        "      <td>1.249300</td>\n",
+        "      <td>3.011294</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>230</td>\n",
+        "      <td>1.873000</td>\n",
+        "      <td>3.013264</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>232</td>\n",
+        "      <td>2.038300</td>\n",
+        "      <td>3.014204</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>234</td>\n",
+        "      <td>1.581300</td>\n",
+        "      <td>3.021334</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>236</td>\n",
+        "      <td>0.957400</td>\n",
+        "      <td>3.019286</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>238</td>\n",
+        "      <td>0.334200</td>\n",
+        "      <td>3.023535</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>240</td>\n",
+        "      <td>0.290700</td>\n",
+        "      <td>3.026750</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>242</td>\n",
+        "      <td>0.915000</td>\n",
+        "      <td>3.029947</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>244</td>\n",
+        "      <td>1.532900</td>\n",
+        "      <td>3.030478</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>246</td>\n",
+        "      <td>2.030900</td>\n",
+        "      <td>3.033974</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>248</td>\n",
+        "      <td>1.905700</td>\n",
+        "      <td>3.042156</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>250</td>\n",
+        "      <td>1.282600</td>\n",
+        "      <td>3.039005</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>252</td>\n",
+        "      <td>0.661500</td>\n",
+        "      <td>3.036432</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>254</td>\n",
+        "      <td>0.041400</td>\n",
+        "      <td>3.039858</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>256</td>\n",
+        "      <td>0.580900</td>\n",
+        "      <td>3.042423</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>258</td>\n",
+        "      <td>1.199400</td>\n",
+        "      <td>3.042602</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>260</td>\n",
+        "      <td>1.818700</td>\n",
+        "      <td>3.046433</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>262</td>\n",
+        "      <td>2.023600</td>\n",
+        "      <td>3.047827</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>264</td>\n",
+        "      <td>1.608300</td>\n",
+        "      <td>3.050818</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>266</td>\n",
+        "      <td>0.993300</td>\n",
+        "      <td>3.050040</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>268</td>\n",
+        "      <td>0.371800</td>\n",
+        "      <td>3.053066</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>270</td>\n",
+        "      <td>0.247700</td>\n",
+        "      <td>3.051264</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>272</td>\n",
+        "      <td>0.863300</td>\n",
+        "      <td>3.050924</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>274</td>\n",
+        "      <td>1.487000</td>\n",
+        "      <td>3.053752</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>276</td>\n",
+        "      <td>2.020500</td>\n",
+        "      <td>3.056072</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>278</td>\n",
+        "      <td>1.937400</td>\n",
+        "      <td>3.054851</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>280</td>\n",
+        "      <td>1.321400</td>\n",
+        "      <td>3.053991</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>282</td>\n",
+        "      <td>0.704000</td>\n",
+        "      <td>3.053047</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>284</td>\n",
+        "      <td>0.082900</td>\n",
+        "      <td>3.054484</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>286</td>\n",
+        "      <td>0.536800</td>\n",
+        "      <td>3.054063</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>288</td>\n",
+        "      <td>1.155600</td>\n",
+        "      <td>3.050694</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <td>290</td>\n",
+        "      <td>1.777500</td>\n",
+        "      <td>3.054609</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table><p>"
+       ],
+       "text/plain": [
+        "<IPython.core.display.HTML object>"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "import transformers\n",
+     "\n",
+     "# needed for gpt-neo-x tokenizer\n",
+     "tokenizer.pad_token = tokenizer.eos_token\n",
+     "\n",
+     "trainer = transformers.Trainer(\n",
+     "    model=model,\n",
+     "    train_dataset=train_dataset,\n",
+     "    eval_dataset=eval_dataset,\n",
+     "    args=transformers.TrainingArguments(\n",
+     "        evaluation_strategy=\"steps\",\n",
+     "        per_device_train_batch_size=64,\n",
+     "        per_device_eval_batch_size=16,\n",
+     "        gradient_accumulation_steps=64,\n",
+     "        warmup_steps=30,\n",
+     "        save_steps=100,\n",
+     "        eval_steps=2,\n",
+     "        num_train_epochs=300,\n",
+     "        learning_rate=3e-4,\n",
+     "        save_total_limit=2,\n",
+     "        load_best_model_at_end=True,\n",
+     "        fp16=True,\n",
+     "        logging_steps=1,\n",
+     "        lr_scheduler_type=\"cosine\",\n",
+     "        output_dir=\"/workspace\",\n",
+     "        optim=\"adamw_torch\",\n",
+     "    ),\n",
+     "    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),\n",
+     ")\n",
+     "model.config.use_cache = False  # silence the warnings. Please re-enable for inference!\n",
+     "trainer.train()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 9,
+    "id": "c6be457c-c54b-4191-885a-475745c75293",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# saving the adapter model\n",
+     "pt_save_directory = '/workspace'\n",
+     "tokenizer.save_pretrained(pt_save_directory)\n",
+     "model.save_pretrained(pt_save_directory)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "7df8097d-40f9-4524-8103-161dad3be122",
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.10.6"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
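The notebook loads the base model in 8-bit, attaches the LoRA adapter, fine-tunes on `/workspace/new_output.jsonl` (the eval loss bottoms out around step 118 and then creeps back up, which is what `load_best_model_at_end` guards against), and saves the adapter plus tokenizer to `/workspace` — the files in this commit. For generation afterwards, the KV cache that training disabled must be switched back on; a sketch, with the prompt and decoding settings illustrative rather than taken from the notebook:

```python
# Sketch: run inference with the saved adapter from this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "/workspace/WizardLM-7b", torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "/workspace")  # adapter + tokenizer were saved here
model.config.use_cache = True  # re-enable the cache the training cell turned off
model.eval()

tokenizer = AutoTokenizer.from_pretrained("/workspace")
inputs = tokenizer("Hello! How are you today?", return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```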
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
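Here `pad_token` is the EOS string `</s>`, mirroring the notebook's `tokenizer.pad_token = tokenizer.eos_token` line rather than the `[PAD]` entry from added_tokens.json. A quick check after loading (a sketch; `.` is the assumed repo directory):

```python
# Sketch: padding resolves to the EOS token in this saved tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
print(tok.pad_token, tok.eos_token)          # "</s>" "</s>"
print(tok.pad_token_id == tok.eos_token_id)  # True
```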
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 2048,
+   "pad_token": null,
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
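These settings reproduce the stock LLaMA tokenizer behavior: a BOS token is prepended, no EOS is appended, the context window is 2048 tokens, padding goes on the right, and the config itself carries no pad token (the special-tokens map above supplies `</s>`). A sanity check on load (a sketch; the expected values follow from the config):

```python
# Sketch: confirm the tokenizer matches tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")       # directory with these tokenizer files
print(tok.model_max_length, tok.padding_side)  # 2048 right

ids = tok("hello").input_ids
print(ids[0] == tok.bos_token_id)              # True  (add_bos_token)
print(ids[-1] == tok.eos_token_id)             # False (add_eos_token is false)
```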