aashay96 committed on
Commit 1c60e01
1 Parent(s): 55d0815

Added training file

Files changed (1)
  1. train_on_streaming_lora.ipynb +503 -0
train_on_streaming_lora.ipynb ADDED
@@ -0,0 +1,503 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "5e32d010-11d0-4be3-a34f-00c87d369347",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[31mERROR: responses 0.18.0 has requirement urllib3>=1.25.10, but you'll have urllib3 1.25.8 which is incompatible.\u001b[0m\n",
+ "\u001b[33m WARNING: The script plasma_store is installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[33m WARNING: The script huggingface-cli is installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[33m WARNING: The script datasets-cli is installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[33m WARNING: The scripts accelerate, accelerate-config and accelerate-launch are installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[31mERROR: torchaudio 0.10.1+rocm4.1 has requirement torch==1.10.1, but you'll have torch 2.0.0 which is incompatible.\u001b[0m\n",
+ "\u001b[31mERROR: torchvision 0.11.2+cu111 has requirement torch==1.10.1, but you'll have torch 2.0.0 which is incompatible.\u001b[0m\n",
+ "\u001b[33m WARNING: The script transformers-cli is installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[33m WARNING: The script isympy is installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[33m WARNING: The scripts cmake, cpack and ctest are installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[33m WARNING: The script lit is installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n",
+ "\u001b[33m WARNING: The scripts convert-caffe2-to-onnx, convert-onnx-to-caffe2 and torchrun are installed in '/home/qblocks/.local/bin' which is not on PATH.\n",
+ " Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\u001b[0m\n"
+ ]
+ }
+ ],
+ "source": [
+ "!pip install -q bitsandbytes datasets accelerate loralib\n",
+ "!pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "d35008ce-0d55-4f74-9eb9-c9dcd392a4ce",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import bitsandbytes as bnb\n",
+ "from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM\n",
+ "\n",
+ "\n",
+ "tokenizer = AutoTokenizer.from_pretrained(\"bigscience/bloom-3b\")\n",
+ "tokenizer.pad_token = tokenizer.eos_token"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "0efc3e69-f796-46cf-8ee8-52d72f9f653e",
+ "metadata": {
+ "scrolled": true,
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "import transformers\n",
+ "from datasets import load_dataset\n",
+ "from datasets import interleave_datasets\n",
+ "data_as = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/as/as.txt\"],split='train',streaming=True)\n",
+ "data_bn = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/bn/bn.txt\"],split='train',streaming=True)\n",
+ "data_gu = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/gu/gu.txt\"],split='train',streaming=True)\n",
+ "data_hi = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/hi/hi.txt\"],split='train',streaming=True)\n",
+ "data_kn = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/kn/kn.txt\"],split='train',streaming=True)\n",
+ "data_ml = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/ml/ml.txt\"],split='train',streaming=True)\n",
+ "data_mr = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/mr/mr.txt\"],split='train',streaming=True)\n",
+ "data_or = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/or/or.txt\"],split='train',streaming=True)\n",
+ "data_pa = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/pa/pa.txt\"],split='train',streaming=True)\n",
+ "data_ta = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/ta/ta.txt\"],split='train',streaming=True)\n",
+ "data_te = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/te/te.txt\"],split='train',streaming=True)\n",
+ "\n",
+ "multilingual_dataset = interleave_datasets([data_as, data_bn,data_gu,data_hi,data_kn,data_ml,data_mr,data_or,data_pa,data_ta,data_te])\n",
+ "\n",
+ "#data_en = load_dataset(\"aashay96/indic_language_corpus\",data_files=[\"indic_dataset_extracted/data/bn/en.txt\"],streaming=True)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "f61461ed-e91e-45e4-b1cd-c31cf15a6d2d",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "multilingual_dataset = multilingual_dataset.map(lambda samples: tokenizer(samples['text'],truncation=True,max_length=1024,padding=True), batched=True)\n",
+ "#data.push_to_hub('aashay96/indic_complete_tokenised')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "b8ed6593-d80c-4fdb-82e7-7b56b2bbc2c2",
+ "metadata": {
+ "scrolled": true,
+ "tags": []
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Overriding torch_dtype=None with `torch_dtype=torch.float16` due to requirements of `bitsandbytes` to enable model loading in mixed int8. Either pass torch_dtype=torch.float16 or don't pass this argument at all to remove this warning.\n"
+ ]
+ }
+ ],
+ "source": [
+ "model = AutoModelForCausalLM.from_pretrained(\n",
+ " \"bigscience/bloom-3b\", \n",
+ " load_in_8bit=True, \n",
+ " device_map='auto',\n",
+ ")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "6c4d2f2e-da71-42bc-a877-d4e236701f84",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "BloomForCausalLM(\n",
+ " (transformer): BloomModel(\n",
+ " (word_embeddings): Embedding(250880, 2560)\n",
+ " (word_embeddings_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " (h): ModuleList(\n",
+ " (0-29): 30 x BloomBlock(\n",
+ " (input_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " (self_attention): BloomAttention(\n",
+ " (query_key_value): Linear8bitLt(in_features=2560, out_features=7680, bias=True)\n",
+ " (dense): Linear8bitLt(in_features=2560, out_features=2560, bias=True)\n",
+ " (attention_dropout): Dropout(p=0.0, inplace=False)\n",
+ " )\n",
+ " (post_attention_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " (mlp): BloomMLP(\n",
+ " (dense_h_to_4h): Linear8bitLt(in_features=2560, out_features=10240, bias=True)\n",
+ " (gelu_impl): BloomGelu()\n",
+ " (dense_4h_to_h): Linear8bitLt(in_features=10240, out_features=2560, bias=True)\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (ln_f): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " )\n",
+ " (lm_head): Linear(in_features=2560, out_features=250880, bias=False)\n",
+ ")"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "90340bb5-8a3a-414a-8b5b-8cf897918381",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "for param in model.parameters():\n",
+ " param.requires_grad = False # freeze the model - train adapters later\n",
+ " if param.ndim == 1:\n",
+ " # cast the small parameters (e.g. layernorm) to fp32 for stability\n",
+ " param.data = param.data.to(torch.float32)\n",
+ "\n",
+ "model.gradient_checkpointing_enable() # reduce number of stored activations\n",
+ "model.enable_input_require_grads()\n",
+ "\n",
+ "class CastOutputToFloat(nn.Sequential):\n",
+ " def forward(self, x): return super().forward(x).to(torch.float32)\n",
+ "model.lm_head = CastOutputToFloat(model.lm_head)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "963eccdd-a57c-4970-b86c-bf446cc0243a",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "BloomForCausalLM(\n",
+ " (transformer): BloomModel(\n",
+ " (word_embeddings): Embedding(250880, 2560)\n",
+ " (word_embeddings_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " (h): ModuleList(\n",
+ " (0-29): 30 x BloomBlock(\n",
+ " (input_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " (self_attention): BloomAttention(\n",
+ " (query_key_value): Linear8bitLt(in_features=2560, out_features=7680, bias=True)\n",
+ " (dense): Linear8bitLt(in_features=2560, out_features=2560, bias=True)\n",
+ " (attention_dropout): Dropout(p=0.0, inplace=False)\n",
+ " )\n",
+ " (post_attention_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " (mlp): BloomMLP(\n",
+ " (dense_h_to_4h): Linear8bitLt(in_features=2560, out_features=10240, bias=True)\n",
+ " (gelu_impl): BloomGelu()\n",
+ " (dense_4h_to_h): Linear8bitLt(in_features=10240, out_features=2560, bias=True)\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (ln_f): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)\n",
+ " )\n",
+ " (lm_head): CastOutputToFloat(\n",
+ " (0): Linear(in_features=2560, out_features=250880, bias=False)\n",
+ " )\n",
+ ")"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "0de04fc8-1541-445d-8a6c-528862e18f69",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "def print_trainable_parameters(model):\n",
+ " \"\"\"\n",
+ " Prints the number of trainable parameters in the model.\n",
+ " \"\"\"\n",
+ " trainable_params = 0\n",
+ " all_param = 0\n",
+ " for _, param in model.named_parameters():\n",
+ " all_param += param.numel()\n",
+ " if param.requires_grad:\n",
+ " trainable_params += param.numel()\n",
+ " print(\n",
+ " f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "ac1c4734-530a-4c9c-a055-8c8d3f46b169",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "trainable params: 4915200 || all params: 3007472640 || trainable%: 0.1634329082375293\n"
+ ]
+ }
+ ],
+ "source": [
+ "from peft import LoraConfig, get_peft_model \n",
+ "\n",
+ "config = LoraConfig(\n",
+ " r=16,\n",
+ " lora_alpha=32,\n",
+ " lora_dropout=0.05,\n",
+ " bias=\"none\",\n",
+ " task_type=\"CAUSAL_LM\"\n",
+ ")\n",
+ "\n",
+ "model = get_peft_model(model, config)\n",
+ "print_trainable_parameters(model)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "683c0239-9384-4d80-b2d0-64738e9c53f5",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "<datasets.iterable_dataset.IterableDataset at 0x7ff380f5fcd0>"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "multilingual_dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "edabb62f-d5b3-4d5a-9220-751b940e0a5b",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
+ "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33maashay96\u001b[0m (\u001b[33mindic-lm\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "!pip install wandb\n",
+ "import wandb\n",
+ "wandb.login()\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0ce63418-3aba-4549-8a50-922a5cf10cb1",
+ "metadata": {
+ "scrolled": true,
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "import transformers\n",
+ "from datasets import load_dataset\n",
+ "#data = load_dataset(\"Abirate/english_quotes\")\n",
+ "#data = data.map(lambda samples: tokenizer(samples['quote']), batched=True)\n",
+ "\n",
+ "trainer = transformers.Trainer(\n",
+ " model=model, \n",
+ " train_dataset=multilingual_dataset,\n",
+ " args=transformers.TrainingArguments(\n",
+ " per_device_train_batch_size=4, \n",
+ " gradient_accumulation_steps=16,\n",
+ " #gradient_checkpointing=True,\n",
+ " warmup_steps=100, \n",
+ " save_steps=1000,\n",
+ " #num_train_epochs=3,\n",
+ " max_steps=20000, \n",
+ " learning_rate=3e-4, \n",
+ " fp16=True,\n",
+ " logging_steps=1, \n",
+ " output_dir='outputs',report_to='wandb'\n",
+ " ),\n",
+ " data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False)\n",
+ ")\n",
+ "model.config.use_cache = False # silence the warnings. Please re-enable for inference!\n",
+ "trainer.train()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0ceeb7a2-7f94-4153-96b0-af19acf90bdb",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "model.push_to_hub(\"aashay96/indic-BloomLM\", use_auth_token=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "15eb4b53-1354-4729-9cb7-872b057b11be",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "\n",
+ " आप कैसे हैं? आप अपने जीवन में क्या कर रहे हैं?\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "wandb: Waiting for W&B process to finish... (success).\n"
+ ]
+ }
+ ],
+ "source": [
+ "import torch\n",
+ "from peft import PeftModel, PeftConfig\n",
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+ "\n",
+ "peft_model_id = \"aashay96/indic-BloomLM\"\n",
+ "config = PeftConfig.from_pretrained(peft_model_id)\n",
+ "model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map='auto')\n",
+ "tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)\n",
+ "\n",
+ "# Load the Lora model\n",
+ "model = PeftModel.from_pretrained(model, peft_model_id)\n",
+ "\n",
+ "\n",
+ "\n",
+ "batch = tokenizer(\"आप कैसे हैं\", return_tensors='pt')\n",
+ "\n",
+ "with torch.cuda.amp.autocast():\n",
+ " output_tokens = model.generate(**batch, max_new_tokens=10)\n",
+ "\n",
+ "print('\\n\\n', tokenizer.decode(output_tokens[0], skip_special_tokens=True))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }