ngxson committed
Commit
6c8c407
1 Parent(s): 83f7dbb

guide for lora

Files changed (3)
  1. README.md +8 -0
  2. finetune.ipynb +40 -22
  3. run_finetuned.ipynb +127 -0
README.md CHANGED
@@ -9,3 +9,11 @@ This model is [ModelCloud/tinyllama-15M-stories](https://huggingface.co/ModelClo
  The model is used for testing, not intended to be used in production (unless your product is some kind of bedtime story teller)
 
  Weight of router is initialized randomly
+
+ ## shakespeare LoRA adapter
+
+ A LoRA adapter trained on the first 100 paragraphs of Shakespeare can be found inside `moe_shakespeare15M`
+
+ With input: `Look in thy glass`
+ - Original model generates: `Look in thy glass was a little girl. She was only three years old and she was three years old. She was`
+ - LoRA adapter generates: `Look in thy glass in love of the eye: That's when when the eye see thy on the sun'`
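
For a quick way to reproduce this comparison, here is a minimal sketch. It assumes the base model lives in the current working directory and uses the `moe_shakespeare15M/checkpoint-500` path from `run_finetuned.ipynb`; adjust the checkpoint path if yours differs:

```python
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base tinyllama-15M-stories MoE model from the repo root
model_path = os.getcwd()
tokenizer = AutoTokenizer.from_pretrained(model_path, legacy=False)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_path, use_safetensors=True, local_files_only=True)

inputs = tokenizer('Look in thy glass', return_tensors="pt")

# Base model: continues the prompt as a bedtime story
outputs = model.generate(inputs['input_ids'], max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

# Same prompt with the shakespeare LoRA adapter applied on top
ft_model = PeftModel.from_pretrained(model, 'moe_shakespeare15M/checkpoint-500')
outputs = ft_model.generate(inputs['input_ids'], max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```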
finetune.ipynb CHANGED
@@ -13,19 +13,37 @@
  "model_path = os.getcwd()\n",
  "print(model_path)\n",
  "tokenizer = AutoTokenizer.from_pretrained(model_path, legacy=False)\n",
- "model = AutoModelForCausalLM.from_pretrained(model_path, use_safetensors=True, local_files_only=True)"
+ "model = AutoModelForCausalLM.from_pretrained(model_path, use_safetensors=True, local_files_only=True)\n",
+ "tokenizer.pad_token = tokenizer.eos_token"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 7,
  "id": "93e9ec6a-4a57-484f-a1a5-ecb6674e8f77",
  "metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "LlamaTokenizerFast(name_or_path='/var/home/ngxson/jupyter/stories-15M', vocab_size=32000, model_max_length=2048, is_fast=True, padding_side='left', truncation_side='right', special_tokens={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>'}, clean_up_tokenization_spaces=False), added_tokens_decoder={\n",
+ "\t0: AddedToken(\"<unk>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
+ "\t1: AddedToken(\"<s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
+ "\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
+ "}"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
  "source": [
  "#inputs = tokenizer('', return_tensors=\"pt\")\n",
  "#outputs = model.generate(inputs['input_ids'], max_new_tokens=20, temperature=0)\n",
- "#print(tokenizer.decode(outputs[0], skip_special_tokens=True))"
+ "#print(tokenizer.decode(outputs[0], skip_special_tokens=True))\n",
+ "\n",
+ "tokenizer"
  ]
  },
  {
@@ -45,11 +63,11 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from peft import LoraConfig\n",
+ "from peft import LoraConfig, get_peft_model\n",
  "\n",
- "config = LoraConfig(\n",
- " r=32,\n",
- " lora_alpha=64,\n",
+ "peft_config = LoraConfig(\n",
+ " r=64,\n",
+ " lora_alpha=128,\n",
  " target_modules=[\n",
  " \"q_proj\",\n",
  " \"k_proj\",\n",
@@ -65,6 +83,9 @@
  " task_type=\"CAUSAL_LM\",\n",
  ")\n",
  "\n",
+ "model = get_peft_model(model, peft_config)\n",
+ "model.print_trainable_parameters()\n",
+ "\n",
  "#print(model)"
  ]
  },
@@ -107,31 +128,28 @@
  "run_name = project\n",
  "output_dir = \"./\" + run_name\n",
  "\n",
+ "tokenizer.pad_token = tokenizer.eos_token\n",
+ "\n",
  "checkpointing_args = {\"use_reentrant\": False}\n",
  "trainer = transformers.Trainer(\n",
  " model=model,\n",
  " train_dataset=tokenized_train_dataset,\n",
  " args=transformers.TrainingArguments(\n",
  " output_dir=output_dir,\n",
- " warmup_steps=10,\n",
- " per_device_train_batch_size=2,\n",
- " gradient_accumulation_steps=1,\n",
+ " warmup_steps=100,\n",
+ " per_device_train_batch_size=50,\n",
+ " gradient_accumulation_steps=5,\n",
  " gradient_checkpointing=True,\n",
- " max_steps=3000,\n",
+ " max_steps=500,\n",
  " learning_rate=2.5e-5, # Want a small lr for finetuning\n",
  " # fp16=True, \n",
  " optim=\"adamw_torch\",\n",
- " # logging_steps=25, # When to start reporting loss\n",
- " # logging_dir=\"./logs\", # Directory for storing logs\n",
- " save_strategy=\"steps\", # Save the model checkpoint every logging step\n",
- " save_steps=50, # Save checkpoints every 50 steps\n",
- " logging_steps=100,\n",
+ " save_strategy=\"steps\",\n",
+ " save_steps=100,\n",
+ " logging_steps=20,\n",
  " save_total_limit=4,\n",
- " # evaluation_strategy=\"steps\", # Evaluate the model every logging step\n",
- " # eval_steps=25, # Evaluate and save checkpoints every 50 steps\n",
- " # do_eval=True, # Perform evaluation at the end of training\n",
- " report_to=\"none\", # Comment this out if you don't want to use weights & baises\n",
- " run_name=f\"{run_name}-{datetime.now().strftime('%Y-%m-%d-%H-%M')}\" # Name of the W&B run (optional)\n",
+ " report_to=\"none\",\n",
+ " run_name=f\"{run_name}-{datetime.now().strftime('%Y-%m-%d-%H-%M')}\"\n",
  " ),\n",
  " data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),\n",
  ")\n",
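Taken together, the changes above amount to the following fine-tuning recipe. This is a condensed sketch, assuming `model`, `tokenizer`, and `tokenized_train_dataset` are prepared as in the earlier cells of the notebook; the `project` value is illustrative and the `target_modules` list is truncated to the entries visible in this diff. Note that the new settings give an effective batch of 50 × 5 = 250 sequences per optimizer step, for 500 steps.

```python
from datetime import datetime
import transformers
from peft import LoraConfig, get_peft_model

# LoRA configuration as updated in this commit (rank and alpha doubled)
peft_config = LoraConfig(
    r=64,
    lora_alpha=128,
    target_modules=[
        "q_proj",
        "k_proj",
        # ... remaining projection modules as listed in the notebook
    ],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()  # only the LoRA weights should be trainable

project = "moe_shakespeare15M"  # illustrative; the notebook defines `project` earlier
run_name = project
trainer = transformers.Trainer(
    model=model,
    train_dataset=tokenized_train_dataset,
    args=transformers.TrainingArguments(
        output_dir="./" + run_name,
        warmup_steps=100,
        per_device_train_batch_size=50,
        gradient_accumulation_steps=5,   # effective batch: 50 * 5 = 250
        gradient_checkpointing=True,
        max_steps=500,
        learning_rate=2.5e-5,            # small lr for finetuning
        optim="adamw_torch",
        save_strategy="steps",
        save_steps=100,
        logging_steps=20,
        save_total_limit=4,
        report_to="none",
        run_name=f"{run_name}-{datetime.now().strftime('%Y-%m-%d-%H-%M')}",
    ),
    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```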
run_finetuned.ipynb ADDED
@@ -0,0 +1,127 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "ca60092b-a133-40d5-bce7-be261eb13ba3",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "/var/home/ngxson/jupyter/stories-15M\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+ "\n",
+ "model_path = os.getcwd()\n",
+ "print(model_path)\n",
+ "tokenizer = AutoTokenizer.from_pretrained(model_path, legacy=False)\n",
+ "tokenizer.pad_token = tokenizer.eos_token\n",
+ "model = AutoModelForCausalLM.from_pretrained(model_path, use_safetensors=True, local_files_only=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "d8197b9a-9c94-4c14-9b89-5e16f129f71b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
+ "Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.\n",
+ "The attention mask is not set and cannot be inferred from input because pad token is same as eos token.As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Look in thy glass was a little girl. She was only three years old and she was three years old. She was\n"
+ ]
+ }
+ ],
+ "source": [
+ "inputs = tokenizer('Look in thy glass', return_tensors=\"pt\")\n",
+ "outputs = model.generate(inputs['input_ids'], max_new_tokens=20)\n",
+ "print(tokenizer.decode(outputs[0], skip_special_tokens=True))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "242b314c-d702-4cc1-862e-aaf59e986527",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from peft import PeftModel\n",
+ "CHECKPOINT_PATH = 'moe_shakespeare15M/checkpoint-500'\n",
+ "ft_model = PeftModel.from_pretrained(model, CHECKPOINT_PATH)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "a0abc08e-7e77-4efe-8e1b-465eff9672b3",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
+ "Setting `pad_token_id` to `eos_token_id`:2 for open-end generation.\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Look in thy glass in love of the eye:\n",
+ "That's when when the eye see thy on the sun'\n"
+ ]
+ }
+ ],
+ "source": [
+ "outputs = ft_model.generate(inputs['input_ids'], max_new_tokens=20)\n",
+ "print(tokenizer.decode(outputs[0], skip_special_tokens=True))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0733e354-6b16-4c8f-a7f9-6207d75feee1",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
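
As an aside on the stderr warnings captured in the notebook outputs above: `generate()` complains because no attention mask or pad token id is passed explicitly. A minimal sketch of the usual fix (the single-sequence output is unchanged here):

```python
inputs = tokenizer('Look in thy glass', return_tensors="pt")
outputs = ft_model.generate(
    inputs['input_ids'],
    attention_mask=inputs['attention_mask'],  # silences the attention-mask warning
    pad_token_id=tokenizer.eos_token_id,      # silences the pad-token warning
    max_new_tokens=20,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```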