andreshere committed on
Commit c2552ff
1 Parent(s): 1488955

andreshere/llama-2-7b-mental-health-counseler

README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ license: llama2
+ library_name: peft
+ tags:
+ - generated_from_trainer
+ base_model: meta-llama/Llama-2-7b-chat-hf
+ model-index:
+ - name: meta-llama/Llama-2-7b-chat-hf
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # meta-llama/Llama-2-7b-chat-hf
+
+ This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the `andreshere/counsel_chat` dataset.
+
+ ## Model description
+
+ A LoRA adapter (r=32, alpha=32, dropout 0.1 on the q/k/v/o attention projections) trained on top of Llama-2-7b-chat loaded in 4-bit NF4 quantization, targeting mental-health counseling dialogue.
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ Fine-tuned on the `train` split of `andreshere/counsel_chat` and evaluated on its `test` split; each example pairs a user `Context` with a counselor `Response`.
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 4
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.41.2
+ - Pytorch 2.3.1+cu121
+ - Datasets 2.19.2
+ - Tokenizers 0.19.1
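
The card above leaves its usage section open, so here is a minimal inference sketch for the adapter added in this commit. It assumes the adapter is loaded from `andreshere/llama-2-7b-mental-health-counseler` (the repository named in the commit message), that access to the gated `meta-llama/Llama-2-7b-chat-hf` base model has already been granted, and that the prompt and generation settings are purely illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

BASE = "meta-llama/Llama-2-7b-chat-hf"
ADAPTER = "andreshere/llama-2-7b-mental-health-counseler"  # assumed repo id

# Load the base model in 4-bit NF4, mirroring the quantization used for training.
# Assumes `huggingface-cli login` has been run for the gated base model.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(BASE)
base_model = AutoModelForCausalLM.from_pretrained(
    BASE, quantization_config=bnb_config, device_map="auto"
)

# Attach the LoRA adapter from this repository on top of the frozen base weights.
model = PeftModel.from_pretrained(base_model, ADAPTER)
model.eval()

prompt = "I feel anxious all the time and I don't know why."  # illustrative prompt
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=256, do_sample=True, top_p=0.9)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

Loading the base model with the same 4-bit NF4 configuration used during training keeps memory requirements in the single-GPU range; the adapter weights themselves add only about 134 MB (see `adapter_model.safetensors` below).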
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "LlamaForCausalLM",
+     "parent_library": "transformers.models.llama.modeling_llama"
+   },
+   "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "k_proj",
+     "q_proj",
+     "o_proj"
+   ],
+   "task_type": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
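
The JSON above is the serialized `LoraConfig` that `peft` writes next to the adapter weights. As a quick sanity check it can be read back with `PeftConfig.from_pretrained`; the repository id is again assumed from the commit message, and a local path to a checkout of this repo would work the same way:

```python
from peft import PeftConfig

# Reads adapter_config.json from the Hub (repo id assumed) or from a local checkout.
config = PeftConfig.from_pretrained("andreshere/llama-2-7b-mental-health-counseler")

print(config.base_model_name_or_path)               # meta-llama/Llama-2-7b-chat-hf
print(config.r, config.lora_alpha, config.lora_dropout)  # 32, 32, 0.1
print(config.target_modules)                         # attention projections: q/k/v/o_proj
```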
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c4a176cae3bf7082f2b8750daede32c4ace2e530a84ad6d5172d6723c1bae68
+ size 134252336
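
The adapter weights are stored through Git LFS, so the repository itself only tracks the pointer above. A small sketch for verifying a locally downloaded `adapter_model.safetensors` against the recorded digest and size; the local path is an assumption, and any copy obtained via `git lfs pull` or `huggingface_hub` will do:

```python
import hashlib
import os

path = "adapter_model.safetensors"  # assumed local copy pulled from LFS

# Hash the file in 1 MiB chunks to avoid loading it all into memory.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

# Compare against the oid and size recorded in the pointer file above.
assert sha256.hexdigest() == "3c4a176cae3bf7082f2b8750daede32c4ace2e530a84ad6d5172d6723c1bae68"
assert os.path.getsize(path) == 134252336
```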
fine-tuning.llama2.py ADDED
@@ -0,0 +1,172 @@
+ import torch
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForCausalLM,
+     Trainer,
+     TrainingArguments,
+     BitsAndBytesConfig,
+     DataCollatorForLanguageModeling,
+ )
+ from datasets import load_dataset
+ from peft import LoraConfig, get_peft_model
+ import evaluate
+ import numpy as np
+ import pandas as pd
+ import time
+
+ # Quantization configuration: load the base model in 4-bit NF4 with double
+ # quantization and fp16 compute so the 7B model fits on a single GPU
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_compute_dtype=torch.float16
+ )
+
+ # Load model and tokenizer (Llama-2 is gated, hence the auth token)
+ model_name = "meta-llama/Llama-2-7b-chat-hf"
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
+
+ # Llama-2 has no padding token, so reuse the EOS token
+ tokenizer.pad_token = tokenizer.eos_token
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     quantization_config=bnb_config,
+     device_map="auto",
+     use_auth_token=True
+ )
+
+ print(f"Model {model_name} loaded and quantized in 4 bits")
+
+ # Load dataset
+ dataset = load_dataset("andreshere/counsel_chat", use_auth_token=True)
+ print("Dataset loaded")
+
+ # Configure LoRA adapters on the attention projections
+ lora_config = LoraConfig(
+     r=32,
+     lora_alpha=32,
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
+     lora_dropout=0.1
+ )
+
+ model = get_peft_model(model, lora_config)
+
+ # Tokenize the dataset. For a causal LM the labels must align token-for-token
+ # with the input ids, so each context is concatenated with its response and the
+ # data collator below derives the labels from the combined sequence.
+ def preprocess_function(examples):
+     texts = [
+         f"{context}\n{response}{tokenizer.eos_token}"
+         for context, response in zip(examples['Context'], examples['Response'])
+     ]
+     model_inputs = tokenizer(texts, max_length=512, truncation=True, padding='max_length')
+
+     # Keep the raw columns so they can be logged during evaluation
+     model_inputs["Context"] = examples['Context']
+     model_inputs["Response"] = examples['Response']
+     return model_inputs
+
+ tokenized_dataset = dataset.map(preprocess_function, batched=True)
+
+ train_dataset = tokenized_dataset['train']
+ eval_dataset = tokenized_dataset['test']
+
+ # mlm=False gives plain causal language modeling: labels are the input ids with
+ # padding positions masked to -100
+ data_collator = DataCollatorForLanguageModeling(
+     tokenizer=tokenizer,
+     mlm=False,
+ )
+
+ # Prepare training arguments
+ training_args = TrainingArguments(
+     output_dir='./',
+     do_train=True,
+     do_eval=True,
+     eval_strategy='epoch',  # 'eval_strategy' replaces the deprecated 'evaluation_strategy'
+     gradient_accumulation_steps=2,
+     auto_find_batch_size=True,
+     weight_decay=0.01,
+     num_train_epochs=4,
+     learning_rate=1e-4,
+     logging_dir='./logs',
+     logging_strategy="steps",
+     logging_steps=10,
+     save_strategy="epoch",
+     save_total_limit=2,
+     save_safetensors=True,
+     eval_steps=10,
+     report_to="tensorboard",
+     hub_strategy="every_save",
+     hub_model_id="andreshere/llama-2-7b-mental-health-counseler"
+ )
+
+ # Initialize trainer
+ trainer = Trainer(
+     model=model,
+     args=training_args,
+     train_dataset=train_dataset,
+     eval_dataset=eval_dataset,
+     data_collator=data_collator
+ )
+
+ # Train the model
+ trainer.train()
+
+ # Save the adapter and training arguments to output_dir
+ trainer.save_model()
+
+ # Push to the Hugging Face Hub. The target repository comes from hub_model_id
+ # above; the positional argument of push_to_hub is a commit message, not a repo id.
+ trainer.push_to_hub()
+
+ # Load evaluation metrics
+ rouge = evaluate.load('rouge')
+ bleu = evaluate.load('bleu')
+ bertscore = evaluate.load('bertscore')
+ meteor = evaluate.load('meteor')
+
+ # Metric function for Trainer.evaluate(). It receives an EvalPrediction whose
+ # predictions are already reduced to token ids (see preprocess_logits_for_metrics
+ # below) and whose label_ids use -100 for ignored positions.
+ def compute_metrics_and_log_time(pred):
+     labels_ids = pred.label_ids
+     pred_ids = pred.predictions
+
+     # -100 cannot be decoded, so map it back to the pad token first
+     labels_ids = np.where(labels_ids != -100, labels_ids, tokenizer.pad_token_id)
+
+     decoded_preds = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
+     decoded_labels = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
+
+     # Compute metrics (BERTScore needs a language or model_type to pick its backbone)
+     rouge_result = rouge.compute(predictions=decoded_preds, references=decoded_labels)
+     bleu_result = bleu.compute(predictions=decoded_preds, references=decoded_labels)
+     bertscore_result = bertscore.compute(predictions=decoded_preds, references=decoded_labels, lang="en")
+     meteor_result = meteor.compute(predictions=decoded_preds, references=decoded_labels)
+
+     result = {
+         "rouge": rouge_result,
+         "bleu": bleu_result,
+         "bertscore": bertscore_result,
+         "meteor": meteor_result,
+     }
+
+     # Log every evaluation example. Decoding already happened above, so the timer
+     # only wraps a list lookup; "Compute Time" is per-row bookkeeping rather than
+     # true generation latency.
+     compute_times = []
+     contexts = []
+     original_responses = []
+     model_responses = []
+
+     for i, context in enumerate(eval_dataset['Context']):
+         start_time = time.time()
+         model_response = decoded_preds[i]
+         end_time = time.time()
+
+         compute_times.append(end_time - start_time)
+         contexts.append(context)
+         original_responses.append(decoded_labels[i])
+         model_responses.append(model_response)
+
+     # Save the log to a DataFrame
+     log_df = pd.DataFrame({
+         "Context": contexts,
+         "Original Response": original_responses,
+         "Model Response": model_responses,
+         "Compute Time": compute_times
+     })
+
+     log_df.to_csv("response_log.csv", index=False)
+
+     return result
+
+ # Trainer.evaluate() does not accept a compute_metrics argument, so attach the
+ # metric function (plus an argmax over the vocabulary so only token ids are
+ # accumulated) to the trainer before evaluating.
+ trainer.compute_metrics = compute_metrics_and_log_time
+ trainer.preprocess_logits_for_metrics = lambda logits, labels: logits.argmax(dim=-1)
+
+ # Evaluate model
+ metrics = trainer.evaluate(eval_dataset=eval_dataset, metric_key_prefix="eval")
+ print(metrics)
+
+ # Export metrics to CSV
+ metrics_df = pd.DataFrame(metrics, index=[0])
+ metrics_df.to_csv("fine-tuned-metrics.csv", index=False)
+
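
One caveat about the evaluation in the script above: `Trainer.evaluate` scores teacher-forced predictions (each token is predicted from the reference prefix), so the ROUGE/BLEU/BERTScore/METEOR numbers it reports do not reflect free-running generation. A complementary, generation-based pass over the test split could look like the sketch below; it reuses `model`, `tokenizer`, `eval_dataset`, `rouge` and `meteor` from the script, and the sample size, prompt format and generation settings are illustrative assumptions:

```python
# Complementary evaluation: generate responses freely from each context and
# score them against the reference responses (settings are illustrative).
generated, references = [], []
for example in eval_dataset.select(range(min(50, len(eval_dataset)))):  # small sample
    inputs = tokenizer(example["Context"], return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=256, do_sample=False)
    # Keep only the newly generated tokens, not the echoed prompt.
    new_tokens = output[0, inputs["input_ids"].shape[1]:]
    generated.append(tokenizer.decode(new_tokens, skip_special_tokens=True))
    references.append(example["Response"])

print(rouge.compute(predictions=generated, references=references))
print(meteor.compute(predictions=generated, references=references))
```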
logs/events.out.tfevents.1717866092.andrestfm-002 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67d954ad275921629b859935966641f16a0078e335aead3309a02eca1437d621
+ size 5228
logs/events.out.tfevents.1717866336.andrestfm-002 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68506358bcf2fd8e53cfc50212c5fda7c76387d0b1d707e503c2e6ff8178151c
+ size 5225
logs/events.out.tfevents.1717866484.andrestfm-002 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d613f4d97994c6550d1accdc11b1569220f968003989c6287f53e5ceeff5e25d
+ size 5225
logs/events.out.tfevents.1717866647.andrestfm-002 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db49236353ff72105b600b66a2fc42fbcc344995c42685eeb7a610ce292e473e
+ size 5225
logs/events.out.tfevents.1717867034.andrestfm-002 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93e265c3a520cd761e6c4ca063a4ca3eb6f26eb3a92f999dbbf46855d4e5abb0
+ size 5225
logs/events.out.tfevents.1717867246.andrestfm-002 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7de59758d2dd303e0c7cc39057fc0a543cd447901da8d466303fc5c1318f609
+ size 17458
logs/events.out.tfevents.1717867605.andrestfm-002 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4b135c9aecab4e959747dfb3a192335021d6bb8a08bc4057c378de7eac53149
+ size 48654
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19fd8c9ecc27cc925d0b88edf38f4a94e07e4d3d93eed8d20ce5ccf43119b6f6
+ size 5048
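
`training_args.bin` is the pickled `TrainingArguments` object that `Trainer.save_model()` writes next to the weights. A minimal sketch for inspecting it, assuming the file has been pulled from LFS into the working directory and that a compatible `transformers` version (4.41.2 per the model card) is installed so the pickle resolves:

```python
import torch

# The file is a pickled transformers.TrainingArguments object
args = torch.load("training_args.bin")

print(args.learning_rate)                # 0.0001
print(args.num_train_epochs)             # 4
print(args.gradient_accumulation_steps)  # 2
print(args.lr_scheduler_type)            # linear schedule (per the model card)
```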