ashioyajotham committed
Commit: 20fd975 (1 parent: 627120c)

Create app.py

Files changed (1)
  1. app.py +181 -0
app.py ADDED
@@ -0,0 +1,181 @@
+ # Dependencies (notebook-style install commands; run them in a shell or a Colab cell, not as part of app.py):
+ # !pip install -q -U trl transformers accelerate git+https://github.com/huggingface/peft.git
+ # !pip install -q datasets bitsandbytes einops wandb
+
+
+ from datasets import load_dataset
+
+ # Specify the name of the dataset
+ dataset_name = "yahma/alpaca-cleaned"
+
+ # Load the dataset from the specified name and select the "train" split
+ dataset = load_dataset(dataset_name, split="train")
+
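+ # Each record in alpaca-cleaned has "instruction", "input", and "output" fields; inspect one with:
+ # print(dataset[0])
+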
+ # We will load the Falcon 7B model, apply 4-bit quantization to it, and then add LoRA adapters to the model.
+ import torch
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+ # Define the name of the Falcon model checkpoint
+ model_name = "ybelkada/falcon-7b-sharded-bf16"
+
+ # Configure the BitsAndBytes 4-bit (NF4) quantization
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.float16,
+ )
+
+ # Load the Falcon model with the quantization configuration
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     quantization_config=bnb_config,
+     device_map="auto",
+     trust_remote_code=True,
+ )
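+
+ # Optionally inspect the quantized model's memory footprint (in bytes):
+ # print(model.get_memory_footprint())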
+
+ # Disabling cache usage in the model configuration
+ model.config.use_cache = False
+
+ # Load the tokenizer for the Falcon 7B model with remote code trust
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+
+ # Set the padding token to be the same as the end-of-sequence token
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # Import the necessary module for LoRA configuration
+ from peft import LoraConfig
+
+ # Define the parameters for LoRA configuration
+ lora_alpha = 16
+ lora_dropout = 0.1
+ lora_r = 64
+
+ # Create the LoRA configuration object
+ peft_config = LoraConfig(
+     lora_alpha=lora_alpha,
+     lora_dropout=lora_dropout,
+     r=lora_r,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules=[
+         "query_key_value",
+         "dense",
+         "dense_h_to_4h",
+         "dense_4h_to_h",
+     ],
+ )
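+
+ # These target modules correspond to Falcon's fused attention projection ("query_key_value"),
+ # its attention output projection ("dense"), and the MLP up- and down-projections
+ # ("dense_h_to_4h", "dense_4h_to_h"), so LoRA is applied to both attention and MLP layers.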
+
+ from transformers import TrainingArguments
+
+ # Define the directory to save training results
+ output_dir = "./results"
+
+ # Set the batch size per device during training
+ per_device_train_batch_size = 4
+
+ # Number of steps to accumulate gradients before updating the model
+ gradient_accumulation_steps = 4
+
+ # Choose the optimizer type (e.g., "paged_adamw_32bit")
+ optim = "paged_adamw_32bit"
+
+ # Interval to save model checkpoints (every 10 steps)
+ save_steps = 10
+
+ # Interval to log training metrics (every 10 steps)
+ logging_steps = 10
+
+ # Learning rate for optimization
+ learning_rate = 2e-4
+
+ # Maximum gradient norm for gradient clipping
+ max_grad_norm = 0.3
+
+ # Maximum number of training steps
+ max_steps = 50
+
+ # Warmup ratio for learning rate scheduling
+ warmup_ratio = 0.03
+
+ # Type of learning rate scheduler (e.g., "constant")
+ lr_scheduler_type = "constant"
+
+ # Create a TrainingArguments object to configure the training process
+ training_arguments = TrainingArguments(
+     output_dir=output_dir,
+     per_device_train_batch_size=per_device_train_batch_size,
+     gradient_accumulation_steps=gradient_accumulation_steps,
+     optim=optim,
+     save_steps=save_steps,
+     logging_steps=logging_steps,
+     learning_rate=learning_rate,
+     fp16=True,  # Use mixed precision training (16-bit)
+     max_grad_norm=max_grad_norm,
+     max_steps=max_steps,
+     warmup_ratio=warmup_ratio,
+     group_by_length=True,
+     lr_scheduler_type=lr_scheduler_type,
+ )
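+
+ # With per_device_train_batch_size = 4 and gradient_accumulation_steps = 4, the effective
+ # batch size per device is 4 * 4 = 16 examples per optimizer step.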
+
+
+ # Build a single "text" field from the Alpaca columns (instruction, input, output)
+ dataset = dataset.map(
+     lambda x: {"text": x["instruction"] + "\n" + x["input"] + "\n" + x["output"]}
+ )
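+
+ # An alternative sketch (not in the original script): build the "text" field with an explicit
+ # Alpaca-style prompt template; format_example is a hypothetical helper shown for illustration.
+ # def format_example(x):
+ #     prompt = f"### Instruction:\n{x['instruction']}\n\n"
+ #     if x["input"]:
+ #         prompt += f"### Input:\n{x['input']}\n\n"
+ #     prompt += f"### Response:\n{x['output']}"
+ #     return {"text": prompt}
+ # dataset = dataset.map(format_example)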
+
+ # Import the SFTTrainer from the TRL library
+ from trl import SFTTrainer
+
+ # Set the maximum sequence length
+ max_seq_length = 512
+
+ # Create a trainer instance using SFTTrainer
+ trainer = SFTTrainer(
+     model=model,
+     train_dataset=dataset,
+     peft_config=peft_config,
+     dataset_text_field="text",
+     max_seq_length=max_seq_length,
+     tokenizer=tokenizer,
+     args=training_arguments,
+ )
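+
+ # Because peft_config is passed in, SFTTrainer wraps the model with the LoRA adapters
+ # internally, so only the adapter weights are updated during training.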
+
+
+ # Iterate through the named modules of the trainer's model
+ for name, module in trainer.model.named_modules():
+     # Check if the name contains "norm"
+     if "norm" in name:
+         # Keep layer-norm modules in torch.float32 for numerical stability during fp16 training
+         module = module.to(torch.float32)
+
+ trainer.train()
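+
+ # Persist the trained LoRA adapters and tokenizer (a minimal addition, not in the original
+ # script; the output path "./falcon-7b-lora-adapters" is an assumed name).
+ trainer.model.save_pretrained("./falcon-7b-lora-adapters")
+ tokenizer.save_pretrained("./falcon-7b-lora-adapters")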
+
+
+ # Quick sanity check: generate from the fine-tuned model
+ prompt = "Generate a Python script to add the prime numbers between one and ten"
+
+ # Move the tokenized prompt to the same device as the model before generating
+ inputs = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
+
+ outputs = model.generate(inputs, max_length=100, temperature=0.7, do_sample=True)
+
+ completion = tokenizer.decode(outputs[0])
+
+ print(completion)
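+
+ # A minimal sketch (not part of the original script) of how the saved adapters could be
+ # reloaded later onto a fresh base model with PEFT; the adapter path matches the assumed
+ # directory used above.
+ # from peft import PeftModel
+ # base_model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, device_map="auto")
+ # model = PeftModel.from_pretrained(base_model, "./falcon-7b-lora-adapters")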
+
+
+
+ # Separate demo: generate Gradio code with a StarCoder model fine-tuned on Gradio examples
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ checkpoint_name = "ArmelR/starcoder-gradio-v0"
+ model = AutoModelForCausalLM.from_pretrained(checkpoint_name)
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint_name)
+
+ prompt = "Create a gradio application that helps to convert a temperature in Celsius into a temperature in Fahrenheit"
+ inputs = tokenizer(f"Question: {prompt}\n\nAnswer: ", return_tensors="pt")
+
+ outputs = model.generate(
+     inputs["input_ids"],
+     do_sample=True,  # enable sampling so temperature and top_p take effect
+     temperature=0.2,
+     top_p=0.95,
+     max_new_tokens=200,
+ )
+
+ # Slice off the prompt tokens so only the generated answer is decoded
+ input_len = inputs["input_ids"].shape[1]
+ print(tokenizer.decode(outputs[0][input_len:]))