---
license: apache-2.0
---
This is a demo of how to pretrain a Mistral-architecture model from scratch with the TRL `SFTTrainer`, in only about 70 lines of Python code.

```python
import torch
from transformers import TrainingArguments, MistralForCausalLM, MistralConfig, AutoTokenizer
from datasets import load_dataset
from trl import SFTTrainer

# Mistral-style config for a small model trained from scratch (on the order of 1B parameters)
configuration = MistralConfig(
    vocab_size=32000,
    hidden_size=2048,
    intermediate_size=7168,
    num_hidden_layers=24,
    num_attention_heads=32,
    num_key_value_heads=8,
    hidden_act="silu",
    max_position_embeddings=4096,
    pad_token_id=2,
    bos_token_id=1,
    eos_token_id=2,
)

model = MistralForCausalLM(configuration)  # fresh model with randomly initialized weights
#model = MistralForCausalLM.from_pretrained("./6B_code_outputs/checkpoint-10000")  # or resume from a local checkpoint
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2", local_files_only=False)
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as the padding token

dataset = load_dataset('HuggingFaceTB/cosmopedia-20k', split="train")  # 20k-sample text corpus used for this demo
#dataset = load_dataset('Elriggs/openwebtext-100k', split="train")  # alternative: web-text samples
dataset = dataset.shuffle(seed=42)
print(f'Number of prompts: {len(dataset)}')
print(f'Column names are: {dataset.column_names}')

def create_prompt_formats(sample):
    """
    Return the raw 'text' field of each example in the batch as the
    training prompt; no additional formatting is applied.
    :param sample: batch dictionary of dataset columns
    """
    output_texts = []
    for i in range(len(sample['text'])):
        output_texts.append(sample['text'][i])
    return output_texts


trainer = SFTTrainer(
    model,
    train_dataset=dataset,
    tokenizer=tokenizer,
    max_seq_length=2048,
    formatting_func=create_prompt_formats,
    args=TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=1,
        warmup_steps=2,
        max_steps=10000,
        learning_rate=1e-4,
        logging_steps=1,
        output_dir="1B_outputs",
        overwrite_output_dir=True,
        save_steps=1000,
        optim="paged_adamw_32bit",
        report_to="none",
    ),
)
trainer.train()
trainer.model.save_pretrained("1B-final")  # save_pretrained has no dtype argument; weights keep their current dtype
trainer.tokenizer.save_pretrained("1B-final")

```
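Note that the `tokenizer` and `max_seq_length` keyword arguments to `SFTTrainer` match older TRL releases; on recent TRL versions these are configured through `SFTConfig` instead.

Once training finishes, the saved `1B-final` folder can be loaded back with the standard `from_pretrained` API for a quick generation smoke test. This is a minimal sketch, assuming the training run above has produced the `1B-final` directory; the prompt and sampling settings are just placeholders.

```python
import torch
from transformers import AutoTokenizer, MistralForCausalLM

# Load the tokenizer and model saved by the training script
tokenizer = AutoTokenizer.from_pretrained("1B-final")
model = MistralForCausalLM.from_pretrained("1B-final", torch_dtype=torch.float32)
model.eval()

# Generate a short continuation as a sanity check
inputs = tokenizer("Once upon a time", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.9)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```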