English
generated_from_trainer
tomekkorbak committed on
Commit
8de10e9
1 Parent(s): bdc4a9a

update model card README.md

Browse files
Files changed (1) hide show
  1. README.md +192 -0
README.md ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: mit
5
+ tags:
6
+ - generated_from_trainer
7
+ datasets:
8
+ - tomekkorbak/pii-pile-chunk3-0-50000
9
+ - tomekkorbak/pii-pile-chunk3-50000-100000
10
+ - tomekkorbak/pii-pile-chunk3-100000-150000
11
+ - tomekkorbak/pii-pile-chunk3-150000-200000
12
+ - tomekkorbak/pii-pile-chunk3-200000-250000
13
+ - tomekkorbak/pii-pile-chunk3-250000-300000
14
+ - tomekkorbak/pii-pile-chunk3-300000-350000
15
+ - tomekkorbak/pii-pile-chunk3-350000-400000
16
+ - tomekkorbak/pii-pile-chunk3-400000-450000
17
+ - tomekkorbak/pii-pile-chunk3-450000-500000
18
+ - tomekkorbak/pii-pile-chunk3-500000-550000
19
+ - tomekkorbak/pii-pile-chunk3-550000-600000
20
+ - tomekkorbak/pii-pile-chunk3-600000-650000
21
+ - tomekkorbak/pii-pile-chunk3-650000-700000
22
+ - tomekkorbak/pii-pile-chunk3-700000-750000
23
+ - tomekkorbak/pii-pile-chunk3-750000-800000
24
+ - tomekkorbak/pii-pile-chunk3-800000-850000
25
+ - tomekkorbak/pii-pile-chunk3-850000-900000
26
+ - tomekkorbak/pii-pile-chunk3-900000-950000
27
+ - tomekkorbak/pii-pile-chunk3-950000-1000000
28
+ - tomekkorbak/pii-pile-chunk3-1000000-1050000
29
+ - tomekkorbak/pii-pile-chunk3-1050000-1100000
30
+ - tomekkorbak/pii-pile-chunk3-1100000-1150000
31
+ - tomekkorbak/pii-pile-chunk3-1150000-1200000
32
+ - tomekkorbak/pii-pile-chunk3-1200000-1250000
33
+ - tomekkorbak/pii-pile-chunk3-1250000-1300000
34
+ - tomekkorbak/pii-pile-chunk3-1300000-1350000
35
+ - tomekkorbak/pii-pile-chunk3-1350000-1400000
36
+ - tomekkorbak/pii-pile-chunk3-1400000-1450000
37
+ - tomekkorbak/pii-pile-chunk3-1450000-1500000
38
+ - tomekkorbak/pii-pile-chunk3-1500000-1550000
39
+ - tomekkorbak/pii-pile-chunk3-1550000-1600000
40
+ - tomekkorbak/pii-pile-chunk3-1600000-1650000
41
+ - tomekkorbak/pii-pile-chunk3-1650000-1700000
42
+ - tomekkorbak/pii-pile-chunk3-1700000-1750000
43
+ - tomekkorbak/pii-pile-chunk3-1750000-1800000
44
+ - tomekkorbak/pii-pile-chunk3-1800000-1850000
45
+ - tomekkorbak/pii-pile-chunk3-1850000-1900000
46
+ - tomekkorbak/pii-pile-chunk3-1900000-1950000
47
+ model-index:
48
+ - name: compassionate_elion
49
+ results: []
50
+ ---
51
+
52
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
53
+ should probably proofread and complete it, then remove this comment. -->
54
+
55
+ # compassionate_elion
56
+
57
+ This model was trained from scratch on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets.
58
+
59
+ ## Model description
60
+
61
+ More information needed
62
+
63
+ ## Intended uses & limitations
64
+
65
+ More information needed
66
+
67
+ ## Training and evaluation data
68
+
69
+ More information needed
70
+
71
+ ## Training procedure
72
+
73
+ ### Training hyperparameters
74
+
75
+ The following hyperparameters were used during training:
76
+ - learning_rate: 0.0005
77
+ - train_batch_size: 16
78
+ - eval_batch_size: 8
79
+ - seed: 42
80
+ - gradient_accumulation_steps: 8
81
+ - total_train_batch_size: 128
82
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
83
+ - lr_scheduler_type: linear
84
+ - lr_scheduler_warmup_ratio: 0.01
85
+ - training_steps: 2362
86
+ - mixed_precision_training: Native AMP
87
+
88
+ ### Framework versions
89
+
90
+ - Transformers 4.24.0
91
+ - Pytorch 1.11.0+cu113
92
+ - Datasets 2.5.1
93
+ - Tokenizers 0.11.6
94
+
95
+
96
+ # Full config
97
+ {'dataset': {'conditional_training_config': {'aligned_prefix': '<|aligned|>',
98
+ 'drop_token_fraction': 0.01,
99
+ 'misaligned_prefix': '<|misaligned|>',
100
+ 'threshold': 0.0},
101
+ 'datasets': ['tomekkorbak/pii-pile-chunk3-0-50000',
102
+ 'tomekkorbak/pii-pile-chunk3-50000-100000',
103
+ 'tomekkorbak/pii-pile-chunk3-100000-150000',
104
+ 'tomekkorbak/pii-pile-chunk3-150000-200000',
105
+ 'tomekkorbak/pii-pile-chunk3-200000-250000',
106
+ 'tomekkorbak/pii-pile-chunk3-250000-300000',
107
+ 'tomekkorbak/pii-pile-chunk3-300000-350000',
108
+ 'tomekkorbak/pii-pile-chunk3-350000-400000',
109
+ 'tomekkorbak/pii-pile-chunk3-400000-450000',
110
+ 'tomekkorbak/pii-pile-chunk3-450000-500000',
111
+ 'tomekkorbak/pii-pile-chunk3-500000-550000',
112
+ 'tomekkorbak/pii-pile-chunk3-550000-600000',
113
+ 'tomekkorbak/pii-pile-chunk3-600000-650000',
114
+ 'tomekkorbak/pii-pile-chunk3-650000-700000',
115
+ 'tomekkorbak/pii-pile-chunk3-700000-750000',
116
+ 'tomekkorbak/pii-pile-chunk3-750000-800000',
117
+ 'tomekkorbak/pii-pile-chunk3-800000-850000',
118
+ 'tomekkorbak/pii-pile-chunk3-850000-900000',
119
+ 'tomekkorbak/pii-pile-chunk3-900000-950000',
120
+ 'tomekkorbak/pii-pile-chunk3-950000-1000000',
121
+ 'tomekkorbak/pii-pile-chunk3-1000000-1050000',
122
+ 'tomekkorbak/pii-pile-chunk3-1050000-1100000',
123
+ 'tomekkorbak/pii-pile-chunk3-1100000-1150000',
124
+ 'tomekkorbak/pii-pile-chunk3-1150000-1200000',
125
+ 'tomekkorbak/pii-pile-chunk3-1200000-1250000',
126
+ 'tomekkorbak/pii-pile-chunk3-1250000-1300000',
127
+ 'tomekkorbak/pii-pile-chunk3-1300000-1350000',
128
+ 'tomekkorbak/pii-pile-chunk3-1350000-1400000',
129
+ 'tomekkorbak/pii-pile-chunk3-1400000-1450000',
130
+ 'tomekkorbak/pii-pile-chunk3-1450000-1500000',
131
+ 'tomekkorbak/pii-pile-chunk3-1500000-1550000',
132
+ 'tomekkorbak/pii-pile-chunk3-1550000-1600000',
133
+ 'tomekkorbak/pii-pile-chunk3-1600000-1650000',
134
+ 'tomekkorbak/pii-pile-chunk3-1650000-1700000',
135
+ 'tomekkorbak/pii-pile-chunk3-1700000-1750000',
136
+ 'tomekkorbak/pii-pile-chunk3-1750000-1800000',
137
+ 'tomekkorbak/pii-pile-chunk3-1800000-1850000',
138
+ 'tomekkorbak/pii-pile-chunk3-1850000-1900000',
139
+ 'tomekkorbak/pii-pile-chunk3-1900000-1950000'],
140
+ 'is_split_by_sentences': True,
141
+ 'skip_tokens': 2990407680},
142
+ 'generation': {'force_call_on': [25177],
143
+ 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}],
144
+ 'scenario_configs': [{'generate_kwargs': {'bad_words_ids': [[50257],
145
+ [50258]],
146
+ 'do_sample': True,
147
+ 'max_length': 128,
148
+ 'min_length': 10,
149
+ 'temperature': 0.7,
150
+ 'top_k': 0,
151
+ 'top_p': 0.9},
152
+ 'name': 'unconditional',
153
+ 'num_samples': 4096,
154
+ 'prefix': '<|aligned|>'}],
155
+ 'scorer_config': {}},
156
+ 'kl_gpt3_callback': {'force_call_on': [25177],
157
+ 'gpt3_kwargs': {'model_name': 'davinci'},
158
+ 'max_tokens': 64,
159
+ 'num_samples': 4096,
160
+ 'prefix': '<|aligned|>'},
161
+ 'model': {'from_scratch': False,
162
+ 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True,
163
+ 'scale_attn_by': True},
164
+ 'model_kwargs': {'revision': '5c64636da035c40bb8b1186648a39822071476cb'},
165
+ 'num_additional_tokens': 2,
166
+ 'path_or_name': 'tomekkorbak/cranky_lichterman'},
167
+ 'objective': {'name': 'MLE'},
168
+ 'tokenizer': {'path_or_name': 'gpt2',
169
+ 'special_tokens': ['<|aligned|>', '<|misaligned|>']},
170
+ 'training': {'dataloader_num_workers': 0,
171
+ 'effective_batch_size': 128,
172
+ 'evaluation_strategy': 'no',
173
+ 'fp16': True,
174
+ 'hub_model_id': 'compassionate_elion',
175
+ 'hub_strategy': 'all_checkpoints',
176
+ 'learning_rate': 0.0005,
177
+ 'logging_first_step': True,
178
+ 'logging_steps': 1,
179
+ 'num_tokens': 3300000000,
180
+ 'output_dir': 'training_output2',
181
+ 'per_device_train_batch_size': 16,
182
+ 'push_to_hub': True,
183
+ 'remove_unused_columns': False,
184
+ 'save_steps': 251,
185
+ 'save_strategy': 'steps',
186
+ 'seed': 42,
187
+ 'tokens_already_seen': 2990407680,
188
+ 'warmup_ratio': 0.01,
189
+ 'weight_decay': 0.1}}
190
+
191
+ # Wandb URL:
192
+ https://wandb.ai/tomekkorbak/apo/runs/mt2ulgpd