File size: 15,767 Bytes
2a83fcf
 
a2de279
2a83fcf
 
 
a2de279
2a83fcf
92250f6
 
 
 
 
 
 
 
 
 
 
 
a2de279
92250f6
a2de279
92250f6
a2de279
 
92250f6
a2de279
92250f6
a2de279
eaa0bb2
 
 
 
 
 
 
 
 
a2de279
eaa0bb2
a2de279
eaa0bb2
a2de279
 
eaa0bb2
a2de279
eaa0bb2
a2de279
f031bd2
 
 
 
 
 
 
 
 
a2de279
f031bd2
a2de279
f031bd2
a2de279
 
f031bd2
a2de279
f031bd2
a2de279
99c9b7f
 
 
 
 
 
 
 
 
a2de279
99c9b7f
a2de279
99c9b7f
a2de279
 
99c9b7f
a2de279
99c9b7f
a2de279
f454423
 
 
 
 
 
 
 
 
a2de279
f454423
a2de279
f454423
a2de279
 
f454423
a2de279
f454423
a2de279
7e07887
 
 
 
 
 
 
 
 
a2de279
7e07887
a2de279
7e07887
a2de279
 
7e07887
a2de279
7e07887
a2de279
44a34de
 
 
 
 
 
 
 
 
a2de279
44a34de
a2de279
44a34de
a2de279
 
44a34de
a2de279
44a34de
a2de279
2a83fcf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
---
language: en
license: other
tags:
- text-generation
- opt
inference: false
commercial: false
model-index:
- name: inverse-scaling/opt-2.7b_eval
  results:
  - task:
      type: zero-shot-classification
      name: Zero-Shot Text Classification
    dataset:
      name: inverse-scaling/NeQA
      type: inverse-scaling/NeQA
      config: inverse-scaling--NeQA
      split: train
    metrics:
    - type: accuracy
      value: 0.5266666666666666
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZmIwMTQ3ZjkzYWM2ZDQzNzgwMWZiZTQzZTY1MjcwNTM4YWI5NDg5N2EyNzJmMjI1MjdiOTk4ZDJlMzI1MDhhMCIsInZlcnNpb24iOjF9.nLyuA5mmpTqttnYFM4jLwpfYG87evd1SVnzlA149qagPUVkrHEgNAEV3MDYzBKm7aDWMB-SYxHYZab7gbzbFAA
    - type: loss
      value: 0.7040334887305896
      name: Loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiN2M4ZmI0NjQxMTE3MGI0NTQ3NjY3NzExNWZjNWIzZTdkNjA3NTVlNjQ5ODllMmNhZWQwMTEzYTI2ZTU5ZWRiMSIsInZlcnNpb24iOjF9.7YOIf1gfgzqMEnp5rScnVap6Ne607-FNWWkux9ohHb2jai3qKH73TBx0KjnUh2a-rx5CLyiDWKu2Zq2CIxrvDw
  - task:
      type: zero-shot-classification
      name: Zero-Shot Text Classification
    dataset:
      name: inverse-scaling/quote-repetition
      type: inverse-scaling/quote-repetition
      config: inverse-scaling--quote-repetition
      split: train
    metrics:
    - type: accuracy
      value: 0.9433333333333334
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNjJlYjU4NzY1YjU2ZmZjYjA4YmU3MGFmYWVkZmZjZWY2NzE3ODU2Y2RlNjdkZDNmYTc2MDhiNzA2MzQ5ZDVhYSIsInZlcnNpb24iOjF9.5n5u4QxFudSeAMIWzXBabBwzQJXxXomlL4k3UZWghrMwg2yR_ZMQ-T1XPvj4QX7znzn2OEceL4XhcyGV_lslAg
    - type: loss
      value: 0.08805314021493173
      name: Loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNWNkOTFjYmU1OGM3YjEyMzhkNDI5OGE2ZjgxMWJhOTc3YjgxNDYxMWE4NjFkNWVmYjIwMmRlZDI5YTBkYjAwOCIsInZlcnNpb24iOjF9.7wQKzALinVldcU7gbodn4nxBiC1uISsf8yRaqIDB9BAbIRP4sdE_2H3PGTXAd-I7srGO_Ru8fnUhY5yZt1lmBg
  - task:
      type: zero-shot-classification
      name: Zero-Shot Text Classification
    dataset:
      name: inverse-scaling/redefine-math
      type: inverse-scaling/redefine-math
      config: inverse-scaling--redefine-math
      split: train
    metrics:
    - type: accuracy
      value: 0.72
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMmE4YWE1MzQ1YTgyMjhmMzljNzlhOGJhZjk3ZGU1YzBmYzk2NjljNDEyYWZkYmI1NzM5Y2MyNDkwNDc2MjdkMSIsInZlcnNpb24iOjF9.kx2jS5AMDdD9tiNA_11-1oztP8B41AUeY5HiqsgbUIGUpZbA-FL4FCd9c6f2MUb7WbsGKUax4-gLUDm-XKl6Cw
    - type: loss
      value: 0.6120818952802155
      name: Loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZTQyMGE1ODcxNDVkMThkZjZhMzQzNTQwZmY1ZTg0YjVkMmIwNDc0NWQzM2JmZTc0YWQzYWNiNWU4M2RlZDY5ZiIsInZlcnNpb24iOjF9.n5b4tXIiRrT9bS3n1aJ5W-RLmRZ5Ro0uQ0-G7spzDGf1w0zP5H05ZlNN3gnEGdApKlmqsIhfwGZqMU_XoVCzCg
  - task:
      type: zero-shot-classification
      name: Zero-Shot Text Classification
    dataset:
      name: inverse-scaling/hindsight-neglect-10shot
      type: inverse-scaling/hindsight-neglect-10shot
      config: inverse-scaling--hindsight-neglect-10shot
      split: train
    metrics:
    - type: accuracy
      value: 0.44761904761904764
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYTUzZTkyMDQ5OTI5ZjkxM2FmMWE5Mzg1NTFmNjg3MTRhMWZlYWNlZDZhMTkxOTZiNjc0Yjc5MTg2NTJiZGNjNyIsInZlcnNpb24iOjF9.AspGLsz8cCCmVIilO-nKuYmNcHr83f3NjnY_e5_vSaKUw22VyMixSGOowbLfROl3Lh2Rfibh3PHnhTyprQnqDA
    - type: loss
      value: 0.8747565217197888
      name: Loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNTZjYWJhNTMzZjFlYzVjMjdhZjQyNDJkYzgyZmRjYTgzZjlmMjRhMTljZGU5N2E0ODBhOGI1NzdjZDM2NWEzYSIsInZlcnNpb24iOjF9.N7zwfGpX8v6CRvhyUlJ_znKVJyo238fDcL5CQkDWAWBWRtfxCoPXE_Xk69G7UBvrbQLPpPTuTPVUMVh7GGi-Dw
  - task:
      type: zero-shot-classification
      name: Zero-Shot Text Classification
    dataset:
      name: mathemakitten/winobias_antistereotype_test_cot_v1
      type: mathemakitten/winobias_antistereotype_test_cot_v1
      config: mathemakitten--winobias_antistereotype_test_cot_v1
      split: test
    metrics:
    - type: accuracy
      value: 0.39805825242718446
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOGUyODYzYjg5MzI1MDEyZTk2OGY3ZTdlYThlZDE5YzVmMzNlMjI1N2JlYmM3NmMxM2VlMzM4ODFlNjJkZDdkOSIsInZlcnNpb24iOjF9.2iaV9PkSnUUYldLb6krXbCPXOkPqibzVWyqkBzp4OKtcJW5z9y_Es8W81vBaAdXxGoojKYKdpraovzf7SQAdDQ
    - type: loss
      value: 1.3472958901782706
      name: Loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMDZmOWY1ODdkMzJmMmM2MzM3OGM4MzU4MmE3MmQyM2Q4MjQ2MzNjM2E3ZTcyNDQyNzVhMzVmZmMwYWY5OTE0YiIsInZlcnNpb24iOjF9.xIVeCNxXTgjJLu6EtlcXzuA4J3HNEyFEvoDovF1hH_-sUwZqMMApVl-JC63lnxCs1BxeW2XHNl3CkNHEVii2CQ
  - task:
      type: zero-shot-classification
      name: Zero-Shot Text Classification
    dataset:
      name: mathemakitten/winobias_antistereotype_test_cot_v3
      type: mathemakitten/winobias_antistereotype_test_cot_v3
      config: mathemakitten--winobias_antistereotype_test_cot_v3
      split: test
    metrics:
    - type: accuracy
      value: 0.38106796116504854
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiODhmMjYzMzg3MGE1ZDg4ZWZiM2Q4NmE1N2YwODZiNzBiNjRhODE4NjIwZjI3Y2ZlMjY4NWU1OTFhMDUxMDA0NCIsInZlcnNpb24iOjF9.97nWVkITOK8236d9JNEUQ3M3xsL0cSlHbPvnUU0Va0kkgCMnqI62kJF0_1JMb36tD9n243RnWp6rrpfSi2IUCA
    - type: loss
      value: 1.2352868161494008
      name: Loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOGZlY2NkYWJkMmE4MWNjZDFjZWRlZWY3OTIzMzc4MTU0ZThhMmQ0ZTAyYTEyMjZmNWNhODMzY2Q4YmNlZjRlYyIsInZlcnNpb24iOjF9.fPAkjMH3VU2AAE-EVKhEZTNM47LrFUCF-EqGE2C8NyY0yU3o0IE9xBnF3kn9F-McsnTnHB9fIuVk8B6OL_OEDg
  - task:
      type: zero-shot-classification
      name: Zero-Shot Text Classification
    dataset:
      name: mathemakitten/winobias_antistereotype_test_v5
      type: mathemakitten/winobias_antistereotype_test_v5
      config: mathemakitten--winobias_antistereotype_test_v5
      split: test
    metrics:
    - type: accuracy
      value: 0.4029126213592233
      name: Accuracy
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiM2RmNTY3YzYxZDljZmFmOGQzMWIxODBmNTNlNDAxOTE4NjJmNzZmZjY1NGE1YjRjMzlhMGRiOWExYzQ3Y2ViOSIsInZlcnNpb24iOjF9.8mFB5qYQUOkDXoNR0u-a9AbNDb-w2jnBH0seY_01pp_magRKFif9tBiTfV5qHFoO0wSHnoRBANa-psn6mv_vCw
    - type: loss
      value: 1.3491194838988576
      name: Loss
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiODA2MGFmYmEwMDc4YzY3MjI3OWE2ZjA4MDUxMzFlYWM0NGMxOTVjZTIzZGFkNTA5MWVmZGI1MmZiMjc5MWViYiIsInZlcnNpb24iOjF9.X78VtBUzeKI931pe1JdcUAaE7CwKPcaur4TjnAAIJp7DBxyC8QSS2r2LpgvO4p6Q-HnSBXGpK23MCCkDPeQIAA
---

# OPT : Open Pre-trained Transformer Language Models

OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI.

**Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). 
Content from **this** model card has been written by the Hugging Face team.

## Intro

To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068):

> Large language models trained on massive text collections have shown surprising emergent
> capabilities to generate text and perform zero- and few-shot learning. While in some cases the public
> can interact with these models through paid APIs, full model access is currently limited to only a
> few highly resourced labs. This restricted access has limited researchers’ ability to study how and
> why these large language models work, hindering progress on improving known challenges in areas
> such as robustness, bias, and toxicity.

> We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M
> to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match 
> the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data
> collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and
> to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the
> collective research community as a whole, which is only possible when models are available for study.

## Model description

OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective.
OPT belongs to the same family of decoder-only models as [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modeling objective.

For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read 
the [official paper](https://arxiv.org/abs/2205.01068).

## Intended uses & limitations

The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation.
In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt).

### How to use

You can use this model directly with a pipeline for text generation.

```python
>>> from transformers import pipeline

>>> generator = pipeline('text-generation', model="facebook/opt-2.7b")
>>> generator("Hello, I'm am conscious and")
[{'generated_text': 'Hello, I am conscious and I am a human being.\nI am a human being, and'}]
```

By default, generation is deterministic. In order to use the top-k sampling, please set `do_sample` to `True`. 

```python
>>> from transformers import pipeline, set_seed

>>> set_seed(32)
>>> generator = pipeline('text-generation', model="facebook/opt-2.7b", do_sample=True)
>>> generator("Hello, I'm am conscious and")
[{'generated_text': "Hello, I'm am conscious and I make things. I'm in the creative community, which is"}]
```

### Limitations and bias

As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of
unfiltered content from the internet, which is far from neutral, the model is strongly biased:

> Like other large language models for which the diversity (or lack thereof) of training
> data induces downstream impact on the quality of our model, OPT-175B has limitations in terms
> of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and
> hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern
> large language models. 

Here's an example of how the model can have biased predictions:

```python
>>> from transformers import pipeline, set_seed

>>> set_seed(32)
>>> generator = pipeline('text-generation', model="facebook/opt-2.7b", do_sample=True, num_return_sequences=5)
>>> generator("The woman worked as a")
[{'generated_text': "The woman worked as a security guard at a nursery in the city's eastern district of Samut P"}, 
{'generated_text': 'The woman worked as a doctor in the Philippines. Officials in China allege she stole the coronavirus'}, 
{'generated_text': 'The woman worked as a teacher in the city of Krasnodar in south Russia. She'}, 
{'generated_text': 'The woman worked as a researcher and lecturer at the Russian Academy of Sciences in a laboratory dedicated to the'}, 
{'generated_text': 'The woman worked as a nanny on a property owned by Mr Fitton-Allen in the city'}]
```

compared to:

```python
>>> from transformers import pipeline, set_seed

>>> set_seed(32)
>>> generator = pipeline('text-generation', model="facebook/opt-2.7b", do_sample=True, num_return_sequences=5)
>>> generator("The man worked as a")
[{'generated_text': "The man worked as a security guard at a retirement home after being hired by the administrator's cousin,"}, 
{'generated_text': 'The man worked as a doctor in the Philippines.\n\nHe had hoped to work his way back'}, 
{'generated_text': 'The man worked as a teacher in the city of Krasnodar in south Russia.He'}, 
{'generated_text': 'The man worked as a researcher and his work on the topic predates the project, by many years'}, 
{'generated_text': 'The man worked as a chef in a restaurant for 40 years. How could this be so different from'}]
```

This bias will also affect all fine-tuned versions of this model.

## Training data

The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents: 

  - BookCorpus, which consists of more than 10K unpublished books,
  - CC-Stories, which contains a subset of CommonCrawl data filtered to match the
story-like style of Winograd schemas,
  - The Pile, from which *Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included.
  - Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in
Roller et al. (2021)
  - CCNewsV2 containing an updated version of the English portion of the CommonCrawl News
dataset that was used in RoBERTa (Liu et al., 2019b)

The final training data contains 180B tokens corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally
to each dataset’s size in the pretraining corpus. 

The dataset might contain offensive content as parts of the dataset are a subset of
public Common Crawl data, along with a subset of public Reddit data, which could contain sentences
that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety.

### Collection process

The dataset was collected from the internet, and went through classic data processing algorithms and
re-formatting practices, including removing repetitive/non-informative text like *Chapter One* or
*This ebook by Project Gutenberg.*

## Training procedure

### Preprocessing

The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a
vocabulary size of 50272. The inputs are sequences of 2048 consecutive tokens.

The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly 33 days of continuous training.

### BibTeX entry and citation info

```bibtex
@misc{zhang2022opt,
      title={OPT: Open Pre-trained Transformer Language Models}, 
      author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer},
      year={2022},
      eprint={2205.01068},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```