michaelfeil committed
Commit
5b9fc54
1 Parent(s): 9bc80b0

Upload EleutherAI/gpt-j-6b ctranslate fp16 weights

Files changed (7)
  1. .gitattributes +8 -25
  2. README.md +213 -0
  3. added_tokens.json +1 -0
  4. merges.txt +0 -0
  5. model.bin +2 -2
  6. special_tokens_map.json +1 -0
  7. vocab.json +0 -0
.gitattributes CHANGED
@@ -1,34 +1,17 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text

+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,213 @@
---
language:
- en
tags:
- ctranslate2
- int8
- float16
- pytorch
- causal-lm
license: apache-2.0
datasets:
- the_pile

---
# Fast Inference with CTranslate2
Speed up inference while reducing memory use by 2x-4x, using int8 inference in C++ on CPU or GPU.

Quantized version of [EleutherAI/gpt-j-6b](https://huggingface.co/EleutherAI/gpt-j-6b).
```bash
pip install hf-hub-ctranslate2>=2.0.6
```
Converted on 2023-05-19 using:
```bash
ct2-transformers-converter --model EleutherAI/gpt-j-6b --output_dir /home/feil_m/tmp-ct2fast-gpt-j-6b --force --copy_files merges.txt tokenizer.json README.md tokenizer_config.json vocab.json special_tokens_map.json added_tokens.json .gitattributes --quantization float16
```
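
The same conversion can also be driven from Python through CTranslate2's converter classes. A minimal sketch, assuming `ctranslate2>=3.13.0` and `transformers` are installed; the output directory name is illustrative:

```python
# Sketch: programmatic equivalent of the ct2-transformers-converter call above.
from ctranslate2.converters import TransformersConverter

converter = TransformersConverter("EleutherAI/gpt-j-6b")
converter.convert(
    output_dir="ct2fast-gpt-j-6b",  # illustrative output path
    quantization="float16",         # store weights in fp16, as in this repo
    force=True,                     # overwrite the output directory if it exists
)
```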

Checkpoint compatible with [ctranslate2>=3.13.0](https://github.com/OpenNMT/CTranslate2) and [hf-hub-ctranslate2>=2.0.6](https://github.com/michaelfeil/hf-hub-ctranslate2):
- `compute_type=int8_float16` for `device="cuda"`
- `compute_type=int8` for `device="cpu"`

```python
from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub
from transformers import AutoTokenizer

model_name = "michaelfeil/ct2fast-gpt-j-6b"
# Use either TranslatorCT2fromHfHub or GeneratorCT2fromHfHub, depending on the model type.
model = GeneratorCT2fromHfHub(
    # load in int8_float16 on CUDA
    model_name_or_path=model_name,
    device="cuda",
    compute_type="int8_float16",
    tokenizer=AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6b"),
)
outputs = model.generate(
    text=["How do you call a fast Flan-ingo?", "User: How are you doing? Bot:"],
)
print(outputs)
```
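
The converted weights can also be used without the `hf-hub-ctranslate2` wrapper, through the plain `ctranslate2` API. A minimal sketch, assuming the repository is first downloaded locally (here via `huggingface_hub.snapshot_download`) and a CUDA device is available; the prompt and sampling settings are arbitrary:

```python
# Sketch: generation with the plain ctranslate2 API instead of the wrapper above.
import ctranslate2
from huggingface_hub import snapshot_download
from transformers import AutoTokenizer

model_dir = snapshot_download("michaelfeil/ct2fast-gpt-j-6b")  # local copy of this repo
generator = ctranslate2.Generator(model_dir, device="cuda", compute_type="int8_float16")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6b")

prompt = "User: How are you doing? Bot:"
# CTranslate2 expects string tokens, not ids, as the prompt.
start_tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(prompt))
results = generator.generate_batch([start_tokens], max_length=64, sampling_topk=10)
print(tokenizer.decode(results[0].sequences_ids[0]))
```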

# Licence and other remarks:
This is just a quantized version. The licence conditions are intended to be identical to those of the original Hugging Face repo.

# Original description

# GPT-J 6B

## Model Description

GPT-J 6B is a transformer model trained using Ben Wang's [Mesh Transformer JAX](https://github.com/kingoflolz/mesh-transformer-jax/). "GPT-J" refers to the class of model, while "6B" represents the number of trainable parameters.

<figure>

| Hyperparameter       | Value      |
|----------------------|------------|
| \\(n_{parameters}\\) | 6053381344 |
| \\(n_{layers}\\)     | 28&ast;    |
| \\(d_{model}\\)      | 4096       |
| \\(d_{ff}\\)         | 16384      |
| \\(n_{heads}\\)      | 16         |
| \\(d_{head}\\)       | 256        |
| \\(n_{ctx}\\)        | 2048       |
| \\(n_{vocab}\\)      | 50257/50400&dagger; (same tokenizer as GPT-2/3) |
| Positional Encoding  | [Rotary Position Embedding (RoPE)](https://arxiv.org/abs/2104.09864) |
| RoPE Dimensions      | [64](https://github.com/kingoflolz/mesh-transformer-jax/blob/f2aa66e0925de6593dcbb70e72399b97b4130482/mesh_transformer/layers.py#L223) |
<figcaption><p><strong>&ast;</strong> Each layer consists of one feedforward block and one self attention block.</p>
<p><strong>&dagger;</strong> Although the embedding matrix has a size of 50400, only 50257 entries are used by the GPT-2 tokenizer.</p></figcaption></figure>

The model consists of 28 layers with a model dimension of 4096, and a feedforward dimension of 16384. The model dimension is split into 16 heads, each with a dimension of 256. Rotary Position Embedding (RoPE) is applied to 64 dimensions of each head. The model is trained with a tokenization vocabulary of 50257, using the same set of BPEs as GPT-2/GPT-3.
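
As a quick sanity check, the per-head dimension and the size of the padded embedding matrix follow directly from the table above:

```python
# Quick arithmetic checks against the hyperparameter table (values taken from the table).
d_model, n_heads, rope_dims = 4096, 16, 64
n_vocab_padded, n_params = 50400, 6053381344

d_head = d_model // n_heads
assert d_head == 256  # matches d_head in the table

embedding_params = n_vocab_padded * d_model
print(f"RoPE covers {rope_dims}/{d_head} dims per head")
print(f"token embeddings: {embedding_params / n_params:.1%} of all parameters")  # ~3.4%
```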

## Intended Use and Limitations

GPT-J learns an inner representation of the English language that can be used to extract features useful for downstream tasks. However, the model is best at what it was pretrained for, which is generating text from a prompt.

### Out-of-scope use

GPT-J-6B is **not** intended for deployment without fine-tuning, supervision, and/or moderation. It is not in itself a product and cannot be used for human-facing interactions. For example, the model may generate harmful or offensive text. Please evaluate the risks associated with your particular use case.

GPT-J-6B was trained on an English-language only dataset, and is thus **not** suitable for translation or generating text in other languages.

GPT-J-6B has not been fine-tuned for downstream contexts in which language models are commonly deployed, such as writing genre prose, or commercial chatbots. This means GPT-J-6B will **not** respond to a given prompt the way a product like ChatGPT does. This is because, unlike this model, ChatGPT was fine-tuned using methods such as Reinforcement Learning from Human Feedback (RLHF) to better “follow” human instructions.

### Limitations and Biases

The core functionality of GPT-J is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work. When prompting GPT-J it is important to remember that the statistically most likely next token is often not the token that produces the most "accurate" text. Never depend upon GPT-J to produce factually accurate output.

GPT-J was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending upon use case GPT-J may produce socially unacceptable text. See [Sections 5 and 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a more detailed analysis of the biases in the Pile.

As with all language models, it is hard to predict in advance how GPT-J will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results.

### How to use

This model can be easily loaded using the `AutoModelForCausalLM` functionality:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
```
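
For completeness, a short continuation of the snippet above that actually generates text; the prompt and sampling parameters are arbitrary examples, not part of the original card:

```python
# Continues the snippet above: generate a completion from a prompt.
import torch

prompt = "The Belgian national football team "
inputs = tokenizer(prompt, return_tensors="pt")

with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=50,      # arbitrary length for the example
        do_sample=True,         # sample instead of greedy decoding
        temperature=0.8,
        pad_token_id=tokenizer.eos_token_id,  # GPT-J has no dedicated pad token
    )

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```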

## Training data

GPT-J 6B was trained on [the Pile](https://pile.eleuther.ai), a large-scale curated dataset created by [EleutherAI](https://www.eleuther.ai).

## Training procedure

This model was trained for 402 billion tokens over 383,500 steps on a TPU v3-256 pod. It was trained as an autoregressive language model, using cross-entropy loss to maximize the likelihood of predicting the next token correctly.
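
A back-of-the-envelope check of those numbers: 402 billion tokens over 383,500 steps is roughly one million tokens per step, consistent with an effective batch of about 512 sequences of 2,048 tokens (the batch size is an inference here, not a figure stated in this card):

```python
# Back-of-the-envelope check of the training-procedure numbers above.
total_tokens = 402e9
steps = 383_500
ctx_len = 2048  # n_ctx from the hyperparameter table

tokens_per_step = total_tokens / steps
print(f"{tokens_per_step:,.0f} tokens per step")                         # ~1.05 million
print(f"implied batch size: {tokens_per_step / ctx_len:.0f} sequences")  # ~512 (an assumption)
```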

## Evaluation results

<figure>

| Model                    | Public      | Training FLOPs | LAMBADA PPL ↓ | LAMBADA Acc ↑ | Winogrande ↑ | Hellaswag ↑ | PIQA ↑ | Dataset Size (GB) |
|--------------------------|-------------|----------------|---------------|---------------|--------------|-------------|--------|-------------------|
| Random Chance            | &check;     | 0              | ~a lot        | ~0%           | 50%          | 25%         | 25%    | 0                 |
| GPT-3 Ada&ddagger;       | &cross;     | -----          | 9.95          | 51.6%         | 52.9%        | 43.4%       | 70.5%  | -----             |
| GPT-2 1.5B               | &check;     | -----          | 10.63         | 51.21%        | 59.4%        | 50.9%       | 70.8%  | 40                |
| GPT-Neo 1.3B&ddagger;    | &check;     | 3.0e21         | 7.50          | 57.2%         | 55.0%        | 48.9%       | 71.1%  | 825               |
| Megatron-2.5B&ast;       | &cross;     | 2.4e21         | -----         | 61.7%         | -----        | -----       | -----  | 174               |
| GPT-Neo 2.7B&ddagger;    | &check;     | 6.8e21         | 5.63          | 62.2%         | 56.5%        | 55.8%       | 73.0%  | 825               |
| GPT-3 1.3B&ast;&ddagger; | &cross;     | 2.4e21         | 5.44          | 63.6%         | 58.7%        | 54.7%       | 75.1%  | ~800              |
| GPT-3 Babbage&ddagger;   | &cross;     | -----          | 5.58          | 62.4%         | 59.0%        | 54.5%       | 75.5%  | -----             |
| Megatron-8.3B&ast;       | &cross;     | 7.8e21         | -----         | 66.5%         | -----        | -----       | -----  | 174               |
| GPT-3 2.7B&ast;&ddagger; | &cross;     | 4.8e21         | 4.60          | 67.1%         | 62.3%        | 62.8%       | 75.6%  | ~800              |
| Megatron-11B&dagger;     | &check;     | 1.0e22         | -----         | -----         | -----        | -----       | -----  | 161               |
| **GPT-J 6B&ddagger;**    | **&check;** | **1.5e22**     | **3.99**      | **69.7%**     | **65.3%**    | **66.1%**   | **76.5%** | **825**        |
| GPT-3 6.7B&ast;&ddagger; | &cross;     | 1.2e22         | 4.00          | 70.3%         | 64.5%        | 67.4%       | 78.0%  | ~800              |
| GPT-3 Curie&ddagger;     | &cross;     | -----          | 4.00          | 69.3%         | 65.6%        | 68.5%       | 77.9%  | -----             |
| GPT-3 13B&ast;&ddagger;  | &cross;     | 2.3e22         | 3.56          | 72.5%         | 67.9%        | 70.9%       | 78.5%  | ~800              |
| GPT-3 175B&ast;&ddagger; | &cross;     | 3.1e23         | 3.00          | 76.2%         | 70.2%        | 78.9%       | 81.0%  | ~800              |
| GPT-3 Davinci&ddagger;   | &cross;     | -----          | 3.0           | 75%           | 72%          | 78%         | 80%    | -----             |
<figcaption><p>Models roughly sorted by performance, or by FLOPs if not available.</p>

<p><strong>&ast;</strong> Evaluation numbers reported by their respective authors. All other numbers are provided by running <a href="https://github.com/EleutherAI/lm-evaluation-harness/"><code>lm-evaluation-harness</code></a> either with released weights or with API access. Due to subtle implementation differences as well as different zero-shot task framing, these might not be directly comparable. See <a href="https://blog.eleuther.ai/gpt3-model-sizes/">this blog post</a> for more details.</p>

<p><strong>†</strong> Megatron-11B provides no comparable metrics, and several implementations using the released weights do not reproduce the generation quality and evaluations (see <a href="https://github.com/huggingface/transformers/pull/10301">1</a> <a href="https://github.com/pytorch/fairseq/issues/2358">2</a> <a href="https://github.com/pytorch/fairseq/issues/2719">3</a>). Thus, evaluation was not attempted.</p>

<p><strong>‡</strong> These models have been trained with data which contains possible test set contamination. The OpenAI GPT-3 models failed to deduplicate training data for certain test sets, while the GPT-Neo models, as well as this one, are trained on the Pile, which has not been deduplicated against any test sets.</p></figcaption></figure>

## Citation and Related Information

### BibTeX entry

To cite this model:
```bibtex
@misc{gpt-j,
  author = {Wang, Ben and Komatsuzaki, Aran},
  title = {{GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model}},
  howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}},
  year = 2021,
  month = May
}
```

To cite the codebase that trained this model:
```bibtex
@misc{mesh-transformer-jax,
  author = {Wang, Ben},
  title = {{Mesh-Transformer-JAX: Model-Parallel Implementation of Transformer Language Model with JAX}},
  howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}},
  year = 2021,
  month = May
}
```

If you use this model, we would love to hear about it! Reach out on [GitHub](https://github.com/kingoflolz/mesh-transformer-jax), Discord, or shoot Ben an email.

## Acknowledgements

This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/), as well as the Cloud TPU team for providing early access to the [Cloud TPU VM](https://cloud.google.com/blog/products/compute/introducing-cloud-tpu-vms) Alpha.

Thanks to everyone who helped out one way or another (listed alphabetically):
- [James Bradbury](https://twitter.com/jekbradbury) for valuable assistance with debugging JAX issues.
- [Stella Biderman](https://www.stellabiderman.com), [Eric Hallahan](https://twitter.com/erichallahan), [Kurumuz](https://github.com/kurumuz/), and [Finetune](https://github.com/finetuneanon/) for converting the model to be compatible with the `transformers` package.
- [Leo Gao](https://twitter.com/nabla_theta) for running zero-shot evaluations for the baseline models for the table.
- [Laurence Golding](https://github.com/researcher2/) for adding some features to the web demo.
- [Aran Komatsuzaki](https://twitter.com/arankomatsuzaki) for advice with experiment design and writing the blog posts.
- [Janko Prester](https://github.com/jprester/) for creating the web demo frontend.
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<|extratoken_14|>": 50270, "<|extratoken_121|>": 50377, "<|extratoken_3|>": 50259, "<|extratoken_25|>": 50281, "<|extratoken_101|>": 50357, "<|extratoken_138|>": 50394, "<|extratoken_10|>": 50266, "<|extratoken_21|>": 50277, "<|extratoken_32|>": 50288, "<|extratoken_46|>": 50302, "<|extratoken_22|>": 50278, "<|extratoken_40|>": 50296, "<|extratoken_96|>": 50352, "<|extratoken_92|>": 50348, "<|extratoken_95|>": 50351, "<|extratoken_141|>": 50397, "<|extratoken_78|>": 50334, "<|extratoken_86|>": 50342, "<|extratoken_56|>": 50312, "<|extratoken_124|>": 50380, "<|extratoken_127|>": 50383, "<|extratoken_122|>": 50378, "<|extratoken_123|>": 50379, "<|extratoken_111|>": 50367, "<|extratoken_93|>": 50349, "<|extratoken_130|>": 50386, "<|extratoken_113|>": 50369, "<|extratoken_50|>": 50306, "<|extratoken_97|>": 50353, "<|extratoken_1|>": 50257, "<|extratoken_55|>": 50311, "<|extratoken_34|>": 50290, "<|extratoken_143|>": 50399, "<|extratoken_62|>": 50318, "<|extratoken_74|>": 50330, "<|extratoken_136|>": 50392, "<|extratoken_117|>": 50373, "<|extratoken_38|>": 50294, "<|extratoken_120|>": 50376, "<|extratoken_39|>": 50295, "<|extratoken_65|>": 50321, "<|extratoken_29|>": 50285, "<|extratoken_104|>": 50360, "<|extratoken_13|>": 50269, "<|extratoken_5|>": 50261, "<|extratoken_107|>": 50363, "<|extratoken_19|>": 50275, "<|extratoken_84|>": 50340, "<|extratoken_77|>": 50333, "<|extratoken_135|>": 50391, "<|extratoken_24|>": 50280, "<|extratoken_134|>": 50390, "<|extratoken_15|>": 50271, "<|extratoken_67|>": 50323, "<|extratoken_89|>": 50345, "<|extratoken_2|>": 50258, "<|extratoken_73|>": 50329, "<|extratoken_129|>": 50385, "<|extratoken_126|>": 50382, "<|extratoken_30|>": 50286, "<|extratoken_41|>": 50297, "<|extratoken_28|>": 50284, "<|extratoken_114|>": 50370, "<|extratoken_128|>": 50384, "<|extratoken_118|>": 50374, "<|extratoken_131|>": 50387, "<|extratoken_68|>": 50324, "<|extratoken_125|>": 50381, "<|extratoken_103|>": 50359, "<|extratoken_8|>": 50264, "<|extratoken_64|>": 50320, "<|extratoken_52|>": 50308, "<|extratoken_45|>": 50301, "<|extratoken_43|>": 50299, "<|extratoken_18|>": 50274, "<|extratoken_139|>": 50395, "<|extratoken_85|>": 50341, "<|extratoken_88|>": 50344, "<|extratoken_63|>": 50319, "<|extratoken_4|>": 50260, "<|extratoken_48|>": 50304, "<|extratoken_112|>": 50368, "<|extratoken_17|>": 50273, "<|extratoken_49|>": 50305, "<|extratoken_108|>": 50364, "<|extratoken_110|>": 50366, "<|extratoken_42|>": 50298, "<|extratoken_70|>": 50326, "<|extratoken_6|>": 50262, "<|extratoken_35|>": 50291, "<|extratoken_23|>": 50279, "<|extratoken_66|>": 50322, "<|extratoken_60|>": 50316, "<|extratoken_71|>": 50327, "<|extratoken_51|>": 50307, "<|extratoken_133|>": 50389, "<|extratoken_20|>": 50276, "<|extratoken_76|>": 50332, "<|extratoken_81|>": 50337, "<|extratoken_142|>": 50398, "<|extratoken_116|>": 50372, "<|extratoken_57|>": 50313, "<|extratoken_75|>": 50331, "<|extratoken_37|>": 50293, "<|extratoken_33|>": 50289, "<|extratoken_16|>": 50272, "<|extratoken_61|>": 50317, "<|extratoken_7|>": 50263, "<|extratoken_12|>": 50268, "<|extratoken_36|>": 50292, "<|extratoken_80|>": 50336, "<|extratoken_98|>": 50354, "<|extratoken_105|>": 50361, "<|extratoken_91|>": 50347, "<|extratoken_53|>": 50309, "<|extratoken_137|>": 50393, "<|extratoken_9|>": 50265, "<|extratoken_79|>": 50335, "<|extratoken_83|>": 50339, "<|extratoken_109|>": 50365, "<|extratoken_99|>": 50355, "<|extratoken_140|>": 50396, "<|extratoken_72|>": 50328, "<|extratoken_11|>": 50267, "<|extratoken_94|>": 50350, "<|extratoken_26|>": 
50282, "<|extratoken_59|>": 50315, "<|extratoken_106|>": 50362, "<|extratoken_115|>": 50371, "<|extratoken_58|>": 50314, "<|extratoken_90|>": 50346, "<|extratoken_31|>": 50287, "<|extratoken_102|>": 50358, "<|extratoken_47|>": 50303, "<|extratoken_100|>": 50356, "<|extratoken_82|>": 50338, "<|extratoken_44|>": 50300, "<|extratoken_69|>": 50325, "<|extratoken_54|>": 50310, "<|extratoken_132|>": 50388, "<|extratoken_27|>": 50283, "<|extratoken_87|>": 50343, "<|extratoken_119|>": 50375}
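
These added tokens are the `<|extratoken_n|>` placeholders that pad the 50,257-entry GPT-2 vocabulary up to the 50,400-row embedding matrix mentioned in the model card; each `<|extratoken_n|>` maps to id 50256 + n. A small sketch to verify this from the file (run from a local checkout of this repo):

```python
# Sketch: sanity-check the added-token ids in added_tokens.json.
import json
import re

with open("added_tokens.json") as f:
    added = json.load(f)

assert len(added) == 143  # pads the vocab from 50257 to 50400 entries
for token, idx in added.items():
    n = int(re.search(r"\d+", token).group())
    assert idx == 50256 + n, (token, idx)
print("all <|extratoken_n|> ids equal 50256 + n")
```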
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d4d5833e76247771a838debe34d1f7c10dd46b8d6c3e95c9619b0f7802e15627
- size 6056298684
+ oid sha256:5187fa4b65af98a681d57fe88b97606612702bf0239e0faee4cf69c7a9d5a03e
+ size 12101781268
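
The size change is consistent with the commit title: at about 6.05 billion parameters, float16 storage needs roughly 2 bytes per parameter (~12.1 GB), while the previous ~6.06 GB file matches 1 byte per parameter, i.e. int8 weights (an inference, not stated in the diff). A rough check:

```python
# Rough consistency check of the model.bin sizes against the parameter count.
n_params = 6_053_381_344   # from the GPT-J 6B model card above
print(n_params * 2 / 1e9)  # ~12.11 GB -> close to the new float16 file (12,101,781,268 bytes)
print(n_params * 1 / 1e9)  # ~6.05 GB  -> close to the old file (6,056,298,684 bytes),
                           #              consistent with int8 weights (an assumption)
```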
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
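
As the map shows, GPT-J reuses a single `<|endoftext|>` token for BOS, EOS, and UNK. A quick way to confirm this with the tokenizer (a sketch; it assumes `transformers` is installed):

```python
# Sketch: the bos/eos/unk special tokens all resolve to the same <|endoftext|> id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6b")
print(tok.bos_token, tok.eos_token, tok.unk_token)           # all "<|endoftext|>"
print(tok.bos_token_id, tok.eos_token_id, tok.unk_token_id)  # all 50256
```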
vocab.json ADDED
The diff for this file is too large to render. See raw diff