vernieuwe committed on
Commit
d17e854
1 Parent(s): 81b8f9d

Upload 10 files

.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -0,0 +1,93 @@
---
task_categories:
- text-generation
---
# Description
This language model is version 0.0 of a Gradio Coding Assistant. It is an instruction fine-tuned version of [StarCoder](https://huggingface.co/bigcode/starcoder) that is designed to provide assistance to developers who use [Gradio](https://www.gradio.app).

# Dataset
The dataset is multi-source. Its content comes from the following sources:
- The Stack

  More precisely, we looked into [the-stack-dedup](https://huggingface.co/datasets/bigcode/the-stack-dedup), which contains code under permissive licenses. We shortlisted the files whose content incorporated the keyword `gradio`.
- GitHub issues

  We scraped all the issues of the official repository [gradio-app/gradio](https://github.com/gradio-app/gradio) and added them to our training dataset.
- Spaces on the Hugging Face Hub

  We used the [huggingface_hub API](https://huggingface.co/docs/huggingface_hub/package_reference/hf_api) to scrape the data from the Spaces built with Gradio. We kept track of those with permissive licenses, namely MIT and Apache 2.0. This set of code was further deduplicated. A minimal sketch of this collection step is shown after this list.

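As an illustration only, such a collection step could look roughly like the sketch below; the `filter` value, the `app.py` entry-point assumption, and the error handling are illustrative and not our exact crawling script.

```python
# Illustrative sketch of collecting Gradio Spaces code from the Hub (not the exact script used).
from huggingface_hub import HfApi, hf_hub_download

api = HfApi()

# List public Spaces tagged with the gradio SDK (the filter value is an assumption).
for space in api.list_spaces(filter="gradio", limit=10):
    try:
        # Most Gradio demos keep their entry point in app.py.
        app_path = hf_hub_download(repo_id=space.id, filename="app.py", repo_type="space")
        print(space.id, app_path)
    except Exception:
        # Skip Spaces that are gated or do not ship an app.py.
        continue
```
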
# Training setting and hyperparameters
For our fine-tuning, we decided to follow a two-step strategy:
- Pretraining (fine-tuning) with next-token prediction on the previously built Gradio dataset (this step should familiarize the model with the Gradio syntax).
- Instruction fine-tuning on an instruction dataset (this step should make the model conversational).

For both steps, we made use of parameter-efficient fine-tuning via the [PEFT](https://github.com/huggingface/peft) library, more precisely [LoRA](https://arxiv.org/abs/2106.09685). Our training script is the well-known [StarCoder fine-tuning script](https://github.com/bigcode-project/starcoder).

## Resources
Our training was done on 8 A100 GPUs with 80GB of memory each.

## Pretraining
These are the parameters that we used:
- learning rate: 5e-4
- gradient_accumulation_steps: 4
- batch_size: 1
- sequence length: 2048
- max_steps: 1000
- warmup_steps: 5
- weight_decay: 0.05
- learning rate scheduler: cosine

**LoRA parameters**:
- r = 16
- alpha = 32
- dropout = 0.05

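For illustration, the hyperparameters above translate roughly into the following PEFT and `transformers` configuration; this is a sketch, and the `target_modules` choice and any unlisted defaults are assumptions rather than a verbatim copy of our script.

```python
# Rough sketch of the LoRA / training configuration described above (illustrative only).
from peft import LoraConfig, TaskType
from transformers import TrainingArguments

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    # Assumed attention projection names for GPTBigCode; adjust if needed.
    target_modules=["c_attn", "c_proj"],
)

training_args = TrainingArguments(
    output_dir="./checkpoints",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=4,
    learning_rate=5e-4,
    lr_scheduler_type="cosine",
    warmup_steps=5,
    weight_decay=0.05,
    max_steps=1000,
)
```
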
We stopped the training before the end and kept *checkpoint-100* for the second step.

## Fine-tuning
This step consisted of instruction fine-tuning of the previous checkpoint. For that purpose, we used a modified version of [openassistant-guanaco](https://huggingface.co/datasets/timdettmers/openassistant-guanaco).
The template for the instruction fine-tuning was `Question: {question}\n\nAnswer: {answer}`. We used exactly the same parameters as during the pretraining and kept *checkpoint-50*.

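A minimal sketch of how an example is mapped to this template is shown below; the `question`/`answer` field names are assumptions about the modified dataset, not guaranteed column names.

```python
# Illustrative helper that applies the instruction fine-tuning template to one example.
def format_example(example: dict) -> str:
    # Field names are assumptions about the modified openassistant-guanaco data.
    return f"Question: {example['question']}\n\nAnswer: {example['answer']}"

sample = {
    "question": "How do I add a slider to a Gradio app?",
    "answer": "Pass gr.Slider(...) as an input component of gr.Interface, or use it inside gr.Blocks.",
}
print(format_example(sample))
```
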
# Usage
The usage is straightforward and very similar to that of any other instruction fine-tuned model.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint_name = "ArmelR/starcoder-gradio-v0"
model = AutoModelForCausalLM.from_pretrained(checkpoint_name)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_name)

prompt = "Create a gradio application that helps to convert temperature in Celsius into temperature in Fahrenheit"
inputs = tokenizer(f"Question: {prompt}\n\nAnswer: ", return_tensors="pt")

outputs = model.generate(
    inputs["input_ids"],
    do_sample=True,  # enable sampling so temperature / top_p take effect
    temperature=0.2,
    top_p=0.95,
    max_new_tokens=200,
)

# Decode only the newly generated tokens, skipping the prompt.
input_len = inputs["input_ids"].shape[1]
print(tokenizer.decode(outputs[0][input_len:]))
```
# Updates
Gradio dataset: `.filter(lambda x: ("gradio" in x["content"] or "gr." in x["content"]) and "streamlit" not in x["content"])`
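Applied with the `datasets` library, that filter looks roughly like the sketch below; the `data_dir` subset and the streaming flag are illustrative assumptions.

```python
# Illustrative application of the filter above to The Stack (dedup).
from datasets import load_dataset

ds = load_dataset(
    "bigcode/the-stack-dedup",
    data_dir="data/python",  # assumed subset; the actual selection may differ
    split="train",
    streaming=True,
)
gradio_ds = ds.filter(
    lambda x: ("gradio" in x["content"] or "gr." in x["content"])
    and "streamlit" not in x["content"]
)
```
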
Guanaco: `ArmelR/oasst1_guanaco`
- StarCoderBase (950, 1350)
  - max_steps = 2000
  - shuffle_buffer = 100
  - batch_size = 2
  - gradient_accumulation_steps = 4
  - num_warmup_steps = 100
  - weight_decay = 0.01
- StarCoderPlus (2000)

Guanaco multi-turn (HuggingFaceH4/oasst1_en)

# More information
For further information, refer to [StarCoder](https://huggingface.co/bigcode/starcoder).
config.json ADDED
@@ -0,0 +1,39 @@
{
  "_name_or_path": "bigcode/starcoderplus",
  "activation_function": "gelu",
  "architectures": [
    "GPTBigCodeForCausalLM"
  ],
  "attention_softmax_in_fp32": true,
  "attn_pdrop": 0.1,
  "bos_token_id": 0,
  "embd_pdrop": 0.1,
  "eos_token_id": 0,
  "inference_runner": 0,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "max_batch_size": null,
  "max_sequence_length": null,
  "model_type": "gpt_bigcode",
  "multi_query": true,
  "n_embd": 6144,
  "n_head": 48,
  "n_inner": 24576,
  "n_layer": 40,
  "n_positions": 8192,
  "pad_key_length": true,
  "pre_allocate_kv_cache": false,
  "resid_pdrop": 0.1,
  "scale_attention_softmax_in_fp32": true,
  "scale_attn_weights": true,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "torch_dtype": "float16",
  "transformers_version": "4.31.0",
  "use_cache": true,
  "validate_runner_input": true,
  "vocab_size": 49152
}
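As a quick sanity check, this configuration can be loaded and inspected with `transformers` (a sketch, using the checkpoint name given in the README):

```python
# Sketch: load and inspect the model configuration shipped with this repository.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("ArmelR/starcoder-gradio-v0")
print(config.model_type)                             # gpt_bigcode
print(config.n_layer, config.n_head, config.n_embd)  # 40 48 6144
```
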
generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "transformers_version": "4.31.0"
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,492 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 31034912768
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "pytorch_model-00001-of-00004.bin",
7
+ "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
8
+ "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
9
+ "transformer.h.0.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
10
+ "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
11
+ "transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00004.bin",
12
+ "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00004.bin",
13
+ "transformer.h.0.ln_2.bias": "pytorch_model-00001-of-00004.bin",
14
+ "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00004.bin",
15
+ "transformer.h.0.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
16
+ "transformer.h.0.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
17
+ "transformer.h.0.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
18
+ "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
19
+ "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
20
+ "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
21
+ "transformer.h.1.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
22
+ "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
23
+ "transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00004.bin",
24
+ "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00004.bin",
25
+ "transformer.h.1.ln_2.bias": "pytorch_model-00001-of-00004.bin",
26
+ "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00004.bin",
27
+ "transformer.h.1.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
28
+ "transformer.h.1.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
29
+ "transformer.h.1.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
30
+ "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
31
+ "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
32
+ "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
33
+ "transformer.h.10.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
34
+ "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
35
+ "transformer.h.10.ln_1.bias": "pytorch_model-00001-of-00004.bin",
36
+ "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00004.bin",
37
+ "transformer.h.10.ln_2.bias": "pytorch_model-00001-of-00004.bin",
38
+ "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00004.bin",
39
+ "transformer.h.10.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
40
+ "transformer.h.10.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
41
+ "transformer.h.10.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
42
+ "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
43
+ "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
44
+ "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
45
+ "transformer.h.11.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
46
+ "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
47
+ "transformer.h.11.ln_1.bias": "pytorch_model-00001-of-00004.bin",
48
+ "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00004.bin",
49
+ "transformer.h.11.ln_2.bias": "pytorch_model-00001-of-00004.bin",
50
+ "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00004.bin",
51
+ "transformer.h.11.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
52
+ "transformer.h.11.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
53
+ "transformer.h.11.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
54
+ "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
55
+ "transformer.h.12.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
56
+ "transformer.h.12.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
57
+ "transformer.h.12.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
58
+ "transformer.h.12.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
59
+ "transformer.h.12.ln_1.bias": "pytorch_model-00001-of-00004.bin",
60
+ "transformer.h.12.ln_1.weight": "pytorch_model-00001-of-00004.bin",
61
+ "transformer.h.12.ln_2.bias": "pytorch_model-00001-of-00004.bin",
62
+ "transformer.h.12.ln_2.weight": "pytorch_model-00001-of-00004.bin",
63
+ "transformer.h.12.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
64
+ "transformer.h.12.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
65
+ "transformer.h.12.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
66
+ "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
67
+ "transformer.h.13.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
68
+ "transformer.h.13.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
69
+ "transformer.h.13.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
70
+ "transformer.h.13.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
71
+ "transformer.h.13.ln_1.bias": "pytorch_model-00002-of-00004.bin",
72
+ "transformer.h.13.ln_1.weight": "pytorch_model-00002-of-00004.bin",
73
+ "transformer.h.13.ln_2.bias": "pytorch_model-00002-of-00004.bin",
74
+ "transformer.h.13.ln_2.weight": "pytorch_model-00002-of-00004.bin",
75
+ "transformer.h.13.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
76
+ "transformer.h.13.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
77
+ "transformer.h.13.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
78
+ "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
79
+ "transformer.h.14.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
80
+ "transformer.h.14.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
81
+ "transformer.h.14.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
82
+ "transformer.h.14.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
83
+ "transformer.h.14.ln_1.bias": "pytorch_model-00002-of-00004.bin",
84
+ "transformer.h.14.ln_1.weight": "pytorch_model-00002-of-00004.bin",
85
+ "transformer.h.14.ln_2.bias": "pytorch_model-00002-of-00004.bin",
86
+ "transformer.h.14.ln_2.weight": "pytorch_model-00002-of-00004.bin",
87
+ "transformer.h.14.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
88
+ "transformer.h.14.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
89
+ "transformer.h.14.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
90
+ "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
91
+ "transformer.h.15.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
92
+ "transformer.h.15.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
93
+ "transformer.h.15.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
94
+ "transformer.h.15.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
95
+ "transformer.h.15.ln_1.bias": "pytorch_model-00002-of-00004.bin",
96
+ "transformer.h.15.ln_1.weight": "pytorch_model-00002-of-00004.bin",
97
+ "transformer.h.15.ln_2.bias": "pytorch_model-00002-of-00004.bin",
98
+ "transformer.h.15.ln_2.weight": "pytorch_model-00002-of-00004.bin",
99
+ "transformer.h.15.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
100
+ "transformer.h.15.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
101
+ "transformer.h.15.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
102
+ "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
103
+ "transformer.h.16.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
104
+ "transformer.h.16.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
105
+ "transformer.h.16.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
106
+ "transformer.h.16.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
107
+ "transformer.h.16.ln_1.bias": "pytorch_model-00002-of-00004.bin",
108
+ "transformer.h.16.ln_1.weight": "pytorch_model-00002-of-00004.bin",
109
+ "transformer.h.16.ln_2.bias": "pytorch_model-00002-of-00004.bin",
110
+ "transformer.h.16.ln_2.weight": "pytorch_model-00002-of-00004.bin",
111
+ "transformer.h.16.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
112
+ "transformer.h.16.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
113
+ "transformer.h.16.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
114
+ "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
115
+ "transformer.h.17.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
116
+ "transformer.h.17.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
117
+ "transformer.h.17.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
118
+ "transformer.h.17.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
119
+ "transformer.h.17.ln_1.bias": "pytorch_model-00002-of-00004.bin",
120
+ "transformer.h.17.ln_1.weight": "pytorch_model-00002-of-00004.bin",
121
+ "transformer.h.17.ln_2.bias": "pytorch_model-00002-of-00004.bin",
122
+ "transformer.h.17.ln_2.weight": "pytorch_model-00002-of-00004.bin",
123
+ "transformer.h.17.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
124
+ "transformer.h.17.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
125
+ "transformer.h.17.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
126
+ "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
127
+ "transformer.h.18.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
128
+ "transformer.h.18.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
129
+ "transformer.h.18.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
130
+ "transformer.h.18.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
131
+ "transformer.h.18.ln_1.bias": "pytorch_model-00002-of-00004.bin",
132
+ "transformer.h.18.ln_1.weight": "pytorch_model-00002-of-00004.bin",
133
+ "transformer.h.18.ln_2.bias": "pytorch_model-00002-of-00004.bin",
134
+ "transformer.h.18.ln_2.weight": "pytorch_model-00002-of-00004.bin",
135
+ "transformer.h.18.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
136
+ "transformer.h.18.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
137
+ "transformer.h.18.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
138
+ "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
139
+ "transformer.h.19.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
140
+ "transformer.h.19.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
141
+ "transformer.h.19.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
142
+ "transformer.h.19.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
143
+ "transformer.h.19.ln_1.bias": "pytorch_model-00002-of-00004.bin",
144
+ "transformer.h.19.ln_1.weight": "pytorch_model-00002-of-00004.bin",
145
+ "transformer.h.19.ln_2.bias": "pytorch_model-00002-of-00004.bin",
146
+ "transformer.h.19.ln_2.weight": "pytorch_model-00002-of-00004.bin",
147
+ "transformer.h.19.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
148
+ "transformer.h.19.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
149
+ "transformer.h.19.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
150
+ "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
151
+ "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
152
+ "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
153
+ "transformer.h.2.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
154
+ "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
155
+ "transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00004.bin",
156
+ "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00004.bin",
157
+ "transformer.h.2.ln_2.bias": "pytorch_model-00001-of-00004.bin",
158
+ "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00004.bin",
159
+ "transformer.h.2.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
160
+ "transformer.h.2.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
161
+ "transformer.h.2.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
162
+ "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
163
+ "transformer.h.20.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
164
+ "transformer.h.20.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
165
+ "transformer.h.20.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
166
+ "transformer.h.20.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
167
+ "transformer.h.20.ln_1.bias": "pytorch_model-00002-of-00004.bin",
168
+ "transformer.h.20.ln_1.weight": "pytorch_model-00002-of-00004.bin",
169
+ "transformer.h.20.ln_2.bias": "pytorch_model-00002-of-00004.bin",
170
+ "transformer.h.20.ln_2.weight": "pytorch_model-00002-of-00004.bin",
171
+ "transformer.h.20.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
172
+ "transformer.h.20.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
173
+ "transformer.h.20.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
174
+ "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
175
+ "transformer.h.21.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
176
+ "transformer.h.21.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
177
+ "transformer.h.21.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
178
+ "transformer.h.21.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
179
+ "transformer.h.21.ln_1.bias": "pytorch_model-00002-of-00004.bin",
180
+ "transformer.h.21.ln_1.weight": "pytorch_model-00002-of-00004.bin",
181
+ "transformer.h.21.ln_2.bias": "pytorch_model-00002-of-00004.bin",
182
+ "transformer.h.21.ln_2.weight": "pytorch_model-00002-of-00004.bin",
183
+ "transformer.h.21.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
184
+ "transformer.h.21.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
185
+ "transformer.h.21.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
186
+ "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
187
+ "transformer.h.22.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
188
+ "transformer.h.22.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
189
+ "transformer.h.22.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
190
+ "transformer.h.22.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
191
+ "transformer.h.22.ln_1.bias": "pytorch_model-00002-of-00004.bin",
192
+ "transformer.h.22.ln_1.weight": "pytorch_model-00002-of-00004.bin",
193
+ "transformer.h.22.ln_2.bias": "pytorch_model-00002-of-00004.bin",
194
+ "transformer.h.22.ln_2.weight": "pytorch_model-00002-of-00004.bin",
195
+ "transformer.h.22.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
196
+ "transformer.h.22.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
197
+ "transformer.h.22.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
198
+ "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
199
+ "transformer.h.23.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
200
+ "transformer.h.23.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
201
+ "transformer.h.23.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
202
+ "transformer.h.23.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
203
+ "transformer.h.23.ln_1.bias": "pytorch_model-00002-of-00004.bin",
204
+ "transformer.h.23.ln_1.weight": "pytorch_model-00002-of-00004.bin",
205
+ "transformer.h.23.ln_2.bias": "pytorch_model-00002-of-00004.bin",
206
+ "transformer.h.23.ln_2.weight": "pytorch_model-00002-of-00004.bin",
207
+ "transformer.h.23.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
208
+ "transformer.h.23.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
209
+ "transformer.h.23.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
210
+ "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
211
+ "transformer.h.24.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
212
+ "transformer.h.24.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
213
+ "transformer.h.24.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
214
+ "transformer.h.24.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
215
+ "transformer.h.24.ln_1.bias": "pytorch_model-00002-of-00004.bin",
216
+ "transformer.h.24.ln_1.weight": "pytorch_model-00002-of-00004.bin",
217
+ "transformer.h.24.ln_2.bias": "pytorch_model-00002-of-00004.bin",
218
+ "transformer.h.24.ln_2.weight": "pytorch_model-00002-of-00004.bin",
219
+ "transformer.h.24.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
220
+ "transformer.h.24.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
221
+ "transformer.h.24.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
222
+ "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
223
+ "transformer.h.25.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
224
+ "transformer.h.25.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
225
+ "transformer.h.25.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
226
+ "transformer.h.25.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
227
+ "transformer.h.25.ln_1.bias": "pytorch_model-00002-of-00004.bin",
228
+ "transformer.h.25.ln_1.weight": "pytorch_model-00002-of-00004.bin",
229
+ "transformer.h.25.ln_2.bias": "pytorch_model-00002-of-00004.bin",
230
+ "transformer.h.25.ln_2.weight": "pytorch_model-00002-of-00004.bin",
231
+ "transformer.h.25.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
232
+ "transformer.h.25.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
233
+ "transformer.h.25.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
234
+ "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
235
+ "transformer.h.26.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
236
+ "transformer.h.26.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
237
+ "transformer.h.26.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
238
+ "transformer.h.26.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
239
+ "transformer.h.26.ln_1.bias": "pytorch_model-00003-of-00004.bin",
240
+ "transformer.h.26.ln_1.weight": "pytorch_model-00003-of-00004.bin",
241
+ "transformer.h.26.ln_2.bias": "pytorch_model-00003-of-00004.bin",
242
+ "transformer.h.26.ln_2.weight": "pytorch_model-00003-of-00004.bin",
243
+ "transformer.h.26.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
244
+ "transformer.h.26.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
245
+ "transformer.h.26.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
246
+ "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
247
+ "transformer.h.27.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
248
+ "transformer.h.27.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
249
+ "transformer.h.27.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
250
+ "transformer.h.27.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
251
+ "transformer.h.27.ln_1.bias": "pytorch_model-00003-of-00004.bin",
252
+ "transformer.h.27.ln_1.weight": "pytorch_model-00003-of-00004.bin",
253
+ "transformer.h.27.ln_2.bias": "pytorch_model-00003-of-00004.bin",
254
+ "transformer.h.27.ln_2.weight": "pytorch_model-00003-of-00004.bin",
255
+ "transformer.h.27.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
256
+ "transformer.h.27.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
257
+ "transformer.h.27.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
258
+ "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
259
+ "transformer.h.28.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
260
+ "transformer.h.28.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
261
+ "transformer.h.28.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
262
+ "transformer.h.28.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
263
+ "transformer.h.28.ln_1.bias": "pytorch_model-00003-of-00004.bin",
264
+ "transformer.h.28.ln_1.weight": "pytorch_model-00003-of-00004.bin",
265
+ "transformer.h.28.ln_2.bias": "pytorch_model-00003-of-00004.bin",
266
+ "transformer.h.28.ln_2.weight": "pytorch_model-00003-of-00004.bin",
267
+ "transformer.h.28.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
268
+ "transformer.h.28.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
269
+ "transformer.h.28.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
270
+ "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
271
+ "transformer.h.29.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
272
+ "transformer.h.29.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
273
+ "transformer.h.29.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
274
+ "transformer.h.29.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
275
+ "transformer.h.29.ln_1.bias": "pytorch_model-00003-of-00004.bin",
276
+ "transformer.h.29.ln_1.weight": "pytorch_model-00003-of-00004.bin",
277
+ "transformer.h.29.ln_2.bias": "pytorch_model-00003-of-00004.bin",
278
+ "transformer.h.29.ln_2.weight": "pytorch_model-00003-of-00004.bin",
279
+ "transformer.h.29.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
280
+ "transformer.h.29.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
281
+ "transformer.h.29.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
282
+ "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
283
+ "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
284
+ "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
285
+ "transformer.h.3.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
286
+ "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
287
+ "transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00004.bin",
288
+ "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00004.bin",
289
+ "transformer.h.3.ln_2.bias": "pytorch_model-00001-of-00004.bin",
290
+ "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00004.bin",
291
+ "transformer.h.3.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
292
+ "transformer.h.3.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
293
+ "transformer.h.3.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
294
+ "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
295
+ "transformer.h.30.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
296
+ "transformer.h.30.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
297
+ "transformer.h.30.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
298
+ "transformer.h.30.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
299
+ "transformer.h.30.ln_1.bias": "pytorch_model-00003-of-00004.bin",
300
+ "transformer.h.30.ln_1.weight": "pytorch_model-00003-of-00004.bin",
301
+ "transformer.h.30.ln_2.bias": "pytorch_model-00003-of-00004.bin",
302
+ "transformer.h.30.ln_2.weight": "pytorch_model-00003-of-00004.bin",
303
+ "transformer.h.30.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
304
+ "transformer.h.30.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
305
+ "transformer.h.30.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
306
+ "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
307
+ "transformer.h.31.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
308
+ "transformer.h.31.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
309
+ "transformer.h.31.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
310
+ "transformer.h.31.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
311
+ "transformer.h.31.ln_1.bias": "pytorch_model-00003-of-00004.bin",
312
+ "transformer.h.31.ln_1.weight": "pytorch_model-00003-of-00004.bin",
313
+ "transformer.h.31.ln_2.bias": "pytorch_model-00003-of-00004.bin",
314
+ "transformer.h.31.ln_2.weight": "pytorch_model-00003-of-00004.bin",
315
+ "transformer.h.31.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
316
+ "transformer.h.31.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
317
+ "transformer.h.31.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
318
+ "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
319
+ "transformer.h.32.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
320
+ "transformer.h.32.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
321
+ "transformer.h.32.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
322
+ "transformer.h.32.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
323
+ "transformer.h.32.ln_1.bias": "pytorch_model-00003-of-00004.bin",
324
+ "transformer.h.32.ln_1.weight": "pytorch_model-00003-of-00004.bin",
325
+ "transformer.h.32.ln_2.bias": "pytorch_model-00003-of-00004.bin",
326
+ "transformer.h.32.ln_2.weight": "pytorch_model-00003-of-00004.bin",
327
+ "transformer.h.32.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
328
+ "transformer.h.32.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
329
+ "transformer.h.32.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
330
+ "transformer.h.32.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
331
+ "transformer.h.33.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
332
+ "transformer.h.33.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
333
+ "transformer.h.33.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
334
+ "transformer.h.33.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
335
+ "transformer.h.33.ln_1.bias": "pytorch_model-00003-of-00004.bin",
336
+ "transformer.h.33.ln_1.weight": "pytorch_model-00003-of-00004.bin",
337
+ "transformer.h.33.ln_2.bias": "pytorch_model-00003-of-00004.bin",
338
+ "transformer.h.33.ln_2.weight": "pytorch_model-00003-of-00004.bin",
339
+ "transformer.h.33.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
340
+ "transformer.h.33.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
341
+ "transformer.h.33.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
342
+ "transformer.h.33.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
343
+ "transformer.h.34.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
344
+ "transformer.h.34.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
345
+ "transformer.h.34.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
346
+ "transformer.h.34.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
347
+ "transformer.h.34.ln_1.bias": "pytorch_model-00003-of-00004.bin",
348
+ "transformer.h.34.ln_1.weight": "pytorch_model-00003-of-00004.bin",
349
+ "transformer.h.34.ln_2.bias": "pytorch_model-00003-of-00004.bin",
350
+ "transformer.h.34.ln_2.weight": "pytorch_model-00003-of-00004.bin",
351
+ "transformer.h.34.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
352
+ "transformer.h.34.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
353
+ "transformer.h.34.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
354
+ "transformer.h.34.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
355
+ "transformer.h.35.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
356
+ "transformer.h.35.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
357
+ "transformer.h.35.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
358
+ "transformer.h.35.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
359
+ "transformer.h.35.ln_1.bias": "pytorch_model-00003-of-00004.bin",
360
+ "transformer.h.35.ln_1.weight": "pytorch_model-00003-of-00004.bin",
361
+ "transformer.h.35.ln_2.bias": "pytorch_model-00003-of-00004.bin",
362
+ "transformer.h.35.ln_2.weight": "pytorch_model-00003-of-00004.bin",
363
+ "transformer.h.35.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
364
+ "transformer.h.35.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
365
+ "transformer.h.35.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
366
+ "transformer.h.35.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
367
+ "transformer.h.36.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
368
+ "transformer.h.36.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
369
+ "transformer.h.36.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
370
+ "transformer.h.36.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
371
+ "transformer.h.36.ln_1.bias": "pytorch_model-00003-of-00004.bin",
372
+ "transformer.h.36.ln_1.weight": "pytorch_model-00003-of-00004.bin",
373
+ "transformer.h.36.ln_2.bias": "pytorch_model-00003-of-00004.bin",
374
+ "transformer.h.36.ln_2.weight": "pytorch_model-00003-of-00004.bin",
375
+ "transformer.h.36.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
376
+ "transformer.h.36.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
377
+ "transformer.h.36.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
378
+ "transformer.h.36.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
379
+ "transformer.h.37.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
380
+ "transformer.h.37.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
381
+ "transformer.h.37.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
382
+ "transformer.h.37.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
383
+ "transformer.h.37.ln_1.bias": "pytorch_model-00003-of-00004.bin",
384
+ "transformer.h.37.ln_1.weight": "pytorch_model-00003-of-00004.bin",
385
+ "transformer.h.37.ln_2.bias": "pytorch_model-00003-of-00004.bin",
386
+ "transformer.h.37.ln_2.weight": "pytorch_model-00003-of-00004.bin",
387
+ "transformer.h.37.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
388
+ "transformer.h.37.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
389
+ "transformer.h.37.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
390
+ "transformer.h.37.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
391
+ "transformer.h.38.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
392
+ "transformer.h.38.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
393
+ "transformer.h.38.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
394
+ "transformer.h.38.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
395
+ "transformer.h.38.ln_1.bias": "pytorch_model-00003-of-00004.bin",
396
+ "transformer.h.38.ln_1.weight": "pytorch_model-00003-of-00004.bin",
397
+ "transformer.h.38.ln_2.bias": "pytorch_model-00003-of-00004.bin",
398
+ "transformer.h.38.ln_2.weight": "pytorch_model-00003-of-00004.bin",
399
+ "transformer.h.38.mlp.c_fc.bias": "pytorch_model-00004-of-00004.bin",
400
+ "transformer.h.38.mlp.c_fc.weight": "pytorch_model-00004-of-00004.bin",
401
+ "transformer.h.38.mlp.c_proj.bias": "pytorch_model-00004-of-00004.bin",
402
+ "transformer.h.38.mlp.c_proj.weight": "pytorch_model-00004-of-00004.bin",
403
+ "transformer.h.39.attn.c_attn.bias": "pytorch_model-00004-of-00004.bin",
404
+ "transformer.h.39.attn.c_attn.weight": "pytorch_model-00004-of-00004.bin",
405
+ "transformer.h.39.attn.c_proj.bias": "pytorch_model-00004-of-00004.bin",
406
+ "transformer.h.39.attn.c_proj.weight": "pytorch_model-00004-of-00004.bin",
407
+ "transformer.h.39.ln_1.bias": "pytorch_model-00004-of-00004.bin",
408
+ "transformer.h.39.ln_1.weight": "pytorch_model-00004-of-00004.bin",
409
+ "transformer.h.39.ln_2.bias": "pytorch_model-00004-of-00004.bin",
410
+ "transformer.h.39.ln_2.weight": "pytorch_model-00004-of-00004.bin",
411
+ "transformer.h.39.mlp.c_fc.bias": "pytorch_model-00004-of-00004.bin",
412
+ "transformer.h.39.mlp.c_fc.weight": "pytorch_model-00004-of-00004.bin",
413
+ "transformer.h.39.mlp.c_proj.bias": "pytorch_model-00004-of-00004.bin",
414
+ "transformer.h.39.mlp.c_proj.weight": "pytorch_model-00004-of-00004.bin",
415
+ "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
416
+ "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
417
+ "transformer.h.4.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
418
+ "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
419
+ "transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00004.bin",
420
+ "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00004.bin",
421
+ "transformer.h.4.ln_2.bias": "pytorch_model-00001-of-00004.bin",
422
+ "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00004.bin",
423
+ "transformer.h.4.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
424
+ "transformer.h.4.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
425
+ "transformer.h.4.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
426
+ "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
427
+ "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
428
+ "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
429
+ "transformer.h.5.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
430
+ "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
431
+ "transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00004.bin",
432
+ "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00004.bin",
433
+ "transformer.h.5.ln_2.bias": "pytorch_model-00001-of-00004.bin",
434
+ "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00004.bin",
435
+ "transformer.h.5.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
436
+ "transformer.h.5.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
437
+ "transformer.h.5.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
438
+ "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
439
+ "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
440
+ "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
441
+ "transformer.h.6.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
442
+ "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
443
+ "transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00004.bin",
444
+ "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00004.bin",
445
+ "transformer.h.6.ln_2.bias": "pytorch_model-00001-of-00004.bin",
446
+ "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00004.bin",
447
+ "transformer.h.6.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
448
+ "transformer.h.6.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
449
+ "transformer.h.6.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
450
+ "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
451
+ "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
452
+ "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
453
+ "transformer.h.7.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
454
+ "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
455
+ "transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00004.bin",
456
+ "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00004.bin",
457
+ "transformer.h.7.ln_2.bias": "pytorch_model-00001-of-00004.bin",
458
+ "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00004.bin",
459
+ "transformer.h.7.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
460
+ "transformer.h.7.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
461
+ "transformer.h.7.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
462
+ "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
463
+ "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
464
+ "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
465
+ "transformer.h.8.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
466
+ "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
467
+ "transformer.h.8.ln_1.bias": "pytorch_model-00001-of-00004.bin",
468
+ "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00004.bin",
469
+ "transformer.h.8.ln_2.bias": "pytorch_model-00001-of-00004.bin",
470
+ "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00004.bin",
471
+ "transformer.h.8.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
472
+ "transformer.h.8.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
473
+ "transformer.h.8.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
474
+ "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
475
+ "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
476
+ "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
477
+ "transformer.h.9.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
478
+ "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
479
+ "transformer.h.9.ln_1.bias": "pytorch_model-00001-of-00004.bin",
480
+ "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00004.bin",
481
+ "transformer.h.9.ln_2.bias": "pytorch_model-00001-of-00004.bin",
482
+ "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00004.bin",
483
+ "transformer.h.9.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
484
+ "transformer.h.9.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
485
+ "transformer.h.9.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
486
+ "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
487
+ "transformer.ln_f.bias": "pytorch_model-00004-of-00004.bin",
488
+ "transformer.ln_f.weight": "pytorch_model-00004-of-00004.bin",
489
+ "transformer.wpe.weight": "pytorch_model-00001-of-00004.bin",
490
+ "transformer.wte.weight": "pytorch_model-00001-of-00004.bin"
491
+ }
492
+ }
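The weight map above associates every parameter name with the shard file that stores it; a short sketch of reading the index locally (assuming the file sits in the current directory):

```python
# Sketch: inspect the sharded-checkpoint index shipped with this repository.
import json
from collections import Counter

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # total checkpoint size in bytes
print(index["weight_map"]["lm_head.weight"])  # shard holding the LM head
print(Counter(index["weight_map"].values()))  # number of tensors per shard file
```
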
special_tokens_map.json ADDED
@@ -0,0 +1,26 @@
{
  "additional_special_tokens": [
    "<|endoftext|>",
    "<fim_prefix>",
    "<fim_middle>",
    "<fim_suffix>",
    "<fim_pad>",
    "<filename>",
    "<gh_stars>",
    "<issue_start>",
    "<issue_comment>",
    "<issue_closed>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<empty_output>",
    "<commit_before>",
    "<commit_msg>",
    "<commit_after>",
    "<reponame>"
  ],
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "unk_token": "<|endoftext|>"
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
{
  "add_prefix_space": false,
  "additional_special_tokens": [
    "<|endoftext|>",
    "<fim_prefix>",
    "<fim_middle>",
    "<fim_suffix>",
    "<fim_pad>",
    "<filename>",
    "<gh_stars>",
    "<issue_start>",
    "<issue_comment>",
    "<issue_closed>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<empty_output>",
    "<commit_before>",
    "<commit_msg>",
    "<commit_after>",
    "<reponame>"
  ],
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "model_max_length": 1000000000000000019884624838656,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>",
  "vocab_size": 49152
}
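The special tokens declared above can be verified after loading the tokenizer (a sketch, using the checkpoint name given in the README):

```python
# Sketch: confirm the declared special tokens are registered on the tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ArmelR/starcoder-gradio-v0")
print(tokenizer.eos_token)                      # <|endoftext|>
print(tokenizer.additional_special_tokens[:5])  # first few FIM / metadata tokens
print(tokenizer.vocab_size)                     # 49152
```
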
vocab.json ADDED
The diff for this file is too large to render. See raw diff