mluca committed
Commit 38198ce · 1 parent: 31651b3

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ tags:
+ - autotrain
+ - text-generation
+ widget:
+ - text: "I love AutoTrain because "
+ ---
+
+ # Model Trained Using AutoTrain
adapter_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "gpt2-medium",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "c_attn"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
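
The adapter config above describes a LoRA adapter (r=16, alpha=32, dropout 0.05) on the `c_attn` projection of `gpt2-medium`. A minimal loading sketch using PEFT's standard API; the `"./"` path is a placeholder for a local clone of this repo (or the hub repo id), not something the commit specifies:

```python
# Minimal sketch: load gpt2-medium and apply the LoRA adapter above.
# "./" is a placeholder -- substitute the actual repo id or directory.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("gpt2-medium")
# Reads adapter_config.json + adapter_model.bin from the given path.
model = PeftModel.from_pretrained(base, "./")
tokenizer = AutoTokenizer.from_pretrained("gpt2-medium")

# The widget prompt from README.md above:
inputs = tokenizer("I love AutoTrain because ", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=40, do_sample=True)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```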
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3720265f63f84254b115e8a91a70faa4d66d4e8b3361116fc5ce3b170c7582b
+ size 6308225
checkpoint-102/README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.4.0
+
+ - PEFT 0.4.0
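
The quantization block appears twice because PEFT 0.4 appends one copy per training session; both copies are identical. The settings correspond to a QLoRA-style 4-bit NF4 setup. A sketch of the equivalent `BitsAndBytesConfig`, reconstructed from the fields above (not taken from the commit itself; requires a CUDA GPU with `bitsandbytes` installed):

```python
# Reconstruction of the bitsandbytes config listed in the README above;
# the field names map 1:1 onto BitsAndBytesConfig arguments in
# transformers 4.31.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
)
base = AutoModelForCausalLM.from_pretrained(
    "gpt2-medium", quantization_config=bnb_config
)
```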
checkpoint-102/adapter_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "gpt2-medium",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "c_attn"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-102/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3720265f63f84254b115e8a91a70faa4d66d4e8b3361116fc5ce3b170c7582b
+ size 6308225
checkpoint-102/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-102/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e30cd7e5fdb8fca2827e61df4bfa88f2c4df06021e4503daf485e875bf8345e0
+ size 12623109
checkpoint-102/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c152074a486243089e4fc0fdee0a373a30fb0e0a6e40eb5fd0d36fdafc97a155
+ size 443
checkpoint-102/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6e1ec1ef1f7c01e2b2639b0504755dda6b01eec34f36e787b71285d023d5f62
+ size 14575
checkpoint-102/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52f9984fd1b97fd57f7922d85f53e112a12c7b483b0a55b762927ced3bbc11dd
+ size 627
checkpoint-102/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-102/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-102/tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
checkpoint-102/trainer_state.json ADDED
@@ -0,0 +1,118 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "global_step": 102,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.18,
+       "learning_rate": 0.00010909090909090909,
+       "loss": 0.2103,
+       "step": 6
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 0.0001978021978021978,
+       "loss": 0.1736,
+       "step": 12
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 0.00018461538461538463,
+       "loss": 0.1319,
+       "step": 18
+     },
+     {
+       "epoch": 0.71,
+       "learning_rate": 0.00017142857142857143,
+       "loss": 0.1167,
+       "step": 24
+     },
+     {
+       "epoch": 0.88,
+       "learning_rate": 0.00015824175824175824,
+       "loss": 0.1057,
+       "step": 30
+     },
+     {
+       "epoch": 1.06,
+       "learning_rate": 0.00014505494505494506,
+       "loss": 0.0955,
+       "step": 36
+     },
+     {
+       "epoch": 1.24,
+       "learning_rate": 0.00013186813186813188,
+       "loss": 0.0915,
+       "step": 42
+     },
+     {
+       "epoch": 1.41,
+       "learning_rate": 0.00011868131868131869,
+       "loss": 0.0833,
+       "step": 48
+     },
+     {
+       "epoch": 1.59,
+       "learning_rate": 0.0001054945054945055,
+       "loss": 0.0807,
+       "step": 54
+     },
+     {
+       "epoch": 1.76,
+       "learning_rate": 9.230769230769232e-05,
+       "loss": 0.0751,
+       "step": 60
+     },
+     {
+       "epoch": 1.94,
+       "learning_rate": 7.912087912087912e-05,
+       "loss": 0.0723,
+       "step": 66
+     },
+     {
+       "epoch": 2.12,
+       "learning_rate": 6.593406593406594e-05,
+       "loss": 0.0655,
+       "step": 72
+     },
+     {
+       "epoch": 2.29,
+       "learning_rate": 5.274725274725275e-05,
+       "loss": 0.0635,
+       "step": 78
+     },
+     {
+       "epoch": 2.47,
+       "learning_rate": 3.956043956043956e-05,
+       "loss": 0.0605,
+       "step": 84
+     },
+     {
+       "epoch": 2.65,
+       "learning_rate": 2.6373626373626374e-05,
+       "loss": 0.0592,
+       "step": 90
+     },
+     {
+       "epoch": 2.82,
+       "learning_rate": 1.3186813186813187e-05,
+       "loss": 0.0564,
+       "step": 96
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 0.0,
+       "loss": 0.0547,
+       "step": 102
+     }
+   ],
+   "max_steps": 102,
+   "num_train_epochs": 3,
+   "total_flos": 191627834425344.0,
+   "trial_name": null,
+   "trial_params": null
+ }
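
The log above traces a linear learning-rate schedule: warmup to a ~2e-4 peak, then linear decay to zero at step 102. A sketch that reproduces the logged values; the peak LR and the 11 warmup steps are inferred from the numbers, not recorded anywhere in the commit:

```python
# Reproduce the learning-rate schedule implied by trainer_state.json:
# linear warmup, then linear decay to zero at max_steps.
# PEAK_LR and WARMUP_STEPS are inferred from the logged values -- guesses.
PEAK_LR = 2e-4
WARMUP_STEPS = 11
MAX_STEPS = 102

def lr_at(step: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    return PEAK_LR * (MAX_STEPS - step) / (MAX_STEPS - WARMUP_STEPS)

# Spot-check against entries from the log above.
for step, logged in [(6, 0.00010909090909090909),
                     (12, 0.0001978021978021978),
                     (54, 0.0001054945054945055),
                     (102, 0.0)]:
    assert abs(lr_at(step) - logged) < 1e-12, step
print("schedule matches the logged learning rates")
```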
checkpoint-102/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5ca631338d505448edc0b68c9d38d7d97b0e1fdccbfa5d178d1d75631e1b75a
+ size 3963
checkpoint-102/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "gpt2-medium",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 1024,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 24,
+   "n_positions": 1024,
+   "n_special": 0,
+   "predict_special_tokens": true,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float16",
+   "transformers_version": "4.31.0",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.31.0"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f795ec7feda8ad4bacdaf12fbfdbe29c4ee1334323302d2be6f9cf579cc27782
+ size 709737437
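
At ~710 MB, this top-level `pytorch_model.bin` is plausibly a full fp16 `gpt2-medium` checkpoint (355M parameters × 2 bytes, consistent with `torch_dtype: float16` in `config.json` above), i.e. the merged model rather than just the adapter. If so, it can presumably be loaded directly without PEFT; the repo path below is a placeholder:

```python
# Assumes the top-level pytorch_model.bin is a full merged checkpoint --
# an inference from its ~710 MB size, not something the commit states.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "path/to/this/repo",  # placeholder: local clone or hub repo id
    torch_dtype=torch.float16,
)
```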
runs/Aug01_10-14-32_1a6c8c49da0b/events.out.tfevents.1690884873.1a6c8c49da0b.10705.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18cbcdf988d2c9638e0917ec26a1fcfa4b1ffe8f2940aad47068728716ca4e68
+ size 7663
runs/Aug01_10-21-11_1a6c8c49da0b/events.out.tfevents.1690885271.1a6c8c49da0b.12468.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6c355746bca85d808ee48995f3897440d22d507ba21ff85266d52a51c502c3d
+ size 7663
runs/Aug01_11-58-48_1a6c8c49da0b/events.out.tfevents.1690891128.1a6c8c49da0b.37132.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d8b3ef3fa0bd30dd24e33984a703876bdba797e5d2d69d2674e07b014ec4667
+ size 7663
runs/Aug01_12-06-23_1a6c8c49da0b/events.out.tfevents.1690891583.1a6c8c49da0b.39062.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f2aab95a3676ab616cc8e441df992b772a2e9798c751da28592ea10b714a01c
+ size 7663
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5ca631338d505448edc0b68c9d38d7d97b0e1fdccbfa5d178d1d75631e1b75a
+ size 3963
vocab.json ADDED
The diff for this file is too large to render. See raw diff