gguichard committed on
Commit 40a91e3 (1 parent: 88fca7a)

Training in progress, epoch 1, checkpoint

checkpoint-4/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 4.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.5.0
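
A minimal Python sketch (not part of this commit) of the quantization settings listed in the README above, expressed as a `transformers.BitsAndBytesConfig`; the exact transformers version is an assumption:

```python
import torch
from transformers import BitsAndBytesConfig

# Reconstruction of the bitsandbytes config listed in checkpoint-4/README.md;
# keyword names map one-to-one onto the bullet list above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=4.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```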
checkpoint-4/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "flaubert/flaubert_large_cased",
+ "bias": "all",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "modules_to_save": [
+ "classifier.bias",
+ "classifier.weight"
+ ],
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "q_lin",
+ "k_lin",
+ "v_lin",
+ "out_lin"
+ ],
+ "task_type": "TOKEN_CLS"
+ }
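
A minimal Python sketch (not part of this commit) of loading this LoRA adapter onto its FlauBERT base model with PEFT; the local directory name `checkpoint-4` and `num_labels=2` are assumptions, since the label count is not recorded in this commit:

```python
from transformers import AutoModelForTokenClassification
from peft import PeftModel

# Base model taken from base_model_name_or_path in adapter_config.json.
base = AutoModelForTokenClassification.from_pretrained(
    "flaubert/flaubert_large_cased",
    num_labels=2,  # assumption: the real label count is not part of this checkpoint
)

# Applies the LoRA weights (r=16, lora_alpha=16, dropout=0.1 on q_lin/k_lin/v_lin/out_lin)
# and restores the saved classifier.weight / classifier.bias from modules_to_save.
model = PeftModel.from_pretrained(base, "checkpoint-4")
model.eval()
```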
checkpoint-4/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69d01cdfa8b30ff745d5e612123a815e0dbf4ddb3e1700172d7314d8d02e5d09
+ size 30440349
checkpoint-4/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7815b4c9010c60064ea3325b4eee9b32c34b150449f33c6c62e8951c223fbe3d
+ size 60898982
checkpoint-4/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5336281f41cb1f2a85ef1320992a24517ea5d9b76a7aacbdce850019a0f51d6
+ size 14575
checkpoint-4/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae3094f0027b432eee076116a5efe082c1c77789c9c2288da023fc9e05266808
+ size 627
checkpoint-4/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<special0>",
+ "<special1>",
+ "<special2>",
+ "<special3>",
+ "<special4>",
+ "<special5>",
+ "<special6>",
+ "<special7>",
+ "<special8>",
+ "<special9>"
+ ],
+ "bos_token": "<s>",
+ "cls_token": "</s>",
+ "mask_token": "<special1>",
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "unk_token": "<unk>"
+ }
checkpoint-4/tokenizer_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "additional_special_tokens": [
+ "<special0>",
+ "<special1>",
+ "<special2>",
+ "<special3>",
+ "<special4>",
+ "<special5>",
+ "<special6>",
+ "<special7>",
+ "<special8>",
+ "<special9>"
+ ],
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "</s>",
+ "do_lower_case": false,
+ "id2lang": null,
+ "lang2id": null,
+ "mask_token": "<special1>",
+ "model_max_length": 512,
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "tokenizer_class": "FlaubertTokenizer",
+ "unk_token": "<unk>"
+ }
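
A minimal Python sketch (not part of this commit) of loading the tokenizer saved in this checkpoint; the local path `checkpoint-4` is an assumption:

```python
from transformers import AutoTokenizer

# Picks up tokenizer_config.json, vocab.json, merges.txt and special_tokens_map.json
# from the checkpoint directory (FlaubertTokenizer, model_max_length=512, cased).
tokenizer = AutoTokenizer.from_pretrained("checkpoint-4")

enc = tokenizer("Bonjour le monde", return_tensors="pt", truncation=True)
print(enc["input_ids"].shape)
```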
checkpoint-4/trainer_state.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 4,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_loss": NaN,
+ "eval_runtime": 11.7568,
+ "eval_samples_per_second": 430.984,
+ "eval_steps_per_second": 13.524,
+ "step": 4
+ }
+ ],
+ "logging_steps": 2000,
+ "max_steps": 400,
+ "num_train_epochs": 100,
+ "save_steps": 500,
+ "total_flos": 7161154002048.0,
+ "trial_name": null,
+ "trial_params": null
+ }
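
A minimal Python sketch (not part of this commit) of resuming training from this checkpoint, which according to trainer_state.json stopped at global_step 4 of max_steps 400; `trainer` is a hypothetical, already-configured `transformers.Trainer`:

```python
# Assumption: `trainer` was built with the same model, datasets and TrainingArguments
# used for this run; resuming restores optimizer.pt, scheduler.pt and rng_state.pth.
trainer.train(resume_from_checkpoint="checkpoint-4")
```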
checkpoint-4/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:238151b110e2e4dfc6ebf1dd217dc2b2ee63a49132a41b9b3baa63652e9edd25
+ size 4219
checkpoint-4/vocab.json ADDED
The diff for this file is too large to render. See raw diff