Plim committed on
Commit 1fb6b5d • 1 Parent(s): 565d898

End of training

all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 0.38,
+   "eval_loss": 16.912879943847656,
+   "eval_runtime": 8.6337,
+   "eval_samples": 200,
+   "eval_samples_per_second": 23.165,
+   "eval_steps_per_second": 2.896,
+   "eval_wer": 2.3789039481437833,
+   "train_loss": 13.584136962890625,
+   "train_runtime": 23.2007,
+   "train_samples": 1000,
+   "train_samples_per_second": 17.241,
+   "train_steps_per_second": 0.259
+ }
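As a quick usage note (not part of the commit): a minimal sketch for reading these aggregated metrics back with the Python standard library, assuming the file sits in the repository root. Treating `eval_wer` as a raw fraction rather than a percentage is an assumption about the evaluation script.

```python
import json

# Read the aggregated train/eval metrics written at the end of training.
with open("all_results.json") as f:
    results = json.load(f)

# Assumption: eval_wer is a fraction (2.38 ~ 238% WER), which is plausible
# for a checkpoint trained for only 6 optimizer steps.
print(f"eval_loss = {results['eval_loss']:.4f}")
print(f"eval_wer  = {results['eval_wer']:.4f}")
```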
config.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+   "activation_dropout": 0.1,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 64,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.25,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.75,
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 40,
+   "proj_codevector_dim": 768,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.17.0.dev0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 41,
+   "xvector_output_dim": 512
+ }
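For context, a minimal sketch of loading this configuration together with the committed weights via transformers. The repository id `Plim/xls-r-300m-fr` is taken from the push log further down; that the hosted revision matches this exact commit is an assumption.

```python
import torch
from transformers import Wav2Vec2ForCTC

# Load the fine-tuned CTC model; per the config above it is a 24-layer
# XLS-R 300M encoder with a 41-entry output vocabulary.
model = Wav2Vec2ForCTC.from_pretrained("Plim/xls-r-300m-fr")
assert model.config.vocab_size == 41
assert model.config.num_hidden_layers == 24

# Dummy forward pass: one second of silence at 16 kHz.
dummy = torch.zeros(1, 16000)
with torch.no_grad():
    logits = model(dummy).logits  # shape: (batch, frames, vocab_size)
print(logits.shape)
```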
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 0.38,
+   "eval_loss": 16.912879943847656,
+   "eval_runtime": 8.6337,
+   "eval_samples": 200,
+   "eval_samples_per_second": 23.165,
+   "eval_steps_per_second": 2.896,
+   "eval_wer": 2.3789039481437833
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
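A hedged sketch (assumptions: mono float32 audio already resampled to 16 kHz) of reconstructing the same feature extractor directly from the fields above, to show how raw audio becomes padded model inputs.

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

# Mirror preprocessor_config.json: 1-dim features, 16 kHz, zero padding,
# attention mask returned.
feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)

audio = np.zeros(16000, dtype=np.float32)  # placeholder one-second clip
inputs = feature_extractor(audio, sampling_rate=16000, padding=True, return_tensors="pt")
print(inputs.input_values.shape)  # torch.Size([1, 16000])
```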
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1d7bd7ffd2ed6a01faf9f143c79eb1c4a1e163dd1a62f92c26af28850511bcd
+ size 1262091761
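This is a Git LFS pointer (spec v1), not the weights themselves: the real file is stored out of band and referenced by SHA-256 digest and byte size. A minimal sketch for verifying a separately downloaded pytorch_model.bin against the pointer; the local path is hypothetical.

```python
import hashlib
import os

# Expected values come from the LFS pointer committed above.
EXPECTED_SHA256 = "d1d7bd7ffd2ed6a01faf9f143c79eb1c4a1e163dd1a62f92c26af28850511bcd"
EXPECTED_SIZE = 1_262_091_761  # bytes

def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in 1 MiB chunks to avoid loading ~1.2 GB into memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "pytorch_model.bin"  # hypothetical local copy of the LFS object
print(os.path.getsize(path) == EXPECTED_SIZE)
print(sha256_of(path) == EXPECTED_SHA256)
```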
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.38,
+   "train_loss": 13.584136962890625,
+   "train_runtime": 23.2007,
+   "train_samples": 1000,
+   "train_samples_per_second": 17.241,
+   "train_steps_per_second": 0.259
+ }
trainer_state.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.384,
+   "global_step": 6,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.38,
+       "step": 6,
+       "total_flos": 5.41371015650304e+16,
+       "train_loss": 13.584136962890625,
+       "train_runtime": 23.2007,
+       "train_samples_per_second": 17.241,
+       "train_steps_per_second": 0.259
+     }
+   ],
+   "max_steps": 6,
+   "num_train_epochs": 1,
+   "total_flos": 5.41371015650304e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
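For reference, a small sketch of reading the Trainer state back and walking `log_history`; for this short run it contains a single entry, logged at the final step (6 of 6), with no evaluation entries recorded in the state.

```python
import json

# Load the committed trainer state and print each logged history entry.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"trained {state['epoch']} epochs in {state['global_step']} steps")
for entry in state["log_history"]:
    print(entry.get("step"), entry.get("train_loss"))
```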
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4f6b5e530f353910710ef95150e349ddf6f70545b8095619803d7da46090983
+ size 2991
wandb/run-20220130_230018-ktkg6ghu/files/output.log CHANGED
@@ -23,3 +23,36 @@ Upload file training_args.bin: 100%|██████████████
  Upload file pytorch_model.bin: 100%|████████████████████████████████████████| 1.18G/1.18G [00:50<00:00, 25.0MB/s]
  Upload file training_args.bin: 100%|████████████████████████████████████████| 2.92k/2.92k [00:50<?, ?B/s]
  Dropping the following result as it does not have all the necessary fields:████████████████| 2.92k/2.92k [00:50<?, ?B/s]
+ {}
+ 01/30/2022 23:03:03 - WARNING - huggingface_hub.repository - To https://huggingface.co/Plim/xls-r-300m-fr
+    ab9abf3..565d898  main -> main
+ To https://huggingface.co/Plim/xls-r-300m-fr
+    ab9abf3..565d898  main -> main
+ ***** train metrics *****
+   epoch                    = 0.38
+   train_loss               = 13.5841
+   train_runtime            = 0:00:23.20
+   train_samples            = 1000
+   train_samples_per_second = 17.241
+   train_steps_per_second   = 0.259
+ 01/30/2022 23:03:06 - INFO - __main__ - *** Evaluate ***
+ The following columns in the evaluation set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.
+ ***** Running Evaluation *****
+   Num examples = 200
+   Batch size = 8
+
+
+
+
+ 100%|████████████████████████████████████████| 25/25 [00:08<00:00,  2.79it/s]
+ ***** eval metrics *****
+   epoch                   = 0.38
+   eval_loss               = 16.9129
+   eval_runtime            = 0:00:08.63
+   eval_samples            = 200
+   eval_samples_per_second = 23.165
+   eval_steps_per_second   = 2.896
+ 100%|████████████████████████████████████████| 25/25 [00:08<00:00,  3.03it/s]
+ Saving model checkpoint to ./
+ Configuration saved in ./config.json
+ Model weights saved in ./pytorch_model.bin
wandb/run-20220130_230018-ktkg6ghu/files/wandb-summary.json CHANGED
@@ -1 +1 @@
- {"train/train_runtime": 23.2007, "train/train_samples_per_second": 17.241, "train/train_steps_per_second": 0.259, "train/total_flos": 5.41371015650304e+16, "train/train_loss": 13.584136962890625, "train/epoch": 0.38, "train/global_step": 6, "_runtime": 22, "_timestamp": 1643583641, "_step": 0}
+ {"train/train_runtime": 23.2007, "train/train_samples_per_second": 17.241, "train/train_steps_per_second": 0.259, "train/total_flos": 5.41371015650304e+16, "train/train_loss": 13.584136962890625, "train/epoch": 0.38, "train/global_step": 6, "_runtime": 176, "_timestamp": 1643583795, "_step": 1, "eval/loss": 16.912879943847656, "eval/wer": 2.3789039481437833, "eval/runtime": 8.6337, "eval/samples_per_second": 23.165, "eval/steps_per_second": 2.896}
wandb/run-20220130_230018-ktkg6ghu/logs/debug-internal.log CHANGED
@@ -81,3 +81,21 @@
  2022-01-30 23:02:52,640 DEBUG SenderThread:28776 [sender.py:send_request():248] send_request: stop_status
  2022-01-30 23:02:57,275 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
  2022-01-30 23:02:59,276 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
+ 2022-01-30 23:03:05,280 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
+ 2022-01-30 23:03:07,282 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
+ 2022-01-30 23:03:07,846 DEBUG HandlerThread:28776 [handler.py:handle_request():130] handle_request: stop_status
+ 2022-01-30 23:03:07,847 DEBUG SenderThread:28776 [sender.py:send_request():248] send_request: stop_status
+ 2022-01-30 23:03:10,283 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
+ 2022-01-30 23:03:12,285 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
+ 2022-01-30 23:03:14,286 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
+ 2022-01-30 23:03:15,041 DEBUG SenderThread:28776 [sender.py:send():234] send: metric
+ 2022-01-30 23:03:15,042 DEBUG SenderThread:28776 [sender.py:send():234] send: metric
+ 2022-01-30 23:03:15,042 DEBUG SenderThread:28776 [sender.py:send():234] send: metric
+ 2022-01-30 23:03:15,042 DEBUG SenderThread:28776 [sender.py:send():234] send: metric
+ 2022-01-30 23:03:15,042 DEBUG SenderThread:28776 [sender.py:send():234] send: metric
+ 2022-01-30 23:03:15,043 DEBUG SenderThread:28776 [sender.py:send():234] send: history
+ 2022-01-30 23:03:15,043 DEBUG SenderThread:28776 [sender.py:send():234] send: summary
+ 2022-01-30 23:03:15,044 INFO SenderThread:28776 [sender.py:_save_file():939] saving file wandb-summary.json with policy end
+ 2022-01-30 23:03:15,287 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/wandb-summary.json
+ 2022-01-30 23:03:16,288 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
+ 2022-01-30 23:03:17,289 INFO Thread-8 :28776 [dir_watcher.py:_on_file_modified():230] file/dir modified: /workspace/xls-r-300m-fr/wandb/run-20220130_230018-ktkg6ghu/files/output.log
wandb/run-20220130_230018-ktkg6ghu/run-ktkg6ghu.wandb CHANGED
Binary files a/wandb/run-20220130_230018-ktkg6ghu/run-ktkg6ghu.wandb and b/wandb/run-20220130_230018-ktkg6ghu/run-ktkg6ghu.wandb differ