Committed by Tech-oriented
Commit f451498
1 Parent(s): dafbf41

Training in progress, epoch 4

model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01005cbb8e85152956ddb377cf179e3cd24dbe27e032cc71344529e0cc877c33
+oid sha256:126b48a280b83e7ba34ae365e6bbd6bdace7fc8a57eb48fa151a7a9df101dd9c
 size 437958648
run-2/checkpoint-1263/config.json ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "bert-base-uncased",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
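
Note: the config above is the stock bert-base-uncased configuration with a single-label classification head (problem_type "single_label_classification"). As a minimal, non-authoritative sketch, a checkpoint directory like this one can be loaded directly with transformers once the repo is cloned locally; the path below is an assumption about where the clone lives.

```python
from transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification

# Assumed local path to this checkpoint inside a clone of the repository.
ckpt_dir = "run-2/checkpoint-1263"

config = AutoConfig.from_pretrained(ckpt_dir)        # parses the config.json shown above
tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)  # uses tokenizer.json / tokenizer_config.json
model = AutoModelForSequenceClassification.from_pretrained(ckpt_dir)  # loads model.safetensors

print(config.problem_type, config.num_hidden_layers)  # "single_label_classification", 12
```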
run-2/checkpoint-1263/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4021bca007e51bd1785bdb6e2dc68031a8fa334b31501436cdbe7e332f8c293e
+size 437958648
run-2/checkpoint-1263/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6604072960b0399d05c965d9df1666a7e2ac61712adcd508634add3cdfac6bef
+size 876038394
run-2/checkpoint-1263/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0361c78c7f9c133b62f42e625649205dfec0d3fde567de0a8c8b1c5407ee551e
+size 14244
run-2/checkpoint-1263/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a315ce77c14b4dc205b5aa64878cab24cd12b081ee2f940a030f0c5030acfa2
+size 1064
run-2/checkpoint-1263/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
run-2/checkpoint-1263/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-1263/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
run-2/checkpoint-1263/trainer_state.json ADDED
@@ -0,0 +1,67 @@
+{
+  "best_metric": 0.8887614678899083,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-sst2/run-2/checkpoint-1263",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 1263,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.8761467889908257,
+      "eval_loss": 0.3037863075733185,
+      "eval_runtime": 2.2668,
+      "eval_samples_per_second": 384.685,
+      "eval_steps_per_second": 24.263,
+      "step": 421
+    },
+    {
+      "epoch": 1.19,
+      "grad_norm": 24.56251335144043,
+      "learning_rate": 5.135971766809031e-05,
+      "loss": 0.3191,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.8830275229357798,
+      "eval_loss": 0.4693216383457184,
+      "eval_runtime": 2.3381,
+      "eval_samples_per_second": 372.946,
+      "eval_steps_per_second": 23.523,
+      "step": 842
+    },
+    {
+      "epoch": 2.38,
+      "grad_norm": 3.9397337436676025,
+      "learning_rate": 3.5359805621956256e-05,
+      "loss": 0.1393,
+      "step": 1000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.8887614678899083,
+      "eval_loss": 0.533748984336853,
+      "eval_runtime": 2.3644,
+      "eval_samples_per_second": 368.807,
+      "eval_steps_per_second": 23.262,
+      "step": 1263
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 2105,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 292344132499080.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 6.735962971422436e-05,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 16,
+    "seed": 2
+  }
+}
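
Note: the trainer_state.json above records the per-epoch eval metrics and the best checkpoint for this hyperparameter-search trial. A small sketch (stdlib only; the path is assumed to point at a local copy of the file added in this commit) that pulls those values out:

```python
import json

# Assumed local path to the file added in this commit.
with open("run-2/checkpoint-1263/trainer_state.json") as f:
    state = json.load(f)

print("best metric:", state["best_metric"])                # 0.8887614678899083
print("best checkpoint:", state["best_model_checkpoint"])

# Only the log entries written at evaluation time carry eval_accuracy.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']}: eval_accuracy {entry['eval_accuracy']:.4f}")
```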
run-2/checkpoint-1263/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55dfbe476697181e66ebf5f3b6624f6c1c676b71c40c6125f531db2ed5ba1a72
+size 4920
run-2/checkpoint-1263/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-1684/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb613738e3ffdeb001374669941f1252a00de402f496459f82b20ed8886589ca
+oid sha256:126b48a280b83e7ba34ae365e6bbd6bdace7fc8a57eb48fa151a7a9df101dd9c
 size 437958648
run-2/checkpoint-1684/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:44a5c6d5a656ac7d3e2cc2addcc4aedc63c5cf7878138d7d2859c01f27069c59
+oid sha256:c88d83b7c4b80b76b6350f881a9a9a167a3bb3046daa8467effc2df81097e2f3
 size 876038394
run-2/checkpoint-1684/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc93202deb3c5569360fb9c9c21c4e09f6147edd63c0d92d9b5861f7112a9d98
-size 14308
+oid sha256:00d997cfabcf13a666fb0d33a5b223dfb8244d0359b5298edda4316a39a79838
+size 14244
run-2/checkpoint-1684/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d770bfc35626f498f5d3c5e0dc15b73670a01186e4e002b2705d76d56a23c52
+oid sha256:f4dbad146630eaeb88e576a2d85a7f61000b173418b9bbe21da7cb968aa27e78
 size 1064
run-2/checkpoint-1684/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 0.8635321100917431,
+  "best_metric": 0.8899082568807339,
   "best_model_checkpoint": "bert-base-uncased-finetuned-sst2/run-2/checkpoint-1684",
-  "epoch": 1.0,
+  "epoch": 4.0,
   "eval_steps": 500,
   "global_step": 1684,
   "is_hyper_param_search": true,
@@ -9,48 +9,75 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.3,
-      "grad_norm": 267.4592590332031,
-      "learning_rate": 6.490431462154475e-05,
-      "loss": 0.6127,
+      "epoch": 1.0,
+      "eval_accuracy": 0.8761467889908257,
+      "eval_loss": 0.3037863075733185,
+      "eval_runtime": 2.2668,
+      "eval_samples_per_second": 384.685,
+      "eval_steps_per_second": 24.263,
+      "step": 421
+    },
+    {
+      "epoch": 1.19,
+      "grad_norm": 24.56251335144043,
+      "learning_rate": 5.135971766809031e-05,
+      "loss": 0.3191,
       "step": 500
     },
     {
-      "epoch": 0.59,
-      "grad_norm": 9.022435188293457,
-      "learning_rate": 3.749539797393294e-05,
-      "loss": 0.5862,
+      "epoch": 2.0,
+      "eval_accuracy": 0.8830275229357798,
+      "eval_loss": 0.4693216383457184,
+      "eval_runtime": 2.3381,
+      "eval_samples_per_second": 372.946,
+      "eval_steps_per_second": 23.523,
+      "step": 842
+    },
+    {
+      "epoch": 2.38,
+      "grad_norm": 3.9397337436676025,
+      "learning_rate": 3.5359805621956256e-05,
+      "loss": 0.1393,
       "step": 1000
     },
     {
-      "epoch": 0.89,
-      "grad_norm": 10.805120468139648,
-      "learning_rate": 1.0086481326321143e-05,
-      "loss": 0.4922,
+      "epoch": 3.0,
+      "eval_accuracy": 0.8887614678899083,
+      "eval_loss": 0.533748984336853,
+      "eval_runtime": 2.3644,
+      "eval_samples_per_second": 368.807,
+      "eval_steps_per_second": 23.262,
+      "step": 1263
+    },
+    {
+      "epoch": 3.56,
+      "grad_norm": 0.6269965767860413,
+      "learning_rate": 1.9359893575822205e-05,
+      "loss": 0.0624,
       "step": 1500
     },
     {
-      "epoch": 1.0,
-      "eval_accuracy": 0.8635321100917431,
-      "eval_loss": 0.47688528895378113,
-      "eval_runtime": 2.4,
-      "eval_samples_per_second": 363.329,
-      "eval_steps_per_second": 22.916,
+      "epoch": 4.0,
+      "eval_accuracy": 0.8899082568807339,
+      "eval_loss": 0.5773999691009521,
+      "eval_runtime": 2.4005,
+      "eval_samples_per_second": 363.26,
+      "eval_steps_per_second": 22.912,
       "step": 1684
     }
   ],
   "logging_steps": 500,
-  "max_steps": 1684,
+  "max_steps": 2105,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 1,
+  "num_train_epochs": 5,
   "save_steps": 500,
-  "total_flos": 73089373401840.0,
-  "train_batch_size": 4,
+  "total_flos": 440324059476660.0,
+  "train_batch_size": 16,
   "trial_name": null,
   "trial_params": {
-    "learning_rate": 9.231323126915654e-05,
-    "num_train_epochs": 1,
-    "per_device_train_batch_size": 4,
-    "seed": 29
+    "learning_rate": 6.735962971422436e-05,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 16,
+    "seed": 2
   }
 }
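
Note: the updated state shows epoch 4 of this trial reaching eval_accuracy 0.8899, and trial_params records the sampled hyperparameters (is_hyper_param_search is true). As a rough sketch only, not the actual contents of training_args.bin, the recorded settings would roughly correspond to the TrainingArguments below; the per-epoch evaluation/save cadence is inferred from the eval steps above and is an assumption.

```python
from transformers import TrainingArguments

# Sketch of the trial's settings expressed as fixed arguments. Values come from
# trial_params and the logging fields in trainer_state.json; the strategies are
# inferred from the per-epoch eval entries, not read from training_args.bin.
args = TrainingArguments(
    output_dir="bert-base-uncased-finetuned-sst2",
    learning_rate=6.735962971422436e-05,
    num_train_epochs=5,
    per_device_train_batch_size=16,
    seed=2,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    logging_steps=500,
)
```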
run-2/checkpoint-1684/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3c310707a9a9b300e8afc3494c59ee9399bd8e941233ac893f5f90c0f97df1e
+oid sha256:55dfbe476697181e66ebf5f3b6624f6c1c676b71c40c6125f531db2ed5ba1a72
 size 4920
run-2/checkpoint-842/config.json ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "bert-base-uncased",
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
run-2/checkpoint-842/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3ae7173e18c5f6e56e98d91d905e244b9810eeb9480f292284b261cf5b55092
+size 437958648
run-2/checkpoint-842/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74812e8b9204404065a592c4ec31dbb1c16728f28dc8de71c9c28f4fb50a4c1d
+size 876038394
run-2/checkpoint-842/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e456f622217763a691fbe0c5358aa83cda2e4e292e7913f706817bb67c8d5be3
+size 14244
run-2/checkpoint-842/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2d86120ab9ded684321ec427bf31db863672d0bc6db5e6a78e31476b9dd54dd
+size 1064
run-2/checkpoint-842/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
run-2/checkpoint-842/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-842/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
run-2/checkpoint-842/trainer_state.json ADDED
@@ -0,0 +1,51 @@
+{
+  "best_metric": 0.8830275229357798,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-sst2/run-2/checkpoint-842",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 842,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.8761467889908257,
+      "eval_loss": 0.3037863075733185,
+      "eval_runtime": 2.2668,
+      "eval_samples_per_second": 384.685,
+      "eval_steps_per_second": 24.263,
+      "step": 421
+    },
+    {
+      "epoch": 1.19,
+      "grad_norm": 24.56251335144043,
+      "learning_rate": 5.135971766809031e-05,
+      "loss": 0.3191,
+      "step": 500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.8830275229357798,
+      "eval_loss": 0.4693216383457184,
+      "eval_runtime": 2.3381,
+      "eval_samples_per_second": 372.946,
+      "eval_steps_per_second": 23.523,
+      "step": 842
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 2105,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 146698802249040.0,
+  "train_batch_size": 16,
+  "trial_name": null,
+  "trial_params": {
+    "learning_rate": 6.735962971422436e-05,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 16,
+    "seed": 2
+  }
+}
run-2/checkpoint-842/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55dfbe476697181e66ebf5f3b6624f6c1c676b71c40c6125f531db2ed5ba1a72
+size 4920
run-2/checkpoint-842/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
runs/Mar03_17-42-01_90708aaa767a/events.out.tfevents.1709497917.90708aaa767a.21824.20 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:781cc6082e2c1d28081b76d85760483932c3583508940c692fa56ee99d2f4740
-size 5691
+oid sha256:260094bdc65ae9b917cf9364699fe4de0ca00cbcb590ea2df1a0a249c5d4033a
+size 7436