philschmid (HF staff) committed
Commit 4366c37
1 Parent(s): 24a0d30

commit files to HF hub

README.md ADDED
@@ -0,0 +1,96 @@
---
language: en
tags:
- sagemaker
- bart
- summarization
license: apache-2.0
datasets:
- samsum
widget:
- text: |
    Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker?
    Philipp: Sure you can use the new Hugging Face Deep Learning Container.
    Jeff: ok.
    Jeff: and how can I get started?
    Jeff: where can I find documentation?
    Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face
---

## `distilbart-cnn-12-6-samsum`

This model was trained using Amazon SageMaker and the new Hugging Face Deep Learning Container.

For more information, see:
- [🤗 Transformers Documentation: Amazon SageMaker](https://huggingface.co/transformers/sagemaker.html)
- [Example Notebooks](https://github.com/huggingface/notebooks/tree/master/sagemaker)
- [Amazon SageMaker documentation for Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html)
- [Python SDK SageMaker documentation for Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/index.html)
- [Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers)

## Results

### Hyperparameters
```json
{
    "dataset_name": "samsum",
    "do_eval": true,
    "do_train": true,
    "fp16": true,
    "learning_rate": 5e-05,
    "model_name_or_path": "sshleifer/distilbart-cnn-12-6",
    "num_train_epochs": 3,
    "output_dir": "/opt/ml/model",
    "per_device_eval_batch_size": 8,
    "per_device_train_batch_size": 8,
    "seed": 7
}
```
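
As a rough sketch of how a run like this can be launched with the SageMaker Python SDK's `HuggingFace` estimator; the entry point script, source directory, instance type, and IAM role below are illustrative assumptions, not taken from this repository:

```python
from sagemaker.huggingface import HuggingFace

# Hyperparameters passed through to the training script (mirrors the JSON above).
hyperparameters = {
    "model_name_or_path": "sshleifer/distilbart-cnn-12-6",
    "dataset_name": "samsum",
    "do_train": True,
    "do_eval": True,
    "fp16": True,
    "learning_rate": 5e-05,
    "num_train_epochs": 3,
    "per_device_train_batch_size": 8,
    "per_device_eval_batch_size": 8,
    "seed": 7,
    "output_dir": "/opt/ml/model",
}

# entry_point, source_dir, instance_type, and role are hypothetical placeholders.
huggingface_estimator = HuggingFace(
    entry_point="run_summarization.py",  # e.g. the 🤗 Transformers seq2seq example script
    source_dir="./examples",             # assumed location of that script
    instance_type="ml.p3.2xlarge",
    instance_count=1,
    role="<your-sagemaker-execution-role>",
    transformers_version="4.4",
    pytorch_version="1.6",
    py_version="py36",
    hyperparameters=hyperparameters,
)

huggingface_estimator.fit()
```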

### Training

| key | value |
| --- | ----- |
| epoch | 3.0 |
| init_mem_cpu_alloc_delta | 180338 |
| init_mem_cpu_peaked_delta | 18282 |
| init_mem_gpu_alloc_delta | 1222242816 |
| init_mem_gpu_peaked_delta | 0 |
| train_mem_cpu_alloc_delta | 6971403 |
| train_mem_cpu_peaked_delta | 640733 |
| train_mem_gpu_alloc_delta | 4910897664 |
| train_mem_gpu_peaked_delta | 23331969536 |
| train_runtime | 155.2034 |
| train_samples | 14732 |
| train_samples_per_second | 2.242 |

### Evaluation

| key | value |
| --- | ----- |
| epoch | 3.0 |
| eval_loss | 1.4209576845169067 |
| eval_mem_cpu_alloc_delta | 868003 |
| eval_mem_cpu_peaked_delta | 18250 |
| eval_mem_gpu_alloc_delta | 0 |
| eval_mem_gpu_peaked_delta | 328244736 |
| eval_runtime | 0.6088 |
| eval_samples | 818 |
| eval_samples_per_second | 1343.647 |

## Usage
```python
from transformers import pipeline

summarizer = pipeline("summarization", model="philschmid/distilbart-cnn-12-6-samsum")

conversation = '''Jeff: Can I train a 🤗 Transformers model on Amazon SageMaker?
Philipp: Sure you can use the new Hugging Face Deep Learning Container.
Jeff: ok.
Jeff: and how can I get started?
Jeff: where can I find documentation?
Philipp: ok, ok you can find everything here. https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face
'''
summarizer(conversation)
```
all_results.json ADDED
@@ -0,0 +1,22 @@
{
    "epoch": 3.0,
    "eval_loss": 1.4209576845169067,
    "eval_mem_cpu_alloc_delta": 868003,
    "eval_mem_cpu_peaked_delta": 18250,
    "eval_mem_gpu_alloc_delta": 0,
    "eval_mem_gpu_peaked_delta": 328244736,
    "eval_runtime": 0.6088,
    "eval_samples": 818,
    "eval_samples_per_second": 1343.647,
    "init_mem_cpu_alloc_delta": 180338,
    "init_mem_cpu_peaked_delta": 18282,
    "init_mem_gpu_alloc_delta": 1222242816,
    "init_mem_gpu_peaked_delta": 0,
    "train_mem_cpu_alloc_delta": 6971403,
    "train_mem_cpu_peaked_delta": 640733,
    "train_mem_gpu_alloc_delta": 4910897664,
    "train_mem_gpu_peaked_delta": 23331969536,
    "train_runtime": 155.2034,
    "train_samples": 14732,
    "train_samples_per_second": 2.242
}
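
These aggregated metrics are what the 🤗 Transformers example scripts write out via `Trainer.save_metrics`. A minimal sketch of reading them back, e.g. to compare runs (the local file path is assumed):

```python
import json

# Load the combined train + eval metrics written at the end of training.
with open("all_results.json") as f:
    metrics = json.load(f)

print(f"eval_loss: {metrics['eval_loss']:.4f}")
print(f"train_runtime: {metrics['train_runtime']:.1f}s")
```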
config.json ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "sshleifer/distilbart-cnn-12-6",
  "_num_labels": 3,
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "add_bias_logits": false,
  "add_final_layer_norm": false,
  "architectures": [
    "BartForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "classif_dropout": 0.0,
  "classifier_dropout": 0.0,
  "d_model": 1024,
  "decoder_attention_heads": 16,
  "decoder_ffn_dim": 4096,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "decoder_start_token_id": 2,
  "dropout": 0.1,
  "early_stopping": true,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 4096,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 12,
  "eos_token_id": 2,
  "extra_pos_embeddings": 2,
  "force_bos_token_to_be_generated": true,
  "forced_bos_token_id": 0,
  "forced_eos_token_id": 2,
  "gradient_checkpointing": false,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "length_penalty": 2.0,
  "max_length": 142,
  "max_position_embeddings": 1024,
  "min_length": 56,
  "model_type": "bart",
  "no_repeat_ngram_size": 3,
  "normalize_before": false,
  "normalize_embedding": true,
  "num_beams": 4,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "prefix": " ",
  "replacing_rate": 0,
  "scale_embedding": false,
  "static_position_embeddings": false,
  "student_decoder_layers": null,
  "student_encoder_layers": null,
  "task_specific_params": {
    "summarization": {
      "early_stopping": true,
      "length_penalty": 2.0,
      "max_length": 142,
      "min_length": 56,
      "no_repeat_ngram_size": 3,
      "num_beams": 4
    }
  },
  "transformers_version": "4.4.2",
  "use_cache": true,
  "vocab_size": 50264
}
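
The top-level `num_beams`, `length_penalty`, `min_length`/`max_length`, and `no_repeat_ngram_size` entries above act as the default generation settings, and the `task_specific_params.summarization` block mirrors them for the summarization pipeline. A minimal sketch of generating directly with the model so these defaults apply (the input text is just an example):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "philschmid/distilbart-cnn-12-6-samsum"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

text = "Jeff: Can I train a model on Amazon SageMaker? Philipp: Sure, use the Hugging Face DLC."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024)

# num_beams=4, length_penalty=2.0, min_length=56, max_length=142, etc.
# fall back to the config values above when not passed explicitly.
summary_ids = model.generate(**inputs)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```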
eval_results.json ADDED
@@ -0,0 +1,11 @@
{
    "epoch": 3.0,
    "eval_loss": 1.4209576845169067,
    "eval_mem_cpu_alloc_delta": 868003,
    "eval_mem_cpu_peaked_delta": 18250,
    "eval_mem_gpu_alloc_delta": 0,
    "eval_mem_gpu_peaked_delta": 328244736,
    "eval_runtime": 0.6088,
    "eval_samples": 818,
    "eval_samples_per_second": 1343.647
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67abec0e716c8fc11e87bb3f759ce59f2b77b7cff98d73a71a9127bf6d2825f1
size 1222383347
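
What is committed here is a Git LFS pointer; the ~1.2 GB weight file itself lives in LFS storage and is resolved at download time. A minimal sketch of fetching the real file programmatically with `huggingface_hub` (assuming a recent version of the library):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads (and caches) the actual weights.
weights_path = hf_hub_download(
    repo_id="philschmid/distilbart-cnn-12-6-samsum",
    filename="pytorch_model.bin",
)
print(weights_path)
```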
special_tokens_map.json ADDED
@@ -0,0 +1 @@
{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
{"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "sshleifer/distilbart-cnn-12-6"}
train_results.json ADDED
@@ -0,0 +1,14 @@
{
    "epoch": 3.0,
    "init_mem_cpu_alloc_delta": 180338,
    "init_mem_cpu_peaked_delta": 18282,
    "init_mem_gpu_alloc_delta": 1222242816,
    "init_mem_gpu_peaked_delta": 0,
    "train_mem_cpu_alloc_delta": 6971403,
    "train_mem_cpu_peaked_delta": 640733,
    "train_mem_gpu_alloc_delta": 4910897664,
    "train_mem_gpu_peaked_delta": 23331969536,
    "train_runtime": 155.2034,
    "train_samples": 14732,
    "train_samples_per_second": 2.242
}
trainer_state.json ADDED
@@ -0,0 +1,23 @@
{
    "best_metric": null,
    "best_model_checkpoint": null,
    "epoch": 3.0,
    "global_step": 348,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
    "log_history": [
        {
            "epoch": 3.0,
            "step": 348,
            "total_flos": 3.256941561040077e+16,
            "train_runtime": 155.2034,
            "train_samples_per_second": 2.242
        }
    ],
    "max_steps": 348,
    "num_train_epochs": 3,
    "total_flos": 3.256941561040077e+16,
    "trial_name": null,
    "trial_params": null
}
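
A sanity check on these numbers: 348 optimizer steps over 3 epochs is 116 steps per epoch, and 14,732 training samples / 116 steps ≈ 127 samples per step, i.e. an effective batch size of roughly 128. With `per_device_train_batch_size` of 8, that suggests the run was distributed over about 16 devices; this is an inference from the logs, not something stated in the repository. Note also that 348 steps / 155.2 s ≈ 2.242, so the `train_samples_per_second` reported by this transformers version (4.4.2) appears to actually be steps per second.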
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:445bcdc5aceb3f3e7eb6bf5074c748abcba6917e3b5045d97705cd824089e224
size 2351
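
`training_args.bin` is the pickled `TrainingArguments` object that `Trainer` saves next to the model. A minimal sketch of inspecting it (this assumes a compatible transformers install, here 4.4.x; newer PyTorch releases may additionally require `weights_only=False`):

```python
import torch

# The file is written with torch.save(TrainingArguments), so torch.load restores it.
args = torch.load("training_args.bin")
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```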
vocab.json ADDED
The diff for this file is too large to render. See raw diff