system (HF staff) committed
Commit 0275c4a
1 Parent(s): 26a846a

Commit From AutoTrain

.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ tags: autotrain
+ language: unk
+ widget:
+ - text: "I love AutoTrain 🤗"
+ datasets:
+ - datien228/autotrain-data-summary-text
+ co2_eq_emissions: 1850.790132860878
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Summarization
+ - Model ID: 1079039131
+ - CO2 Emissions (in grams): 1850.790132860878
+
+ ## Validation Metrics
+
+ - Loss: 1.8720897436141968
+ - Rouge1: 40.3451
+ - Rouge2: 17.4156
+ - RougeL: 30.9608
+ - RougeLsum: 38.8329
+ - Gen Len: 67.0434
+
+ ## Usage
+
+ You can use cURL to access this model:
+
+ ```
+ $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/datien228/autotrain-summary-text-1079039131
+ ```
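
The README's cURL example calls the hosted Inference API; the files added in this commit can also be run locally. A minimal sketch using the `transformers` pipeline (assumes `transformers` and a PyTorch backend are installed; the article text is a made-up placeholder):

```
# Local-usage sketch for the summarizer added in this commit.
# Assumes `transformers` and `torch` are installed.
from transformers import pipeline

summarizer = pipeline(
    "summarization",
    model="datien228/autotrain-summary-text-1079039131",
)

# Placeholder input; any article-length text works.
article = (
    "AutoTrain automatically trains and evaluates machine learning models, "
    "letting users go from a raw dataset to a hosted model without writing "
    "any training code themselves."
)

# Decoding defaults (beam search, min/max length) come from config.json,
# so no generation arguments are needed here.
print(summarizer(article)[0]["summary_text"])
```
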
config.json ADDED
@@ -0,0 +1,77 @@
+ {
+   "_name_or_path": "AutoTrain",
+   "_num_labels": 3,
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "classif_dropout": 0.0,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "extra_pos_embeddings": 2,
+   "force_bos_token_to_be_generated": true,
+   "forced_bos_token_id": 0,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "length_penalty": 2.0,
+   "max_length": 142,
+   "max_position_embeddings": 1024,
+   "min_length": 56,
+   "model_type": "bart",
+   "no_repeat_ngram_size": 3,
+   "normalize_before": false,
+   "normalize_embedding": true,
+   "num_beams": 4,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "prefix": " ",
+   "replacing_rate": 0,
+   "scale_embedding": false,
+   "static_position_embeddings": false,
+   "student_decoder_layers": null,
+   "student_encoder_layers": null,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.15.0",
+   "use_cache": true,
+   "vocab_size": 50264
+ }
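
The `task_specific_params.summarization` block above (mirrored by the top-level keys) fixes the decoding defaults: 4-beam search, length penalty 2.0, summaries of 56 to 142 tokens, and no repeated trigrams. A minimal sketch of overriding those defaults for a single call, assuming `transformers` is installed and reusing the repo id from the model card:

```
# generate() falls back on the defaults in config.json (num_beams=4,
# length_penalty=2.0, min_length=56, max_length=142); keyword arguments
# passed here override them for this call only.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo_id = "datien228/autotrain-summary-text-1079039131"  # from the model card
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)

# Placeholder document; inputs are capped at max_position_embeddings (1024).
text = "Long article text goes here ..."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024)

# Request a shorter summary than the configured 56-token minimum.
summary_ids = model.generate(**inputs, min_length=20, max_length=80)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```
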
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df0e77e933ded5032d732744af6e63ddeb4b21a473675b65488e5059d0647382
+ size 1222374713
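
The three lines above are a Git LFS pointer: the actual 1.2 GB weight file is stored out of band and identified by the SHA-256 in the `oid` field. A small sketch for verifying a downloaded copy against that checksum (the local path is a placeholder):

```
# Verify a downloaded pytorch_model.bin against the LFS pointer's oid.
import hashlib

EXPECTED = "df0e77e933ded5032d732744af6e63ddeb4b21a473675b65488e5059d0647382"

sha = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:  # placeholder local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")
```
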
sample_input.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cc5ca5ec72773558baee18f0d42ee1b7c166d4b133101515e97d6a5046ae7a2
+ size 25139
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "trim_offsets": true, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "AutoTrain", "tokenizer_class": "BartTokenizer"}
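
Per the `tokenizer_class` and `model_max_length` entries above, `AutoTokenizer` resolves this repo to a BART tokenizer with a 1024-token limit. A quick check, under the same assumptions as the earlier sketches:

```
# Confirm the tokenizer files in this commit load as configured.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("datien228/autotrain-summary-text-1079039131")
print(type(tok).__name__)    # fast BART tokenizer, loaded via tokenizer.json
print(tok.model_max_length)  # 1024
print(tok.mask_token)        # <mask>
```
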
vocab.json ADDED
The diff for this file is too large to render. See raw diff