nam194 committed on
Commit 2288475
1 Parent(s): 179901b

commit files to HF hub

added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<mask>": 64000
+ }
bpe.codes ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,184 @@
+ {
+   "_commit_hash": "1f6b064db7c6ad21e8da97aea2f20044aa29ddd3",
+   "_name_or_path": "nam194/textsum_epoch_5",
+   "architectures": [
+     "EncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "vinai/phobert-base",
+     "add_cross_attention": true,
+     "architectures": [
+       "RobertaForMaskedLM"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 258,
+     "min_length": 0,
+     "model_type": "roberta",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": "PhobertTokenizer",
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.26.1",
+     "type_vocab_size": 1,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 64001
+   },
+   "decoder_start_token_id": 0,
+   "early_stopping": true,
+   "encoder": {
+     "_name_or_path": "vinai/phobert-base",
+     "add_cross_attention": false,
+     "architectures": [
+       "RobertaForMaskedLM"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classifier_dropout": null,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 258,
+     "min_length": 0,
+     "model_type": "roberta",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "position_embedding_type": "absolute",
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": "PhobertTokenizer",
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.26.1",
+     "type_vocab_size": 1,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 64001
+   },
+   "eos_token_id": 2,
+   "is_encoder_decoder": true,
+   "length_penalty": 2.0,
+   "max_length": 64,
+   "model_type": "encoder-decoder",
+   "no_repeat_ngram_size": 3,
+   "num_beams": 4,
+   "pad_token_id": 1,
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "vocab_size": 64001
+ }
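config.json wires two vinai/phobert-base checkpoints into a single EncoderDecoderModel: the encoder is a plain RoBERTa stack, while the decoder is the same architecture with is_decoder and add_cross_attention enabled. A minimal loading sketch, assuming the repo id matches the "_name_or_path" field above (this snippet is not part of the commit):

import torch
from transformers import AutoTokenizer, EncoderDecoderModel

repo_id = "nam194/textsum_epoch_5"  # assumption: taken from "_name_or_path"
tokenizer = AutoTokenizer.from_pretrained(repo_id)  # resolves to PhobertTokenizer
model = EncoderDecoderModel.from_pretrained(repo_id)
model.eval()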
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 0,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "length_penalty": 2.0,
+   "max_length": 64,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 4,
+   "pad_token_id": 1,
+   "transformers_version": "4.26.1"
+ }
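generation_config.json records the decoding defaults (4-beam search with early stopping, max_length 64, no_repeat_ngram_size 3, length_penalty 2.0) that model.generate() picks up without explicit arguments. Continuing the loading sketch above, a hedged summarization call; the input string is only a placeholder:

# generate() reads its defaults from generation_config.json,
# so no decoding arguments need to be passed here.
inputs = tokenizer(
    "văn bản tiếng Việt cần tóm tắt ...",  # placeholder input text
    return_tensors="pt",
    truncation=True,  # truncates to the tokenizer's model_max_length (256)
)
with torch.no_grad():
    summary_ids = model.generate(**inputs)  # beam search: num_beams=4, max_length=64
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))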
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:271ec1e009f822092ff88a7ded783b46ba152f73bde0c45544d22fce85ea4826
+ size 1193903801
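pytorch_model.bin is committed as a Git LFS pointer: the three lines give the LFS spec version, the SHA-256 of the real payload, and its size (about 1.19 GB). One way to resolve the pointer to the actual weight file is via huggingface_hub; a sketch, again assuming the repo id from "_name_or_path":

from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="nam194/textsum_epoch_5",  # assumption: matches "_name_or_path"
    filename="pytorch_model.bin",
)
print(weights_path)  # local cache path to the ~1.19 GB weight file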
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 256,
+   "name_or_path": "vinai/phobert-base",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "PhobertTokenizer",
+   "unk_token": "<unk>"
+ }
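Together, added_tokens.json, special_tokens_map.json, and tokenizer_config.json imply that <mask> was appended at id 64000 on top of PhoBERT's base vocabulary, which is what yields the vocab_size of 64001 in config.json. A small consistency check that should hold if the files load as expected, continuing the loading sketch above:

assert tokenizer.mask_token == "<mask>"                    # special_tokens_map.json
assert tokenizer.convert_tokens_to_ids("<mask>") == 64000  # added_tokens.json
assert len(tokenizer) == 64001                             # vocab_size in config.json
assert tokenizer.model_max_length == 256                   # tokenizer_config.json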
vocab.txt ADDED
The diff for this file is too large to render. See raw diff