qgyd2021 committed
Commit d56a253
1 Parent(s): b62ffac

Model save
final/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "qgyd2021/few_shot_ner",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "output_past": true,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 320
+     }
+   },
+   "tokenizer_class": "BertTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "use_cache": true,
+   "vocab_size": 21128
+ }
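The config describes a 12-layer GPT-2 LM-head model (768 hidden, 12 heads, 1024 positions) paired with a 21,128-token BERT-style vocabulary. A minimal loading sketch follows; it assumes the files in this commit are served under the repo id given in "_name_or_path" (adjust repo_id or add subfolder="final" if the checkpoint lives in the final/ directory instead).

# Minimal sketch: load the model and tokenizer described by final/config.json.
# repo_id is taken from "_name_or_path"; the exact repo layout is an assumption.
from transformers import AutoTokenizer, GPT2LMHeadModel

repo_id = "qgyd2021/few_shot_ner"

# "tokenizer_class": "BertTokenizer" makes AutoTokenizer resolve to a BERT-style
# WordPiece tokenizer with the 21128-token (Chinese) vocab from vocab.txt.
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = GPT2LMHeadModel.from_pretrained(repo_id)

print(model.config.n_layer, model.config.n_embd, model.config.vocab_size)
# expected from this config: 12 768 21128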
final/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.30.2"
+ }
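generation_config.json only pins the BOS/EOS ids; the sampling defaults live under task_specific_params in config.json. A sketch of generating with those recorded settings, assuming the same repo id as above; the prompt string is purely illustrative.

# Sketch: sample with the settings recorded in this commit (do_sample/max_length
# from task_specific_params, eos id 50256 from generation_config.json).
import torch
from transformers import AutoTokenizer, GPT2LMHeadModel

repo_id = "qgyd2021/few_shot_ner"  # assumed repo id, from config.json
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = GPT2LMHeadModel.from_pretrained(repo_id)

prompt = "实体识别："  # made-up prompt, for illustration only
inputs = tokenizer(prompt, return_tensors="pt")

with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        do_sample=True,
        max_length=320,
        eos_token_id=50256,
    )

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))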
final/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27de4408bf63713dceaa728a75da00d7b2c31f792610f1c48bb0ce8c0c906bc8
+ size 408322909
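The weight file is stored as a Git LFS pointer: the oid is the SHA-256 of the real file and size is its byte count. A sketch that checks a locally downloaded copy against this pointer; the local path is an assumption.

# Sketch: verify a downloaded final/pytorch_model.bin against the LFS pointer above.
import hashlib
import os

path = "final/pytorch_model.bin"  # assumed local path
expected_sha256 = "27de4408bf63713dceaa728a75da00d7b2c31f792610f1c48bb0ce8c0c906bc8"
expected_size = 408322909

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_sha256, "sha256 mismatch"
print("pointer matches:", path)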
final/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
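Note the specials are BERT-style ([CLS], [SEP], ...) rather than GPT-2's, matching the BertTokenizer declared in the config. A small sketch confirming the loaded tokenizer exposes them (repo id assumed as above):

from transformers import AutoTokenizer

repo_id = "qgyd2021/few_shot_ner"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# Print each declared special token and the vocab id it maps to.
for name in ("cls_token", "mask_token", "pad_token", "sep_token", "unk_token"):
    token = getattr(tokenizer, name)
    print(name, token, tokenizer.convert_tokens_to_ids(token))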
final/tokenizer.json ADDED
The diff for this file is too large to render.
final/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 1024,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
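The tokenizer config pairs a cased BertTokenizer (do_lower_case false, tokenize_chinese_chars true) with the GPT-2 LM, and model_max_length 1024 matches n_positions in config.json. A sketch of loading it directly; the example sentence is made up.

from transformers import BertTokenizer

repo_id = "qgyd2021/few_shot_ner"  # assumed repo id
tokenizer = BertTokenizer.from_pretrained(repo_id)

print(tokenizer.model_max_length)  # 1024, matching n_positions in config.json

# Illustrative sentence only; Chinese characters are split per character
# because tokenize_chinese_chars is true.
tokens = tokenizer.tokenize("北京欢迎你")
print(tokens)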
final/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:925a93fd5366ff5e97fd1a3bede27b1d3f3bba63f6bac0930403cc1216f40fe1
+ size 3899
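training_args.bin is the pickled TrainingArguments object that transformers' Trainer writes with torch.save, so it can be inspected the same way. A sketch, assuming the file has been pulled locally and a transformers install compatible with the 4.30.2 recorded here:

import torch

# Sketch: inspect the saved TrainingArguments. Path is an assumption; on recent
# torch versions you may need torch.load(..., weights_only=False) to unpickle it.
args = torch.load("final/training_args.bin")
print(type(args))
print(args.output_dir, args.num_train_epochs, args.learning_rate)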
final/vocab.txt ADDED
The diff for this file is too large to render.
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd77e31ef88c7f32264c141e8b03db77a4c3bef31abcc663b065625e4dd66dfb
+ oid sha256:27de4408bf63713dceaa728a75da00d7b2c31f792610f1c48bb0ce8c0c906bc8
  size 408322909
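After this commit the root pytorch_model.bin points at the same blob as final/pytorch_model.bin (identical oid and size). A sketch of fetching the file pinned to exactly this commit with huggingface_hub; the repo id is assumed from config.json, and d56a253 is the short commit hash shown in the header.

from huggingface_hub import hf_hub_download

# Sketch: download the weight file as it exists at this commit.
path = hf_hub_download(
    repo_id="qgyd2021/few_shot_ner",  # assumed repo id
    filename="pytorch_model.bin",
    revision="d56a253",
)
print(path)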
runs/Jan12_00-18-12_nlp/events.out.tfevents.1704990084.nlp.21492.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d4d54286328a7c383b22f6a4a8c20fc49c395d4d32d1632b20d9c34d65b1cd17
- size 17766
+ oid sha256:c6b3c08c6977dea4fe1a94e025418d3a68178f215bd54a712c47ebeae60d368e
+ size 18126
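The updated file is a TensorBoard event log for the run under runs/Jan12_00-18-12_nlp. A sketch of reading its scalar summaries with TensorBoard's event accumulator; the local directory path is an assumption and the tag names depend on what the Trainer logged.

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Sketch: list scalar tags and their first few (step, value) pairs.
acc = EventAccumulator("runs/Jan12_00-18-12_nlp")  # assumed local path
acc.Reload()

for tag in acc.Tags().get("scalars", []):
    events = acc.Scalars(tag)
    print(tag, [(e.step, e.value) for e in events[:3]])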