Shijia committed
Commit cc04b42
1 Parent(s): 6678daa

Training in progress, epoch 0

Files changed (4):
  1. config.json +7 -36
  2. model.safetensors +2 -2
  3. tokenizer_config.json +1 -1
  4. training_args.bin +1 -1
config.json CHANGED
@@ -1,23 +1,24 @@
 {
-  "_name_or_path": "google/flan-t5-base",
+  "_name_or_path": "luqh/ClinicalT5-base",
   "architectures": [
     "T5ForConditionalGeneration"
   ],
   "classifier_dropout": 0.0,
-  "d_ff": 2048,
+  "d_ff": 3072,
   "d_kv": 64,
   "d_model": 768,
   "decoder_start_token_id": 0,
-  "dense_act_fn": "gelu_new",
+  "dense_act_fn": "relu",
   "dropout_rate": 0.1,
   "eos_token_id": 1,
-  "feed_forward_proj": "gated-gelu",
+  "feed_forward_proj": "relu",
+  "gradient_checkpointing": false,
   "initializer_factor": 1.0,
   "is_encoder_decoder": true,
-  "is_gated_act": true,
+  "is_gated_act": false,
   "layer_norm_epsilon": 1e-06,
   "model_type": "t5",
-  "n_positions": 512,
+  "n_positions": 1024,
   "num_decoder_layers": 12,
   "num_heads": 12,
   "num_layers": 12,
@@ -25,36 +26,6 @@
   "pad_token_id": 0,
   "relative_attention_max_distance": 128,
   "relative_attention_num_buckets": 32,
-  "task_specific_params": {
-    "summarization": {
-      "early_stopping": true,
-      "length_penalty": 2.0,
-      "max_length": 200,
-      "min_length": 30,
-      "no_repeat_ngram_size": 3,
-      "num_beams": 4,
-      "prefix": "summarize: "
-    },
-    "translation_en_to_de": {
-      "early_stopping": true,
-      "max_length": 300,
-      "num_beams": 4,
-      "prefix": "translate English to German: "
-    },
-    "translation_en_to_fr": {
-      "early_stopping": true,
-      "max_length": 300,
-      "num_beams": 4,
-      "prefix": "translate English to French: "
-    },
-    "translation_en_to_ro": {
-      "early_stopping": true,
-      "max_length": 300,
-      "num_beams": 4,
-      "prefix": "translate English to Romanian: "
-    }
-  },
-  "tie_word_embeddings": false,
   "torch_dtype": "float32",
   "transformers_version": "4.35.2",
   "use_cache": true,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b981cbf163a3315b81060a4d86f7aa0fd8747024f405495220b9f7f4d94dffa
-size 990345064
+oid sha256:297432dd585bf1306d27a0ef7fac9f85c36f508de6795bc1084ec7b18e415e5e
+size 891644712
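
The weight file shrinks by about 99 MB (990,345,064 -> 891,644,712 bytes). A plausible explanation, offered as an inference rather than something the diff proves: the updated config.json drops "tie_word_embeddings": false, so transformers falls back to its default of tying the LM head to the input embeddings, and a separate float32 lm_head matrix of roughly 32,128 x 768 x 4 bytes (about 98.7 MB) no longer needs to be stored, which closely matches the observed difference.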
tokenizer_config.json CHANGED
@@ -931,7 +931,7 @@
   "eos_token": "</s>",
   "extra_ids": 100,
   "legacy": true,
-  "model_max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
   "sp_model_kwargs": {},
   "tokenizer_class": "T5Tokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:47537114f4c6131f3398fcac04bd89311f7795f1e9cd07d2d4645b0b9cf582b5
+oid sha256:2ea7a47dce5506276650ba7b163f4c7de73a55ffdf3f0bf37f82bac76e02ddb6
 size 4664