StephanAkkerman committed on
Commit 5e79875
1 Parent(s): 9921991

V7 - yiyanghkust's FinBERT

added_tokens.json CHANGED
@@ -1,4 +1,4 @@
 {
-  "@USER": 30522,
-  "[URL]": 30523
+  "@USER": 30873,
+  "[URL]": 30874
 }
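The id shift above follows from the new base vocabulary: yiyanghkust/finbert-pretrain already holds 30,873 entries (versus 30,522 in ProsusAI/finbert), so the same two custom tokens land at 30873 and 30874. A minimal sketch of how entries like these are typically produced with the transformers API (illustrative only; the output directory is hypothetical, not this repo's actual workflow):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("yiyanghkust/finbert-pretrain")

# The base vocabulary already holds 30,873 entries, so the two custom tokens
# receive ids 30873 and 30874 (they were 30522/30523 on top of ProsusAI/finbert).
tokenizer.add_tokens(["@USER", "[URL]"])
print(tokenizer.convert_tokens_to_ids(["@USER", "[URL]"]))  # [30873, 30874]

# save_pretrained() writes the added_tokens.json shown in this diff.
tokenizer.save_pretrained("finbert-v7")  # hypothetical output directory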
config.json CHANGED
@@ -1,26 +1,15 @@
 {
-  "_name_or_path": "ProsusAI/finbert",
+  "_name_or_path": "yiyanghkust/finbert-pretrain",
   "architectures": [
     "BertForMaskedLM"
   ],
   "attention_probs_dropout_prob": 0.1,
   "classifier_dropout": null,
-  "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
-  "id2label": {
-    "0": "positive",
-    "1": "negative",
-    "2": "neutral"
-  },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
-  "label2id": {
-    "negative": 1,
-    "neutral": 2,
-    "positive": 0
-  },
   "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
   "model_type": "bert",
@@ -32,5 +21,5 @@
   "transformers_version": "4.35.0",
   "type_vocab_size": 2,
   "use_cache": true,
-  "vocab_size": 30524
+  "vocab_size": 30875
 }
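In short: the base checkpoint switches from ProsusAI/finbert to yiyanghkust/finbert-pretrain, the sentiment id2label/label2id maps drop out (the architecture here is BertForMaskedLM, not a classifier), and vocab_size grows to 30875 to cover the two added tokens. A hedged sketch of the step that produces such a vocab_size (assumed workflow, not the actual training code):

from transformers import AutoModelForMaskedLM, AutoTokenizer

model = AutoModelForMaskedLM.from_pretrained("yiyanghkust/finbert-pretrain")
tokenizer = AutoTokenizer.from_pretrained("yiyanghkust/finbert-pretrain")
tokenizer.add_tokens(["@USER", "[URL]"])

# Grow the embedding matrices to match the enlarged tokenizer; this is what
# ends up recorded as "vocab_size": 30875 in config.json.
model.resize_token_embeddings(len(tokenizer))
print(model.config.vocab_size)  # 30875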
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:62c5cc157cb800258fd0389a7ad101a906eb4b281d53684dc15281e0850f9b6f
-size 438087048
+oid sha256:d71398bd06f692e8a69609dd3201029d372c80fa9ed9754dbba1c5a30ffdd07a
+size 439166724
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -8,7 +8,7 @@
     "single_word": false,
     "special": true
   },
-  "100": {
+  "2": {
     "content": "[UNK]",
     "lstrip": false,
     "normalized": false,
@@ -16,7 +16,7 @@
     "single_word": false,
     "special": true
   },
-  "101": {
+  "3": {
     "content": "[CLS]",
     "lstrip": false,
     "normalized": false,
@@ -24,7 +24,7 @@
     "single_word": false,
     "special": true
   },
-  "102": {
+  "4": {
     "content": "[SEP]",
     "lstrip": false,
     "normalized": false,
@@ -32,7 +32,7 @@
     "single_word": false,
     "special": true
   },
-  "103": {
+  "5": {
     "content": "[MASK]",
     "lstrip": false,
     "normalized": false,
@@ -40,7 +40,7 @@
     "single_word": false,
     "special": true
   },
-  "30522": {
+  "30873": {
     "content": "@USER",
     "lstrip": false,
     "normalized": true,
@@ -48,7 +48,7 @@
     "single_word": false,
     "special": false
   },
-  "30523": {
+  "30874": {
     "content": "[URL]",
     "lstrip": false,
     "normalized": true,
@@ -62,7 +62,7 @@
   "do_basic_tokenize": true,
   "do_lower_case": true,
   "mask_token": "[MASK]",
-  "model_max_length": 512,
+  "model_max_length": 1000000000000000019884624838656,
   "never_split": null,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acd47d8987763220961d8e748590b8c7e9e11346d2d8ecc9af6e134e57673548
+oid sha256:3e2b08864c8da6bd3d6aebac02d76b3a5cd1b503e9a2eb78b7a96e3a8088cda1
 size 4536
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff