binbin83 committed
Commit 8c32445
1 parent: 7d7fa93

Update spaCy pipeline

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ fr_present_tense_value-any-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
+ transformer/model filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ tags:
+ - spacy
+ - token-classification
+ language:
+ - fr
+ model-index:
+ - name: fr_present_tense_value
+   results:
+   - task:
+       name: NER
+       type: token-classification
+     metrics:
+     - name: NER Precision
+       type: precision
+       value: 0.7757731959
+     - name: NER Recall
+       type: recall
+       value: 0.7969991174
+     - name: NER F Score
+       type: f_score
+       value: 0.7862429256
+ ---
+ | Feature | Description |
+ | --- | --- |
+ | **Name** | `fr_present_tense_value` |
+ | **Version** | `0.0.1` |
+ | **spaCy** | `>=3.4.4,<3.5.0` |
+ | **Default Pipeline** | `transformer`, `ner` |
+ | **Components** | `transformer`, `ner` |
+ | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) |
+ | **Sources** | n/a |
+ | **License** | n/a |
+ | **Author** | [n/a]() |
+
+ ### Label Scheme
+
+ <details>
+
+ <summary>View label scheme (3 labels for 1 component)</summary>
+
+ | Component | Labels |
+ | --- | --- |
+ | **`ner`** | `PRESENT_ENNONCIATION`, `PRESENT_GENERIQUE`, `PRESENT_HISTORIQUE` |
+
+ </details>
+
+ ### Accuracy
+
+ | Type | Score |
+ | --- | --- |
+ | `ENTS_F` | 78.62 |
+ | `ENTS_P` | 77.58 |
+ | `ENTS_R` | 79.70 |
+ | `TRANSFORMER_LOSS` | 82001.90 |
+ | `NER_LOSS` | 52384.87 |
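
A minimal usage sketch (not part of the commit): it assumes the wheel shipped in this repo has been installed with pip, and the example sentence is purely illustrative.

```python
# Usage sketch -- assumes the packaged wheel has been installed first:
#   pip install fr_present_tense_value-any-py3-none-any.whl
import spacy

# Package name taken from the model card above.
nlp = spacy.load("fr_present_tense_value")

# Illustrative French text; the pipeline marks present-tense spans as entities.
doc = nlp("Je pense donc je suis. En 1789, le peuple prend la Bastille.")

# Each span carries one of the labels from the label scheme:
# PRESENT_ENNONCIATION, PRESENT_GENERIQUE, PRESENT_HISTORIQUE.
for ent in doc.ents:
    print(ent.text, "->", ent.label_)
```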
config.cfg ADDED
@@ -0,0 +1,145 @@
+ [paths]
+ train = "./data/data_present/training_data.spacy"
+ dev = "./data/data_present/testing_data.spacy"
+ vectors = null
+ init_tok2vec = null
+
+ [system]
+ gpu_allocator = "pytorch"
+ seed = 0
+
+ [nlp]
+ lang = "fr"
+ pipeline = ["transformer","ner"]
+ batch_size = 128
+ disabled = []
+ before_creation = null
+ after_creation = null
+ after_pipeline_creation = null
+ tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
+
+ [components]
+
+ [components.ner]
+ factory = "ner"
+ incorrect_spans_key = null
+ moves = null
+ scorer = {"@scorers":"spacy.ner_scorer.v1"}
+ update_with_oracle_cut_size = 100
+
+ [components.ner.model]
+ @architectures = "spacy.TransitionBasedParser.v2"
+ state_type = "ner"
+ extra_state_tokens = false
+ hidden_width = 64
+ maxout_pieces = 2
+ use_upper = false
+ nO = null
+
+ [components.ner.model.tok2vec]
+ @architectures = "spacy-transformers.TransformerListener.v1"
+ grad_factor = 1.0
+ pooling = {"@layers":"reduce_mean.v1"}
+ upstream = "*"
+
+ [components.transformer]
+ factory = "transformer"
+ max_batch_items = 4096
+ set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}
+
+ [components.transformer.model]
+ @architectures = "spacy-transformers.TransformerModel.v3"
+ name = "camembert-base"
+ mixed_precision = false
+
+ [components.transformer.model.get_spans]
+ @span_getters = "spacy-transformers.strided_spans.v1"
+ window = 128
+ stride = 96
+
+ [components.transformer.model.grad_scaler_config]
+
+ [components.transformer.model.tokenizer_config]
+ use_fast = true
+
+ [components.transformer.model.transformer_config]
+
+ [corpora]
+
+ [corpora.dev]
+ @readers = "spacy.Corpus.v1"
+ path = ${paths.dev}
+ max_length = 0
+ gold_preproc = false
+ limit = 0
+ augmenter = null
+
+ [corpora.train]
+ @readers = "spacy.Corpus.v1"
+ path = ${paths.train}
+ max_length = 0
+ gold_preproc = false
+ limit = 0
+ augmenter = null
+
+ [training]
+ accumulate_gradient = 3
+ dev_corpus = "corpora.dev"
+ train_corpus = "corpora.train"
+ seed = ${system.seed}
+ gpu_allocator = ${system.gpu_allocator}
+ dropout = 0.1
+ patience = 1000
+ max_epochs = 0
+ max_steps = 8000
+ eval_frequency = 200
+ frozen_components = []
+ annotating_components = []
+ before_to_disk = null
+
+ [training.batcher]
+ @batchers = "spacy.batch_by_padded.v1"
+ discard_oversize = true
+ size = 200
+ buffer = 256
+ get_length = null
+
+ [training.logger]
+ @loggers = "spacy.ConsoleLogger.v1"
+ progress_bar = true
+
+ [training.optimizer]
+ @optimizers = "Adam.v1"
+ beta1 = 0.9
+ beta2 = 0.999
+ L2_is_weight_decay = true
+ L2 = 0.01
+ grad_clip = 1.0
+ use_averages = false
+ eps = 0.00000001
+
+ [training.optimizer.learn_rate]
+ @schedules = "warmup_linear.v1"
+ warmup_steps = 250
+ total_steps = 20000
+ initial_rate = 0.00005
+
+ [training.score_weights]
+ ents_f = 1.0
+ ents_p = 0.0
+ ents_r = 0.0
+ ents_per_type = null
+
+ [pretraining]
+
+ [initialize]
+ vectors = ${paths.vectors}
+ init_tok2vec = ${paths.init_tok2vec}
+ vocab_data = null
+ lookups = null
+ before_init = null
+ after_init = null
+
+ [initialize.components]
+
+ [initialize.tokenizer]
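
The file above is a standard spaCy v3 training config (a `camembert-base` transformer feeding a transition-based NER head). A hedged sketch of how it could be inspected or re-run is below; the data paths are the ones declared in `[paths]` and would need to exist locally.

```python
# Sketch only: inspect the training config shipped with this pipeline.
# Retraining is normally run from the CLI, e.g.:
#   python -m spacy train config.cfg --output ./output --gpu-id 0
from spacy import util

config = util.load_config("config.cfg", interpolate=True)
print(config["nlp"]["pipeline"])                             # ['transformer', 'ner']
print(config["components"]["transformer"]["model"]["name"])  # 'camembert-base'
print(config["paths"]["train"], config["paths"]["dev"])      # ./data/... (paths assumed local)
```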
fr_present_tense_value-any-py3-none-any.whl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:428f7a202e2068faa9015ba8d655fffada036f2025d962c183c39edb965b88a3
+ size 392343335
meta.json ADDED
@@ -0,0 +1,66 @@
+ {
+   "lang":"fr",
+   "name":"present_tense_value",
+   "version":"0.0.1",
+   "description":"",
+   "author":"",
+   "email":"",
+   "url":"",
+   "license":"",
+   "spacy_version":">=3.4.4,<3.5.0",
+   "spacy_git_version":"Unknown",
+   "vectors":{
+     "width":0,
+     "vectors":0,
+     "keys":0,
+     "name":null
+   },
+   "labels":{
+     "transformer":[
+
+     ],
+     "ner":[
+       "PRESENT_ENNONCIATION",
+       "PRESENT_GENERIQUE",
+       "PRESENT_HISTORIQUE"
+     ]
+   },
+   "pipeline":[
+     "transformer",
+     "ner"
+   ],
+   "components":[
+     "transformer",
+     "ner"
+   ],
+   "disabled":[
+
+   ],
+   "performance":{
+     "ents_f":0.7862429256,
+     "ents_p":0.7757731959,
+     "ents_r":0.7969991174,
+     "ents_per_type":{
+       "PRESENT_ENNONCIATION":{
+         "p":0.8110465116,
+         "r":0.8315946349,
+         "f":0.821192053
+       },
+       "PRESENT_GENERIQUE":{
+         "p":0.5737704918,
+         "r":0.593220339,
+         "f":0.5833333333
+       },
+       "PRESENT_HISTORIQUE":{
+         "p":0.819112628,
+         "r":0.8421052632,
+         "f":0.830449827
+       }
+     },
+     "transformer_loss":820.0190143473,
+     "ner_loss":523.8487311951
+   },
+   "requirements":[
+     "spacy-transformers>=1.1.9,<1.2.0"
+   ]
+ }
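
Since each per-type `f` value is the harmonic mean of its `p` and `r`, the figures in meta.json can be cross-checked directly; a small verification sketch (reading meta.json from the repo root) is shown below.

```python
# Cross-check: F1 = 2 * p * r / (p + r) for each entity type in meta.json.
import json

with open("meta.json", encoding="utf8") as fh:
    meta = json.load(fh)

for label, scores in meta["performance"]["ents_per_type"].items():
    p, r = scores["p"], scores["r"]
    f1 = 2 * p * r / (p + r)
    print(f"{label}: reported f={scores['f']:.4f}, recomputed f={f1:.4f}")
```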
ner/cfg ADDED
@@ -0,0 +1,13 @@
+ {
+   "moves":null,
+   "update_with_oracle_cut_size":100,
+   "multitasks":[
+
+   ],
+   "min_action_freq":1,
+   "learn_tokens":false,
+   "beam_width":1,
+   "beam_density":0.0,
+   "beam_update_prob":0.0,
+   "incorrect_spans_key":null
+ }
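
These are the serialized settings of the transition-based NER component; with `beam_width` of 1 and `beam_update_prob` of 0.0, decoding is plain greedy search. A small sketch reading the file (path assumed relative to the pipeline directory):

```python
# Sketch: read the serialized NER settings shown above (path is assumed).
import json

with open("ner/cfg", encoding="utf8") as fh:
    ner_cfg = json.load(fh)

# beam_width == 1 means greedy transition-based decoding.
print(ner_cfg["beam_width"], ner_cfg["update_with_oracle_cut_size"])  # 1 100
```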
ner/model ADDED
Binary file (220 kB).
 
ner/moves ADDED
@@ -0,0 +1 @@
+ (binary msgpack content; recoverable moves data: {"0":{},"1":{"PRESENT_ENNONCIATION":2382,"PRESENT_HISTORIQUE":1105,"PRESENT_GENERIQUE":833},"2":{"PRESENT_ENNONCIATION":2382,"PRESENT_HISTORIQUE":1105,"PRESENT_GENERIQUE":833},"3":{"PRESENT_ENNONCIATION":2382,"PRESENT_HISTORIQUE":1105,"PRESENT_GENERIQUE":833},"4":{"PRESENT_ENNONCIATION":2382,"PRESENT_HISTORIQUE":1105,"PRESENT_GENERIQUE":833,"":1},"5":{"":1}}, plus cfg/neg_key)
tokenizer ADDED
The diff for this file is too large to render. See raw diff
 
transformer/cfg ADDED
@@ -0,0 +1,3 @@
+ {
+   "max_batch_items":4096
+ }
transformer/model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5fd6ac32d51801f2dcd022533e01b0c38a507687d47f768aa2050e5962336f9
+ size 445802658
vocab/key2row ADDED
@@ -0,0 +1 @@
+
vocab/lookups.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76be8b528d0075f7aae98d6fa57a6d3c83ae480a8469e668d7b0af968995ac71
+ size 1
vocab/strings.json ADDED
The diff for this file is too large to render. See raw diff
 
vocab/vectors ADDED
Binary file (128 Bytes).
 
vocab/vectors.cfg ADDED
@@ -0,0 +1,3 @@
+ {
+   "mode":"default"
+ }