theodoreguillet committed
Commit: e4fcf0b
Parent: 9b8ee3f

initial commit

1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "word_embedding_dimension": 768,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": true,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false
+ }
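The only mode enabled here is mean pooling over token embeddings. A minimal sketch, assuming sentence-transformers is installed, of the module this file describes (the standalone construction is illustrative; `SentenceTransformer` builds it automatically when loading the model):

```python
# Illustrative: build the Pooling module described by 1_Pooling/config.json.
from sentence_transformers.models import Pooling

pooling = Pooling(
    word_embedding_dimension=768,
    pooling_mode_cls_token=False,
    pooling_mode_mean_tokens=True,   # the only enabled mode: plain mean pooling
    pooling_mode_max_tokens=False,
    pooling_mode_mean_sqrt_len_tokens=False,
)
# The repr should match the Pooling line shown in the README's
# Model Architecture section below.
print(pooling)
```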
README.md ADDED
@@ -0,0 +1,106 @@
+ ---
+ pipeline_tag: sentence-similarity
+ language: fr
+ tags:
+ - sentence-similarity
+ - transformers
+ - fr
+ - flaubert
+ - sentence-transformers
+ - feature-extraction
+ - xnli
+ - stsb_multi_mt
+ datasets:
+ - xnli
+ - stsb_multi_mt
+ ---
+
+ # inokufu/flaubert-base-uncased-xnli-sts
+
+ This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences and paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.
+
+ ## Details
+
+ This model is based on the French flaubert-base-uncased pre-trained model [1, 2].
+
+ It was first fine-tuned on a natural language inference task (XNLI) [3], which trains the model to recognize the relation between two sentences (contradiction, neutral, or entailment).
+
+ It was then fine-tuned on a semantic textual similarity task (STS-fr data) [4], which trains the model to estimate how similar two sentences are.
+
+ This two-stage fine-tuning gives the model a much better semantic representation of sentences than the base model provides.
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is straightforward once [sentence-transformers](https://www.SBERT.net) is installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ sentences = ["Apprendre le python", "Devenir expert en comptabilité"]
+
+ model = SentenceTransformer('inokufu/flaubert-base-uncased-xnli-sts')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+ ## Usage (HuggingFace Transformers)
+
+ Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.
+
+ ```python
+ from transformers import AutoTokenizer, AutoModel
+ import torch
+
+
+ # Mean pooling: take the attention mask into account for correct averaging
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
+
+ # Sentences we want sentence embeddings for
+ sentences = ["Apprendre le python", "Devenir expert en comptabilité"]
+
+ # Load model from HuggingFace Hub
+ tokenizer = AutoTokenizer.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts')
+ model = AutoModel.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts')
+
+ # Tokenize sentences
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
+
+ # Compute token embeddings
+ with torch.no_grad():
+     model_output = model(**encoded_input)
+
+ # Perform pooling. In this case, mean pooling.
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+
+ print("Sentence embeddings:")
+ print(sentence_embeddings)
+ ```
+
+ ## Evaluation Results
+
+ STS (fr) score: 83.07% (test-set cosine Spearman correlation; see the evaluation CSVs in this commit)
+
+ ## Model Architecture
+
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: FlaubertModel
+   (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+ )
+ ```
+
+ ## References
+
+ [1] https://hal.archives-ouvertes.fr/hal-02784776v3/document <br>
+ [2] https://huggingface.co/flaubert/flaubert_base_uncased <br>
+ [3] https://arxiv.org/abs/1809.05053 <br>
+ [4] https://huggingface.co/datasets/stsb_multi_mt <br>
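As a follow-up to the README's usage examples, a minimal sketch for turning the embeddings into a similarity score (plain PyTorch cosine similarity; the two sentences are the ones used above):

```python
# Score the similarity of two sentences with the model's embeddings.
import torch
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('inokufu/flaubert-base-uncased-xnli-sts')
sentences = ["Apprendre le python", "Devenir expert en comptabilité"]

# Ask encode() for tensors instead of the default numpy arrays
embeddings = model.encode(sentences, convert_to_tensor=True)

# Cosine similarity between the two 768-dimensional embeddings
score = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=0)
print(f"cosine similarity: {score.item():.4f}")
```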
config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "inokufu/flaubert-base-uncased-xnli-sts",
+   "amp": 1,
+   "architectures": [
+     "FlaubertModel"
+   ],
+   "asm": false,
+   "attention_dropout": 0.1,
+   "bos_index": 0,
+   "bos_token_id": 0,
+   "bptt": 512,
+   "causal": false,
+   "clip_grad_norm": 5,
+   "dropout": 0.1,
+   "emb_dim": 768,
+   "embed_init_std": 0.02209708691207961,
+   "encoder_only": true,
+   "end_n_top": 5,
+   "eos_index": 1,
+   "fp16": true,
+   "gelu_activation": true,
+   "group_by_size": true,
+   "id2lang": {
+     "0": "fr"
+   },
+   "init_std": 0.02,
+   "is_encoder": true,
+   "lang2id": {
+     "fr": 0
+   },
+   "lang_id": 0,
+   "langs": [
+     "fr"
+   ],
+   "layer_norm_eps": 1e-12,
+   "layerdrop": 0.0,
+   "lg_sampling_factor": -1,
+   "lgs": "fr",
+   "mask_index": 5,
+   "mask_token_id": 0,
+   "max_batch_size": 0,
+   "max_position_embeddings": 512,
+   "max_vocab": -1,
+   "mlm_steps": [
+     [
+       "fr",
+       null
+     ]
+   ],
+   "model_type": "flaubert",
+   "n_heads": 12,
+   "n_langs": 1,
+   "n_layers": 12,
+   "pad_index": 2,
+   "pad_token_id": 2,
+   "pre_norm": false,
+   "sample_alpha": 0,
+   "share_inout_emb": true,
+   "sinusoidal_embeddings": false,
+   "start_n_top": 5,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "tokens_per_batch": -1,
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.3",
+   "unk_index": 3,
+   "use_lang_emb": true,
+   "vocab_size": 67542,
+   "word_blank": 0,
+   "word_dropout": 0,
+   "word_keep": 0.1,
+   "word_mask": 0.8,
+   "word_mask_keep_rand": "0.8,0.1,0.1",
+   "word_pred": 0.15,
+   "word_rand": 0.1,
+   "word_shuffle": 0
+ }
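A quick way to verify the hyperparameters above without downloading the full weights is to fetch only the config; a small sketch using transformers' `AutoConfig`:

```python
# Fetch just the config and confirm the architecture it describes.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("inokufu/flaubert-base-uncased-xnli-sts")

print(config.model_type)  # "flaubert"
print(config.emb_dim)     # 768, matching word_embedding_dimension in 1_Pooling/config.json
print(config.n_layers)    # 12 transformer layers
print(config.n_heads)     # 12 attention heads
print(config.vocab_size)  # 67542
```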
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.1.0",
+     "transformers": "4.11.3",
+     "pytorch": "1.9.0+cu102"
+   }
+ }
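These are the library versions the model was saved with. A small sketch, assuming the file has been downloaded to the working directory, for checking them against the current environment (exact matches are rarely required, but large version gaps can change behavior):

```python
# Compare the versions recorded at save time with the installed ones.
import json

import sentence_transformers
import torch
import transformers

with open("config_sentence_transformers.json") as f:
    recorded = json.load(f)["__version__"]

installed = {
    "sentence_transformers": sentence_transformers.__version__,
    "transformers": transformers.__version__,
    "pytorch": torch.__version__,
}

for name, version in recorded.items():
    print(f"{name}: recorded {version}, installed {installed[name]}")
```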
eval/similarity_evaluation_sts-dev_results.csv ADDED
@@ -0,0 +1,5 @@
+ epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
+ 0,-1,0.8476134716760887,0.8468186524889956,0.8374628393139093,0.8393587213401801,0.8361178664176142,0.8379331088259698,0.822123471306284,0.8200800390948763
+ 1,-1,0.8585223492907658,0.8570807869134168,0.8424447467205926,0.8452738848553554,0.8416316010899966,0.8442137608694786,0.8304583702559676,0.8305974975814222
+ 2,-1,0.8571486998231743,0.8559970703038707,0.8426665991938748,0.8453606609929885,0.8418315419031606,0.8446965654748465,0.829168847143357,0.8290776387285839
+ 3,-1,0.8578583320115634,0.8563828855356073,0.8415682855261146,0.8444170090920187,0.8408005059135651,0.8439627134028199,0.8303650556789606,0.8306674833948446
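Dev-set cosine Spearman peaks at epoch 1 (about 0.8571). A small sketch, assuming pandas is installed and the CSV is local, for picking the best epoch programmatically:

```python
# Select the best training epoch by dev-set cosine Spearman correlation.
import pandas as pd

df = pd.read_csv("similarity_evaluation_sts-dev_results.csv")
best = df.loc[df["cosine_spearman"].idxmax()]
print(f"best epoch: {int(best['epoch'])}, cosine_spearman: {best['cosine_spearman']:.4f}")
# From the data above: epoch 1, cosine_spearman 0.8571
```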
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
modules.json ADDED
@@ -0,0 +1,14 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   }
+ ]
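modules.json is how sentence-transformers reassembles the pipeline shown in the README's Model Architecture section: each entry names a module class and the subdirectory holding its config. A hedged sketch of the equivalent manual assembly (loading by model name does all of this automatically):

```python
# Assemble the same two-module pipeline by hand, for illustration only.
from sentence_transformers import SentenceTransformer, models

# idx 0: the transformer module, whose files live at the repository root (path "")
word_embedding_model = models.Transformer(
    "inokufu/flaubert-base-uncased-xnli-sts", max_seq_length=512
)

# idx 1: mean pooling, configured as in 1_Pooling/config.json
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),  # 768
    pooling_mode_mean_tokens=True,
)

model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
```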
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e833a8a240f04e8e3b19bacad9b0b6ff09768429d47c116e701b35a70a0c40b
+ size 549350969
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 512,
+   "do_lower_case": true
+ }
similarity_evaluation_sts-test_results.csv ADDED
@@ -0,0 +1,3 @@
+ epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
+ -1,-1,0.8281639363790627,0.8306544280206849,0.8144472895806177,0.8190524312029783,0.8146705390507208,0.8188915767762216,0.7682952962055747,0.7565634657936158
+ -1,-1,0.8281638924716349,0.830654716456512,0.8144472832412396,0.8190596125240516,0.8146705315250665,0.8188990444980802,0.7682951830461527,0.7565642629626018
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "</s>", "mask_token": "<special1>", "additional_special_tokens": ["<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>"]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "</s>", "mask_token": "<special1>", "additional_special_tokens": ["<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>"], "lang2id": null, "id2lang": null, "do_lowercase_and_remove_accent": true, "do_lower_case": true, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "inokufu/flaubert-base-uncased-xnli-sts", "tokenizer_class": "FlaubertTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff