osanseviero (HF staff) committed
Commit 81f5459
1 parent: 4972994
.gitattributes CHANGED
@@ -14,3 +14,4 @@
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,84 @@
+ ---
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - causal-lm
+ license:
+ - cc-by-sa-4.0
+ ---
+
+ # TODO: Name of Model
+
+ TODO: Description
+
+ ## Model Description
+ TODO: Add relevant content
+
+ (0) Base Transformer Type: RobertaModel
+
+ (1) Pooling: mean
+
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model becomes easy when you have [sentence-transformers](https://github.com/UKPLab/sentence-transformers) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence"]
+
+ model = SentenceTransformer(TODO)
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
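For sentence similarity, compare embeddings by cosine similarity. A minimal sketch, assuming the model id `osanseviero/full-sentence-distillroberta2` (inferred from `_name_or_path` in this commit's config.json; the actual repo name may differ):

```python
import numpy as np
from sentence_transformers import SentenceTransformer

# Model id is an assumption inferred from config.json in this commit
model = SentenceTransformer("osanseviero/full-sentence-distillroberta2")

# Encode two sentences and compute their cosine similarity
a, b = model.encode(["This is an example sentence",
                     "Each sentence is converted to a vector"])
score = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
print(score)
```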
+
+
+ ## Usage (HuggingFace Transformers)
+
+ ```python
+ from transformers import AutoTokenizer, AutoModel
+ import torch
+
+ # Pooling is up to you; this example uses max pooling.
+ # Max Pooling - take the max value over time for every dimension.
+ def max_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     token_embeddings[input_mask_expanded == 0] = -1e9  # set padding tokens to a large negative value
+     max_over_time = torch.max(token_embeddings, 1)[0]
+     return max_over_time
+
+ # Sentences we want sentence embeddings for
+ sentences = ['This is an example sentence']
+
+ # Load model from HuggingFace Hub
+ tokenizer = AutoTokenizer.from_pretrained(TODO)
+ model = AutoModel.from_pretrained(TODO)
+
+ # Tokenize sentences
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt')
+
+ # Compute token embeddings
+ with torch.no_grad():
+     model_output = model(**encoded_input)
+
+ # Perform pooling. In this case, max pooling.
+ sentence_embeddings = max_pooling(model_output, encoded_input['attention_mask'])
+
+ print("Sentence embeddings:")
+ print(sentence_embeddings)
+ ```
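The model description above says mean pooling, while the snippet uses max pooling. For reference, a mean-pooling variant that can replace `max_pooling` in the code above; this mirrors the usual sentence-transformers recipe, not anything stated in this commit:

```python
import torch

# Mean pooling: average the token embeddings, ignoring padding positions
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # all token embeddings
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = torch.sum(token_embeddings * mask, dim=1)
    counts = torch.clamp(mask.sum(dim=1), min=1e-9)  # avoid division by zero
    return summed / counts
```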
+
+
+
+ ## TODO: Training Procedure
+
+ ## TODO: Evaluation Results
+
+ ## TODO: Citing & Authors
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "/Users/osanseviero/.cache/torch/sentence_transformers/sbert.net_models_osanseviero_full-sentence-distillroberta2/",
+   "architectures": [
+     "RobertaModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "output_hidden_states": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "transformers_version": "4.6.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
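The config describes a 6-layer RoBERTa encoder with hidden size 768 (a distilled size, matching the "distillroberta" name). To sanity-check these values without downloading the weights, a sketch; the model id is an assumption inferred from `_name_or_path` above:

```python
from transformers import AutoConfig

# Model id is an assumption inferred from config.json in this commit
config = AutoConfig.from_pretrained("osanseviero/full-sentence-distillroberta2")
print(config.model_type)         # roberta
print(config.num_hidden_layers)  # 6
print(config.hidden_size)        # 768
```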
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bebbcc7ead5534be3f4dee3e5a1883e2a7e185f1c610256eaf5d804332fc4f9
+ size 328477339
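This LFS pointer is the Flax/JAX copy of the weights, which is why `*.msgpack` was added to .gitattributes in this commit. A sketch of loading it, assuming a transformers install with Flax support and the same inferred model id:

```python
from transformers import FlaxRobertaModel

# Loads flax_model.msgpack; model id inferred from config.json in this commit
model = FlaxRobertaModel.from_pretrained("osanseviero/full-sentence-distillroberta2")
```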
merges.txt ADDED
The diff for this file is too large to render. See raw diff
modules.json ADDED
@@ -0,0 +1,14 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   }
+ ]
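modules.json wires the sentence-transformers pipeline: module 0 wraps the transformer encoder, module 1 the pooling layer configured under 1_Pooling/. A sketch of building the equivalent pipeline by hand, assuming mean pooling (per the description above) and the inferred model id:

```python
from sentence_transformers import SentenceTransformer, models

# Model id is an assumption inferred from config.json in this commit
word_embedding_model = models.Transformer(
    "osanseviero/full-sentence-distillroberta2", max_seq_length=128)
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),
    pooling_mode_mean_tokens=True)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
```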
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4427e4ab70e3618994ff0caca2fc2243b17e65d25612fed844b986954d425d1
+ size 328519167
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 128,
+   "do_lower_case": false
+ }
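These values mean inputs are truncated to 128 tokens and are not lowercased. sentence-transformers exposes this limit at runtime; a sketch, again with the inferred model id:

```python
from sentence_transformers import SentenceTransformer

# Model id is an assumption inferred from config.json in this commit
model = SentenceTransformer("osanseviero/full-sentence-distillroberta2")
print(model.max_seq_length)  # 128, from sentence_bert_config.json
model.max_seq_length = 256   # tokens beyond this limit are truncated
```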
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_special_tokens": false, "model_max_length": 512, "special_tokens_map_file": "/home/ukp-reimers/.cache/torch/transformers/4f4743e3f4fbeb763d116a0f2697f5e03117bd130711d90eaf795aeaeb7c4659.01d47f83d2e88283cc7f6be55eaef5d08d20297fc3bc1c1618ac15c35d1b97dd", "full_tokenizer_file": null, "name_or_path": "/Users/osanseviero/.cache/torch/sentence_transformers/sbert.net_models_osanseviero_full-sentence-distillroberta2/"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff