Tom Aarsen committed on
Commit
89a809c
2 Parent(s): 917a3bb 72bd416

Merge branch 'main' into integrations/sentence_transformers

Browse files
Files changed (2) hide show
  1. README.md +5 -2
  2. config.json +3 -3
README.md CHANGED
@@ -2666,12 +2666,15 @@ Training data to train the models is released in its entirety. For more details,
2666
 
2667
  ## Usage
2668
 
 
 
 
2669
  ### Sentence Transformers
2670
  ```python
2671
  from sentence_transformers import SentenceTransformer
2672
 
2673
  model = SentenceTransformer("nomic-ai/nomic-embed-text-v1-unsupervised", trust_remote_code=True)
2674
- sentences = ['What is TSNE?', 'Who is Laurens van der Maaten?']
2675
  embeddings = model.encode(sentences)
2676
  print(embeddings)
2677
  ```
@@ -2687,7 +2690,7 @@ def mean_pooling(model_output, attention_mask):
2687
  input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
2688
  return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
2689
 
2690
- sentences = ['What is TSNE?', 'Who is Laurens van der Maaten?']
2691
 
2692
  tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
2693
  model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
 
2666
 
2667
  ## Usage
2668
 
2669
+ Note `nomic-embed-text` requires prefixes! We support the prefixes `[search_query, search_document, classification, clustering]`.
2670
+ For retrieval applications, you should prepend `search_document` for all your documents and `search_query` for your queries.
2671
+
2672
  ### Sentence Transformers
2673
  ```python
2674
  from sentence_transformers import SentenceTransformer
2675
 
2676
  model = SentenceTransformer("nomic-ai/nomic-embed-text-v1-unsupervised", trust_remote_code=True)
2677
+ sentences = ['search_query: What is TSNE?', 'search_query: Who is Laurens van der Maaten?']
2678
  embeddings = model.encode(sentences)
2679
  print(embeddings)
2680
  ```
 
2690
  input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
2691
  return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
2692
 
2693
+ sentences = ['search_query: What is TSNE?', 'search_query: Who is Laurens van der Maaten?']
2694
 
2695
  tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
2696
  model = AutoModel.from_pretrained('nomic-ai/nomic-embed-text-v1-unsupervised', trust_remote_code=True)
config.json CHANGED
@@ -11,7 +11,7 @@
11
  "bos_token_id": null,
12
  "causal": false,
13
  "dense_seq_output": true,
14
- "embd_pdrop": 0.0,
15
  "eos_token_id": null,
16
  "fused_bias_fc": true,
17
  "fused_dropout_add_ln": true,
@@ -31,7 +31,7 @@
31
  "prenorm": false,
32
  "qkv_proj_bias": false,
33
  "reorder_and_upcast_attn": false,
34
- "resid_pdrop": 0.0,
35
  "rotary_emb_base": 1000,
36
  "rotary_emb_fraction": 1.0,
37
  "rotary_emb_interleaved": false,
@@ -40,7 +40,7 @@
40
  "scale_attn_by_inverse_layer_idx": false,
41
  "scale_attn_weights": true,
42
  "summary_activation": null,
43
- "summary_first_dropout": 0.0,
44
  "summary_proj_to_labels": true,
45
  "summary_type": "cls_index",
46
  "summary_use_proj": true,
 
11
  "bos_token_id": null,
12
  "causal": false,
13
  "dense_seq_output": true,
14
+ "embd_pdrop": 0.1,
15
  "eos_token_id": null,
16
  "fused_bias_fc": true,
17
  "fused_dropout_add_ln": true,
 
31
  "prenorm": false,
32
  "qkv_proj_bias": false,
33
  "reorder_and_upcast_attn": false,
34
+ "resid_pdrop": 0.1,
35
  "rotary_emb_base": 1000,
36
  "rotary_emb_fraction": 1.0,
37
  "rotary_emb_interleaved": false,
 
40
  "scale_attn_by_inverse_layer_idx": false,
41
  "scale_attn_weights": true,
42
  "summary_activation": null,
43
+ "summary_first_dropout": 0.1,
44
  "summary_proj_to_labels": true,
45
  "summary_type": "cls_index",
46
  "summary_use_proj": true,