Add transformers.js example code

#5
by Xenova (HF staff) - opened
Files changed (1)
  1. README.md +27 -0
README.md CHANGED
@@ -4,6 +4,7 @@ tags:
  - sentence-transformers
  - gte
  - mteb
+ - transformers.js
  license: apache-2.0
  language:
  - en
@@ -2684,6 +2685,32 @@ embeddings = model.encode(sentences)
  print(cos_sim(embeddings[0], embeddings[1]))
  ```

+ Use with `transformers.js`:
+
+ ```js
+ // npm i @xenova/transformers
+ import { pipeline, dot } from '@xenova/transformers';
+
+ // Create feature extraction pipeline
+ const extractor = await pipeline('feature-extraction', 'Alibaba-NLP/gte-base-en-v1.5', {
+     quantized: false, // Comment out this line to use the quantized version
+ });
+
+ // Generate sentence embeddings
+ const sentences = [
+     "what is the capital of China?",
+     "how to implement quick sort in python?",
+     "Beijing",
+     "sorting algorithms"
+ ]
+ const output = await extractor(sentences, { normalize: true, pooling: 'cls' });
+
+ // Compute similarity scores
+ const [source_embeddings, ...document_embeddings] = output.tolist();
+ const similarities = document_embeddings.map(x => 100 * dot(source_embeddings, x));
+ console.log(similarities); // [34.504930869007296, 64.03973265120138, 19.520042686034362]
+ ```
+
  ## Training Details

  ### Training Data
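
Side note on the scoring step: because the pipeline is called with `normalize: true`, the embeddings are unit-length, so the `dot` product in the added example is exactly cosine similarity (scaled by 100). A minimal sketch using the library's `cos_sim` helper instead (model id and sentences taken from the diff; everything else here is illustrative, not part of the PR):

```js
// Sketch, not part of the PR: with normalized embeddings,
// cos_sim(a, b) equals dot(a, b), so this reproduces the same
// comparison as above, just unscaled.
import { pipeline, cos_sim } from '@xenova/transformers';

// No `quantized: false` here, so the default quantized weights are used;
// scores may differ slightly from the fp32 numbers shown in the diff.
const extractor = await pipeline('feature-extraction', 'Alibaba-NLP/gte-base-en-v1.5');

const output = await extractor(
    ['what is the capital of China?', 'Beijing'],
    { pooling: 'cls', normalize: true },
);

// tolist() converts the output Tensor into nested JS arrays.
const [query, doc] = output.tolist();
console.log(cos_sim(query, doc)); // cosine similarity in [-1, 1]
```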