Transformers.js - Enable external data format in Node.js

#2
opened by Xenova (HF staff)
Files changed (2)
  1. README.md +3 -3
  2. config.json +7 -2
README.md CHANGED
@@ -7,15 +7,15 @@ https://huggingface.co/hf-audio/wav2vec2-bert-CV16-en with ONNX weights to be co
 
 ## Usage (Transformers.js)
 
-If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@xenova/transformers) using:
+If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
 ```bash
-npm i @xenova/transformers
+npm i @huggingface/transformers
 ```
 
 You can then use the model for speech recognition with:
 
 ```js
-import { pipeline } from '@xenova/transformers';
+import { pipeline } from '@huggingface/transformers';
 
 // Create an Automatic Speech Recognition pipeline
 const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/wav2vec2-bert-CV16-en');
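For context, the model-card snippet above stops at creating the pipeline. A minimal sketch of actually running it is shown below; the audio URL is a placeholder of my choosing (not part of this PR), and the pipeline also accepts raw audio samples such as a Float32Array.

```js
// Sketch only: transcribe an audio file with the pipeline created above.
// Replace the placeholder URL with any reachable audio file.
const url = 'https://example.com/sample.wav';
const output = await transcriber(url);
console.log(output);
// e.g. { text: '...' }
```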
config.json CHANGED
@@ -77,5 +77,10 @@
   "use_intermediate_ffn_before_adapter": false,
   "use_weighted_layer_sum": false,
   "vocab_size": 33,
-  "xvector_output_dim": 512
-}
+  "xvector_output_dim": 512,
+  "transformers.js_config": {
+    "use_external_data_format": {
+      "model.onnx": true
+    }
+  }
+}
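For background, ONNX graphs whose weights exceed the 2 GB protobuf limit keep those weights in a companion external-data file next to the .onnx graph. The `transformers.js_config.use_external_data_format` map added above marks `model.onnx` as such a model, so Transformers.js running under Node.js knows to fetch and load the companion file as well, with no code changes on the user's side. The sketch below illustrates this; the per-call `use_external_data_format` option is an assumption on my part and is not shown in this PR, so omit it to rely purely on the config.json entry.

```js
import { pipeline } from '@huggingface/transformers';

// With the config.json entry above, the loader detects that model.onnx uses
// the external data format and pulls in its companion weights file
// automatically when running under Node.js.
const transcriber = await pipeline(
  'automatic-speech-recognition',
  'Xenova/wav2vec2-bert-CV16-en',
  // Assumption (not from this PR): the flag may also be forced per call.
  { use_external_data_format: true },
);
```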