Update README.md
Browse files
README.md
CHANGED
@@ -44,6 +44,8 @@ fs.writeFileSync('result.wav', wav.toBuffer());
|
|
44 |
|
45 |
**Example:** Load processor, tokenizer, and models separately.

```js
// Load the tokenizer and processor
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/speecht5_tts');
const processor = await AutoProcessor.from_pretrained('Xenova/speecht5_tts');
@@ -76,6 +78,19 @@ console.log(waveform)
|
|
// data: Float32Array(26112) [ -0.00043630177970044315, -0.00018082228780258447, ... ],
// }
```
---

Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
**Example:** Load processor, tokenizer, and models separately.

```js
import { AutoTokenizer, AutoProcessor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, Tensor } from '@xenova/transformers';

// Load the tokenizer and processor
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/speecht5_tts');
const processor = await AutoProcessor.from_pretrained('Xenova/speecht5_tts');
// data: Float32Array(26112) [ -0.00043630177970044315, -0.00018082228780258447, ... ],
// }
```

Optionally, save the audio to a wav file (Node.js):
```js
// Write to file (Node.js)
import wavefile from 'wavefile';
import fs from 'fs';

const wav = new wavefile.WaveFile();
wav.fromScratch(1, processor.feature_extractor.config.sampling_rate, '32f', waveform.data);
fs.writeFileSync('out.wav', wav.toBuffer());
```
---

Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).