ruslanmv committed on
Commit
cd3ba6d
1 Parent(s): 464e901
Files changed (7)
  1. README.md +45 -23
  2. app.py +0 -56
  3. config.yml +79 -0
  4. model.h5 +3 -0
  5. packages.txt +0 -1
  6. processor.json +1 -0
  7. requirements.txt +0 -5
README.md CHANGED
@@ -1,33 +1,55 @@
  ---
- title: TensorFlowTTS
- emoji: 💩
- colorFrom: green
- colorTo: indigo
- sdk: gradio
- app_file: app.py
- pinned: false
+ tags:
+ - TensorFlowTTS
+ - audio
+ - text-to-speech
+ - text-to-mel
+ language: eng
+ license: apache-2.0
+ datasets:
+ - LJSpeech
+ widget:
+ - text: "How are you?"
  ---

- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio` or `streamlit`
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
+
+ This repository provides a pretrained [FastSpeech](https://arxiv.org/abs/1905.09263) model trained on the LJSpeech dataset (English). For details of the model, see [TensorFlowTTS](https://github.com/TensorSpeech/TensorFlowTTS).
+
+ ## Install TensorFlowTTS
+ First, install TensorFlowTTS with the following command:
+ ```bash
+ pip install TensorFlowTTS
+ ```
+
+ ### Converting your Text to a Mel Spectrogram
+ ```python
+ import numpy as np
+ import soundfile as sf
+ import yaml
+
+ import tensorflow as tf
+
+ from tensorflow_tts.inference import AutoProcessor
+ from tensorflow_tts.inference import TFAutoModel
+
+ processor = AutoProcessor.from_pretrained("ruslanmv/tts-fastspeech-ljspeech-en")
+ fastspeech = TFAutoModel.from_pretrained("ruslanmv/tts-fastspeech-ljspeech-en")
+
+ text = "How are you?"
+
+ input_ids = processor.text_to_sequence(text)
+
+ mel_before, mel_after, duration_outputs = fastspeech.inference(
+     input_ids=tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
+     speaker_ids=tf.convert_to_tensor([0], dtype=tf.int32),
+     speed_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
+ )
+ ```
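The snippet above stops at the mel spectrogram. As a minimal follow-up sketch, continuing the variables from that snippet, the mel can be vocoded to a waveform with MB-MelGAN; the checkpoint name is taken from the deleted `app.py` below, not from this README:

```python
# Continuation of the README snippet above (reuses sf, TFAutoModel, mel_after).
# The MB-MelGAN checkpoint id comes from the deleted app.py, not this README.
mb_melgan = TFAutoModel.from_pretrained("tensorspeech/tts-mb_melgan-ljspeech-en")

# Vocode the refined mel spectrogram and save it; LJSpeech audio is 22.05 kHz.
audio = mb_melgan.inference(mel_after)[0, :, 0]
sf.write("./audio.wav", audio, 22050, "PCM_16")
```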
app.py DELETED
@@ -1,56 +0,0 @@
- import numpy as np
- import soundfile as sf
- import yaml
-
- import tensorflow as tf
-
- from tensorflow_tts.inference import TFAutoModel
- from tensorflow_tts.inference import AutoProcessor
- import gradio as gr
-
- # initialize the fastspeech2 model.
- fastspeech2 = TFAutoModel.from_pretrained("tensorspeech/tts-fastspeech2-ljspeech-en")
-
- # initialize the mb_melgan vocoder.
- mb_melgan = TFAutoModel.from_pretrained("tensorspeech/tts-mb_melgan-ljspeech-en")
-
- # initialize the text processor used for inference.
- processor = AutoProcessor.from_pretrained("tensorspeech/tts-fastspeech2-ljspeech-en")
-
- def inference(text):
-     input_ids = processor.text_to_sequence(text)
-
-     # fastspeech2 inference: text ids -> mel spectrograms
-     mel_before, mel_after, duration_outputs, _, _ = fastspeech2.inference(
-         input_ids=tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
-         speaker_ids=tf.convert_to_tensor([0], dtype=tf.int32),
-         speed_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
-         f0_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
-         energy_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
-     )
-
-     # melgan inference: mel spectrograms -> waveforms
-     audio_before = mb_melgan.inference(mel_before)[0, :, 0]
-     audio_after = mb_melgan.inference(mel_after)[0, :, 0]
-
-     # save to file
-     sf.write('./audio_before.wav', audio_before, 22050, "PCM_16")
-     sf.write('./audio_after.wav', audio_after, 22050, "PCM_16")
-     return './audio_after.wav'
-
- inputs = gr.inputs.Textbox(lines=5, label="Input Text")
- outputs = gr.outputs.Audio(type="file", label="Output Audio")
-
- title = "TensorFlow TTS"
- description = "Gradio demo for TensorFlowTTS: Real-Time State-of-the-art Speech Synthesis for TensorFlow 2. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
- article = "<p style='text-align: center'><a href='https://tensorspeech.github.io/TensorFlowTTS/'>TensorFlowTTS: Real-Time State-of-the-art Speech Synthesis for Tensorflow 2</a> | <a href='https://github.com/TensorSpeech/TensorFlowTTS'>Github Repo</a></p>"
-
- examples = [
-     ["TensorFlowTTS provides real-time state-of-the-art speech synthesis architectures such as Tacotron-2, MelGAN, Multiband-MelGAN, FastSpeech, and FastSpeech2, based on TensorFlow 2."],
-     ["With TensorFlow 2, we can speed up training/inference, optimize further with fake-quantization-aware training and pruning, and make TTS models run faster than real time and deployable on mobile devices or embedded systems."]
- ]
-
- gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
config.yml ADDED
@@ -0,0 +1,79 @@
+ # This is the hyperparameter configuration file for FastSpeech v3.
+ # Please make sure this is adjusted for the LJSpeech dataset. If you want to
+ # apply it to another dataset, you may need to change some parameters carefully.
+ # This configuration runs for 200k iterations, but the best checkpoint is around 150k.
+
+ ###########################################################
+ #               FEATURE EXTRACTION SETTING                #
+ ###########################################################
+ hop_size: 256            # Hop size.
+ format: "npy"
+
+ ###########################################################
+ #              NETWORK ARCHITECTURE SETTING               #
+ ###########################################################
+ model_type: "fastspeech"
+
+ fastspeech_params:
+     n_speakers: 1
+     encoder_hidden_size: 384
+     encoder_num_hidden_layers: 4
+     encoder_num_attention_heads: 2
+     encoder_attention_head_size: 192   # hidden_size // num_attention_heads
+     encoder_intermediate_size: 1024
+     encoder_intermediate_kernel_size: 3
+     encoder_hidden_act: "mish"
+     decoder_hidden_size: 384
+     decoder_num_hidden_layers: 4
+     decoder_num_attention_heads: 2
+     decoder_attention_head_size: 192   # hidden_size // num_attention_heads
+     decoder_intermediate_size: 1024
+     decoder_intermediate_kernel_size: 3
+     decoder_hidden_act: "mish"
+     num_duration_conv_layers: 2
+     duration_predictor_filters: 256
+     duration_predictor_kernel_sizes: 3
+     num_mels: 80
+     hidden_dropout_prob: 0.2
+     attention_probs_dropout_prob: 0.1
+     duration_predictor_dropout_probs: 0.2
+     max_position_embeddings: 2048
+     initializer_range: 0.02
+     output_attentions: False
+     output_hidden_states: False
+
+ ###########################################################
+ #                   DATA LOADER SETTING                   #
+ ###########################################################
+ batch_size: 16               # Batch size per GPU, assuming gradient_accumulation_steps == 1.
+ remove_short_samples: true   # Whether to remove samples whose length is less than batch_max_steps.
+ allow_cache: true            # Whether to allow caching in the dataset. If true, it requires extra CPU memory.
+ mel_length_threshold: 32     # Remove all targets with mel_length <= 32.
+ is_shuffle: true             # Shuffle the dataset after each epoch.
+
+ ###########################################################
+ #             OPTIMIZER & SCHEDULER SETTING               #
+ ###########################################################
+ optimizer_params:
+     initial_learning_rate: 0.001
+     end_learning_rate: 0.00005
+     decay_steps: 150000          # < train_max_steps is recommended.
+     warmup_proportion: 0.02
+     weight_decay: 0.001
+
+ gradient_accumulation_steps: 1
+ var_train_expr: null  # Trainable-variable expression (e.g. 'embeddings|encoder|decoder'),
+                       # separated by |. If var_train_expr is null, all variables are trained.
+
+ ###########################################################
+ #                    INTERVAL SETTING                     #
+ ###########################################################
+ train_max_steps: 200000      # Number of training steps.
+ save_interval_steps: 5000    # Interval steps to save checkpoints.
+ eval_interval_steps: 500     # Interval steps to evaluate the network.
+ log_interval_steps: 200      # Interval steps to record the training log.
+
+ ###########################################################
+ #                     OTHER SETTING                       #
+ ###########################################################
+ num_save_intermediate_results: 1  # Number of batches to be saved as intermediate results.
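For reference, a minimal sketch of how a configuration like this is typically consumed; it assumes the `FastSpeechConfig` and `TFFastSpeech` classes from the TensorFlowTTS package and a `_build()` helper, none of which are shown in this commit:

```python
import yaml

# Assumed TensorFlowTTS imports (not part of this commit).
from tensorflow_tts.configs import FastSpeechConfig
from tensorflow_tts.models import TFFastSpeech

# Build a FastSpeech model from the committed hyperparameters.
with open("config.yml") as f:
    config = yaml.load(f, Loader=yaml.SafeLoader)

fastspeech_config = FastSpeechConfig(**config["fastspeech_params"])
model = TFFastSpeech(config=fastspeech_config)

# Assumption: TensorFlowTTS models expose _build() to trace dummy inputs,
# after which the LFS weight file added below can be loaded.
model._build()
model.load_weights("model.h5")
```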
model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d0352241536eec6bb426dfa8d69b8a5a96bb6ef51fe3a5599b68ec190b83a85
+ size 120784120
packages.txt DELETED
@@ -1 +0,0 @@
- libsndfile1
processor.json ADDED
@@ -0,0 +1 @@
+ {"symbol_to_id": {"pad": 0, "-": 1, "!": 2, "'": 3, "(": 4, ")": 5, ",": 6, ".": 7, ":": 8, ";": 9, "?": 10, " ": 11, "A": 12, "B": 13, "C": 14, "D": 15, "E": 16, "F": 17, "G": 18, "H": 19, "I": 20, "J": 21, "K": 22, "L": 23, "M": 24, "N": 25, "O": 26, "P": 27, "Q": 28, "R": 29, "S": 30, "T": 31, "U": 32, "V": 33, "W": 34, "X": 35, "Y": 36, "Z": 37, "a": 38, "b": 39, "c": 40, "d": 41, "e": 42, "f": 43, "g": 44, "h": 45, "i": 46, "j": 47, "k": 48, "l": 49, "m": 50, "n": 51, "o": 52, "p": 53, "q": 54, "r": 55, "s": 56, "t": 57, "u": 58, "v": 59, "w": 60, "x": 61, "y": 62, "z": 63, "@AA": 64, "@AA0": 65, "@AA1": 66, "@AA2": 67, "@AE": 68, "@AE0": 69, "@AE1": 70, "@AE2": 71, "@AH": 72, "@AH0": 73, "@AH1": 74, "@AH2": 75, "@AO": 76, "@AO0": 77, "@AO1": 78, "@AO2": 79, "@AW": 80, "@AW0": 81, "@AW1": 82, "@AW2": 83, "@AY": 84, "@AY0": 85, "@AY1": 86, "@AY2": 87, "@B": 88, "@CH": 89, "@D": 90, "@DH": 91, "@EH": 92, "@EH0": 93, "@EH1": 94, "@EH2": 95, "@ER": 96, "@ER0": 97, "@ER1": 98, "@ER2": 99, "@EY": 100, "@EY0": 101, "@EY1": 102, "@EY2": 103, "@F": 104, "@G": 105, "@HH": 106, "@IH": 107, "@IH0": 108, "@IH1": 109, "@IH2": 110, "@IY": 111, "@IY0": 112, "@IY1": 113, "@IY2": 114, "@JH": 115, "@K": 116, "@L": 117, "@M": 118, "@N": 119, "@NG": 120, "@OW": 121, "@OW0": 122, "@OW1": 123, "@OW2": 124, "@OY": 125, "@OY0": 126, "@OY1": 127, "@OY2": 128, "@P": 129, "@R": 130, "@S": 131, "@SH": 132, "@T": 133, "@TH": 134, "@UH": 135, "@UH0": 136, "@UH1": 137, "@UH2": 138, "@UW": 139, "@UW0": 140, "@UW1": 141, "@UW2": 142, "@V": 143, "@W": 144, "@Y": 145, "@Z": 146, "@ZH": 147, "eos": 148}, "id_to_symbol": {"0": "pad", "1": "-", "2": "!", "3": "'", "4": "(", "5": ")", "6": ",", "7": ".", "8": ":", "9": ";", "10": "?", "11": " ", "12": "A", "13": "B", "14": "C", "15": "D", "16": "E", "17": "F", "18": "G", "19": "H", "20": "I", "21": "J", "22": "K", "23": "L", "24": "M", "25": "N", "26": "O", "27": "P", "28": "Q", "29": "R", "30": "S", "31": "T", "32": "U", "33": "V", "34": "W", "35": "X", "36": "Y", "37": "Z", "38": "a", "39": "b", "40": "c", "41": "d", "42": "e", "43": "f", "44": "g", "45": "h", "46": "i", "47": "j", "48": "k", "49": "l", "50": "m", "51": "n", "52": "o", "53": "p", "54": "q", "55": "r", "56": "s", "57": "t", "58": "u", "59": "v", "60": "w", "61": "x", "62": "y", "63": "z", "64": "@AA", "65": "@AA0", "66": "@AA1", "67": "@AA2", "68": "@AE", "69": "@AE0", "70": "@AE1", "71": "@AE2", "72": "@AH", "73": "@AH0", "74": "@AH1", "75": "@AH2", "76": "@AO", "77": "@AO0", "78": "@AO1", "79": "@AO2", "80": "@AW", "81": "@AW0", "82": "@AW1", "83": "@AW2", "84": "@AY", "85": "@AY0", "86": "@AY1", "87": "@AY2", "88": "@B", "89": "@CH", "90": "@D", "91": "@DH", "92": "@EH", "93": "@EH0", "94": "@EH1", "95": "@EH2", "96": "@ER", "97": "@ER0", "98": "@ER1", "99": "@ER2", "100": "@EY", "101": "@EY0", "102": "@EY1", "103": "@EY2", "104": "@F", "105": "@G", "106": "@HH", "107": "@IH", "108": "@IH0", "109": "@IH1", "110": "@IH2", "111": "@IY", "112": "@IY0", "113": "@IY1", "114": "@IY2", "115": "@JH", "116": "@K", "117": "@L", "118": "@M", "119": "@N", "120": "@NG", "121": "@OW", "122": "@OW0", "123": "@OW1", "124": "@OW2", "125": "@OY", "126": "@OY0", "127": "@OY1", "128": "@OY2", "129": "@P", "130": "@R", "131": "@S", "132": "@SH", "133": "@T", "134": "@TH", "135": "@UH", "136": "@UH0", "137": "@UH1", "138": "@UH2", "139": "@UW", "140": "@UW0", "141": "@UW1", "142": "@UW2", "143": "@V", "144": "@W", "145": "@Y", "146": "@Z", "147": "@ZH", "148": "eos"}, "speakers_map": {"ljspeech": 0}, "processor_name": "LJSpeechProcessor"}
requirements.txt DELETED
@@ -1,5 +0,0 @@
- TensorFlowTTS
- gradio
- numpy
- SoundFile
- git+https://github.com/repodiac/german_transliterate