vishred18 committed on
Commit
d5ee97c
1 Parent(s): d72d95f

Upload 364 files

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. TensorFlowTTS/.eggs/README.txt +6 -0
  3. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/LICENSE +19 -0
  4. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/PKG-INFO +189 -0
  5. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/RECORD +7 -0
  6. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/WHEEL +5 -0
  7. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/entry_points.txt +3 -0
  8. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/requires.txt +17 -0
  9. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/top_level.txt +1 -0
  10. TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/ptr/__init__.py +216 -0
  11. TensorFlowTTS/.gitattributes +1 -0
  12. TensorFlowTTS/.github/stale.yml +16 -0
  13. TensorFlowTTS/.github/workflows/ci.yaml +50 -0
  14. TensorFlowTTS/.gitignore +46 -0
  15. TensorFlowTTS/LICENSE +201 -0
  16. TensorFlowTTS/README.md +319 -0
  17. TensorFlowTTS/docker-compose.yml +11 -0
  18. TensorFlowTTS/dockerfile +8 -0
  19. TensorFlowTTS/examples/android/.gitignore +59 -0
  20. TensorFlowTTS/examples/android/README.md +15 -0
  21. TensorFlowTTS/examples/android/app/.gitignore +1 -0
  22. TensorFlowTTS/examples/android/app/build.gradle +39 -0
  23. TensorFlowTTS/examples/android/app/proguard-rules.pro +21 -0
  24. TensorFlowTTS/examples/android/app/src/androidTest/java/com/tensorspeech/tensorflowtts/ExampleInstrumentedTest.java +27 -0
  25. TensorFlowTTS/examples/android/app/src/main/AndroidManifest.xml +21 -0
  26. TensorFlowTTS/examples/android/app/src/main/assets/fastspeech2_quant.tflite +3 -0
  27. TensorFlowTTS/examples/android/app/src/main/assets/mbmelgan.tflite +3 -0
  28. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/MainActivity.java +82 -0
  29. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/dispatcher/OnTtsStateListener.java +13 -0
  30. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/dispatcher/TtsStateDispatcher.java +79 -0
  31. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/module/AbstractModule.java +17 -0
  32. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/module/FastSpeech2.java +82 -0
  33. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/module/MBMelGan.java +64 -0
  34. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/tts/InputWorker.java +114 -0
  35. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/tts/TtsManager.java +97 -0
  36. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/tts/TtsPlayer.java +91 -0
  37. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/utils/NumberNorm.java +109 -0
  38. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/utils/Processor.java +336 -0
  39. TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/utils/ThreadPoolManager.java +157 -0
  40. TensorFlowTTS/examples/android/app/src/main/res/drawable-v24/ic_launcher_foreground.xml +34 -0
  41. TensorFlowTTS/examples/android/app/src/main/res/drawable/ic_launcher_background.xml +170 -0
  42. TensorFlowTTS/examples/android/app/src/main/res/layout/activity_main.xml +77 -0
  43. TensorFlowTTS/examples/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml +5 -0
  44. TensorFlowTTS/examples/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml +5 -0
  45. TensorFlowTTS/examples/android/app/src/main/res/mipmap-hdpi/ic_launcher.png +0 -0
  46. TensorFlowTTS/examples/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png +0 -0
  47. TensorFlowTTS/examples/android/app/src/main/res/mipmap-mdpi/ic_launcher.png +0 -0
  48. TensorFlowTTS/examples/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png +0 -0
  49. TensorFlowTTS/examples/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png +0 -0
  50. TensorFlowTTS/examples/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ TensorFlowTTS/examples/tacotron2/fig/alignment.gif filter=lfs diff=lfs merge=lfs -text
TensorFlowTTS/.eggs/README.txt ADDED
@@ -0,0 +1,6 @@
+ This directory contains eggs that were downloaded by setuptools to build, test, and run plug-ins.
+
+ This directory caches those eggs to prevent repeated downloads.
+
+ However, it is safe to delete this directory.
+
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/LICENSE ADDED
@@ -0,0 +1,19 @@
+ Copyright Jason R. Coombs
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal in the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ IN THE SOFTWARE.
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/PKG-INFO ADDED
@@ -0,0 +1,189 @@
1
+ Metadata-Version: 2.1
2
+ Name: pytest-runner
3
+ Version: 6.0.0
4
+ Summary: Invoke py.test as distutils command with dependency resolution
5
+ Home-page: https://github.com/pytest-dev/pytest-runner/
6
+ Author: Jason R. Coombs
7
+ Author-email: jaraco@jaraco.com
8
+ License: UNKNOWN
9
+ Platform: UNKNOWN
10
+ Classifier: Development Status :: 5 - Production/Stable
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3 :: Only
15
+ Classifier: Framework :: Pytest
16
+ Requires-Python: >=3.7
17
+ License-File: LICENSE
18
+ Provides-Extra: docs
19
+ Requires-Dist: sphinx ; extra == 'docs'
20
+ Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs'
21
+ Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
22
+ Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs'
23
+ Provides-Extra: testing
24
+ Requires-Dist: pytest (>=6) ; extra == 'testing'
25
+ Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
26
+ Requires-Dist: pytest-flake8 ; extra == 'testing'
27
+ Requires-Dist: pytest-cov ; extra == 'testing'
28
+ Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing'
29
+ Requires-Dist: pytest-virtualenv ; extra == 'testing'
30
+ Requires-Dist: types-setuptools ; extra == 'testing'
31
+ Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
32
+ Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing'
33
+
34
+ .. image:: https://img.shields.io/pypi/v/pytest-runner.svg
35
+ :target: `PyPI link`_
36
+
37
+ .. image:: https://img.shields.io/pypi/pyversions/pytest-runner.svg
38
+ :target: `PyPI link`_
39
+
40
+ .. _PyPI link: https://pypi.org/project/pytest-runner
41
+
42
+ .. image:: https://github.com/pytest-dev/pytest-runner/workflows/tests/badge.svg
43
+ :target: https://github.com/pytest-dev/pytest-runner/actions?query=workflow%3A%22tests%22
44
+ :alt: tests
45
+
46
+ .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
47
+ :target: https://github.com/psf/black
48
+ :alt: Code style: Black
49
+
50
+ .. .. image:: https://readthedocs.org/projects/skeleton/badge/?version=latest
51
+ .. :target: https://skeleton.readthedocs.io/en/latest/?badge=latest
52
+
53
+ .. image:: https://img.shields.io/badge/skeleton-2022-informational
54
+ :target: https://blog.jaraco.com/skeleton
55
+
56
+ .. image:: https://tidelift.com/badges/package/pypi/pytest-runner
57
+ :target: https://tidelift.com/subscription/pkg/pypi-pytest-runner?utm_source=pypi-pytest-runner&utm_medium=readme
58
+
59
+ Setup scripts can use pytest-runner to add setup.py test support for pytest
60
+ runner.
61
+
62
+ Deprecation Notice
63
+ ==================
64
+
65
+ pytest-runner depends on deprecated features of setuptools and relies on features that break security
66
+ mechanisms in pip. For example 'setup_requires' and 'tests_require' bypass ``pip --require-hashes``.
67
+ See also `pypa/setuptools#1684 <https://github.com/pypa/setuptools/issues/1684>`_.
68
+
69
+ It is recommended that you:
70
+
71
+ - Remove ``'pytest-runner'`` from your ``setup_requires``, preferably removing the ``setup_requires`` option.
72
+ - Remove ``'pytest'`` and any other testing requirements from ``tests_require``, preferably removing the ``tests_requires`` option.
73
+ - Select a tool to bootstrap and then run tests such as tox.
74
+
75
+ Usage
76
+ =====
77
+
78
+ - Add 'pytest-runner' to your 'setup_requires'. Pin to '>=2.0,<3dev' (or
79
+ similar) to avoid pulling in incompatible versions.
80
+ - Include 'pytest' and any other testing requirements to 'tests_require'.
81
+ - Invoke tests with ``setup.py pytest``.
82
+ - Pass ``--index-url`` to have test requirements downloaded from an alternate
83
+ index URL (unnecessary if specified for easy_install in setup.cfg).
84
+ - Pass additional py.test command-line options using ``--addopts``.
85
+ - Set permanent options for the ``python setup.py pytest`` command (like ``index-url``)
86
+ in the ``[pytest]`` section of ``setup.cfg``.
87
+ - Set permanent options for the ``py.test`` run (like ``addopts`` or ``pep8ignore``) in the ``[pytest]``
88
+ section of ``pytest.ini`` or ``tox.ini`` or put them in the ``[tool:pytest]``
89
+ section of ``setup.cfg``. See `pytest issue 567
90
+ <https://github.com/pytest-dev/pytest/issues/567>`_.
91
+ - Optionally, set ``test=pytest`` in the ``[aliases]`` section of ``setup.cfg``
92
+ to cause ``python setup.py test`` to invoke pytest.
93
+
94
+ Example
95
+ =======
96
+
97
+ The most simple usage looks like this in setup.py::
98
+
99
+ setup(
100
+ setup_requires=[
101
+ 'pytest-runner',
102
+ ],
103
+ tests_require=[
104
+ 'pytest',
105
+ ],
106
+ )
107
+
108
+ Additional dependencies require to run the tests (e.g. mock or pytest
109
+ plugins) may be added to tests_require and will be downloaded and
110
+ required by the session before invoking pytest.
111
+
112
+ Follow `this search on github
113
+ <https://github.com/search?utf8=%E2%9C%93&q=filename%3Asetup.py+pytest-runner&type=Code&ref=searchresults>`_
114
+ for examples of real-world usage.
115
+
116
+ Standalone Example
117
+ ==================
118
+
119
+ This technique is deprecated - if you have standalone scripts
120
+ you wish to invoke with dependencies, `use pip-run
121
+ <https://pypi.org/project/pip-run>`_.
122
+
123
+ Although ``pytest-runner`` is typically used to add pytest test
124
+ runner support to maintained packages, ``pytest-runner`` may
125
+ also be used to create standalone tests. Consider `this example
126
+ failure <https://gist.github.com/jaraco/d979a558bc0bf2194c23>`_,
127
+ reported in `jsonpickle #117
128
+ <https://github.com/jsonpickle/jsonpickle/issues/117>`_
129
+ or `this MongoDB test
130
+ <https://gist.github.com/jaraco/0b9e482f5c0a1300dc9a>`_
131
+ demonstrating a technique that works even when dependencies
132
+ are required in the test.
133
+
134
+ Either example file may be cloned or downloaded and simply run on
135
+ any system with Python and Setuptools. It will download the
136
+ specified dependencies and run the tests. Afterward, the the
137
+ cloned directory can be removed and with it all trace of
138
+ invoking the test. No other dependencies are needed and no
139
+ system configuration is altered.
140
+
141
+ Then, anyone trying to replicate the failure can do so easily
142
+ and with all the power of pytest (rewritten assertions,
143
+ rich comparisons, interactive debugging, extensibility through
144
+ plugins, etc).
145
+
146
+ As a result, the communication barrier for describing and
147
+ replicating failures is made almost trivially low.
148
+
149
+ Considerations
150
+ ==============
151
+
152
+ Conditional Requirement
153
+ -----------------------
154
+
155
+ Because it uses Setuptools setup_requires, pytest-runner will install itself
156
+ on every invocation of setup.py. In some cases, this causes delays for
157
+ invocations of setup.py that will never invoke pytest-runner. To help avoid
158
+ this contingency, consider requiring pytest-runner only when pytest
159
+ is invoked::
160
+
161
+ needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
162
+ pytest_runner = ['pytest-runner'] if needs_pytest else []
163
+
164
+ # ...
165
+
166
+ setup(
167
+ #...
168
+ setup_requires=[
169
+ #... (other setup requirements)
170
+ ] + pytest_runner,
171
+ )
172
+
173
+ For Enterprise
174
+ ==============
175
+
176
+ Available as part of the Tidelift Subscription.
177
+
178
+ This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
179
+
180
+ `Learn more <https://tidelift.com/subscription/pkg/pypi-PROJECT?utm_source=pypi-PROJECT&utm_medium=referral&utm_campaign=github>`_.
181
+
182
+ Security Contact
183
+ ================
184
+
185
+ To report a security vulnerability, please use the
186
+ `Tidelift security contact <https://tidelift.com/security>`_.
187
+ Tidelift will coordinate the fix and disclosure.
188
+
189
+
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/RECORD ADDED
@@ -0,0 +1,7 @@
+ ptr/__init__.py,sha256=0UfzhCooVgCNTBwVEOPOVGEPck4pnl_6PTfsC-QzNGM,6730
+ pytest_runner-6.0.0.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050
+ pytest_runner-6.0.0.dist-info/METADATA,sha256=xa7jfGba2yXK6_27FdHmVJzb9SifCjm_EBVxNXC8R6w,7381
+ pytest_runner-6.0.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+ pytest_runner-6.0.0.dist-info/entry_points.txt,sha256=BqezBqeO63XyzSYmHYE58gKEFIjJUd-XdsRQkXHy2ig,58
+ pytest_runner-6.0.0.dist-info/top_level.txt,sha256=DPzHbWlKG8yq8EOD5UgEvVNDWeJRPyimrwfShwV6Iuw,4
+ pytest_runner-6.0.0.dist-info/RECORD,,
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.37.1)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+ [distutils.commands]
+ ptr = ptr:PyTest
+ pytest = ptr:PyTest
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/requires.txt ADDED
@@ -0,0 +1,17 @@
+
+ [docs]
+ sphinx
+ jaraco.packaging>=9
+ rst.linker>=1.9
+ jaraco.tidelift>=1.4
+
+ [testing]
+ pytest>=6
+ pytest-checkdocs>=2.4
+ pytest-flake8
+ pytest-cov
+ pytest-enabler>=1.0.1
+ pytest-virtualenv
+ types-setuptools
+ pytest-black>=0.3.7
+ pytest-mypy>=0.9.1
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/EGG-INFO/top_level.txt ADDED
@@ -0,0 +1 @@
+ ptr
TensorFlowTTS/.eggs/pytest_runner-6.0.0-py3.11.egg/ptr/__init__.py ADDED
@@ -0,0 +1,216 @@
1
+ """
2
+ Implementation
3
+ """
4
+
5
+ import os as _os
6
+ import shlex as _shlex
7
+ import contextlib as _contextlib
8
+ import sys as _sys
9
+ import operator as _operator
10
+ import itertools as _itertools
11
+ import warnings as _warnings
12
+
13
+ import pkg_resources
14
+ import setuptools.command.test as orig
15
+ from setuptools import Distribution
16
+
17
+
18
+ @_contextlib.contextmanager
19
+ def _save_argv(repl=None):
20
+ saved = _sys.argv[:]
21
+ if repl is not None:
22
+ _sys.argv[:] = repl
23
+ try:
24
+ yield saved
25
+ finally:
26
+ _sys.argv[:] = saved
27
+
28
+
29
+ class CustomizedDist(Distribution):
30
+
31
+ allow_hosts = None
32
+ index_url = None
33
+
34
+ def fetch_build_egg(self, req):
35
+ """Specialized version of Distribution.fetch_build_egg
36
+ that respects respects allow_hosts and index_url."""
37
+ from setuptools.command.easy_install import easy_install
38
+
39
+ dist = Distribution({'script_args': ['easy_install']})
40
+ dist.parse_config_files()
41
+ opts = dist.get_option_dict('easy_install')
42
+ keep = (
43
+ 'find_links',
44
+ 'site_dirs',
45
+ 'index_url',
46
+ 'optimize',
47
+ 'site_dirs',
48
+ 'allow_hosts',
49
+ )
50
+ for key in list(opts):
51
+ if key not in keep:
52
+ del opts[key] # don't use any other settings
53
+ if self.dependency_links:
54
+ links = self.dependency_links[:]
55
+ if 'find_links' in opts:
56
+ links = opts['find_links'][1].split() + links
57
+ opts['find_links'] = ('setup', links)
58
+ if self.allow_hosts:
59
+ opts['allow_hosts'] = ('test', self.allow_hosts)
60
+ if self.index_url:
61
+ opts['index_url'] = ('test', self.index_url)
62
+ install_dir_func = getattr(self, 'get_egg_cache_dir', _os.getcwd)
63
+ install_dir = install_dir_func()
64
+ cmd = easy_install(
65
+ dist,
66
+ args=["x"],
67
+ install_dir=install_dir,
68
+ exclude_scripts=True,
69
+ always_copy=False,
70
+ build_directory=None,
71
+ editable=False,
72
+ upgrade=False,
73
+ multi_version=True,
74
+ no_report=True,
75
+ user=False,
76
+ )
77
+ cmd.ensure_finalized()
78
+ return cmd.easy_install(req)
79
+
80
+
81
+ class PyTest(orig.test):
82
+ """
83
+ >>> import setuptools
84
+ >>> dist = setuptools.Distribution()
85
+ >>> cmd = PyTest(dist)
86
+ """
87
+
88
+ user_options = [
89
+ ('extras', None, "Install (all) setuptools extras when running tests"),
90
+ (
91
+ 'index-url=',
92
+ None,
93
+ "Specify an index url from which to retrieve dependencies",
94
+ ),
95
+ (
96
+ 'allow-hosts=',
97
+ None,
98
+ "Whitelist of comma-separated hosts to allow "
99
+ "when retrieving dependencies",
100
+ ),
101
+ (
102
+ 'addopts=',
103
+ None,
104
+ "Additional options to be passed verbatim to the pytest runner",
105
+ ),
106
+ ]
107
+
108
+ def initialize_options(self):
109
+ self.extras = False
110
+ self.index_url = None
111
+ self.allow_hosts = None
112
+ self.addopts = []
113
+ self.ensure_setuptools_version()
114
+
115
+ @staticmethod
116
+ def ensure_setuptools_version():
117
+ """
118
+ Due to the fact that pytest-runner is often required (via
119
+ setup-requires directive) by toolchains that never invoke
120
+ it (i.e. they're only installing the package, not testing it),
121
+ instead of declaring the dependency in the package
122
+ metadata, assert the requirement at run time.
123
+ """
124
+ pkg_resources.require('setuptools>=27.3')
125
+
126
+ def finalize_options(self):
127
+ if self.addopts:
128
+ self.addopts = _shlex.split(self.addopts)
129
+
130
+ @staticmethod
131
+ def marker_passes(marker):
132
+ """
133
+ Given an environment marker, return True if the marker is valid
134
+ and matches this environment.
135
+ """
136
+ return (
137
+ not marker
138
+ or not pkg_resources.invalid_marker(marker)
139
+ and pkg_resources.evaluate_marker(marker)
140
+ )
141
+
142
+ def install_dists(self, dist):
143
+ """
144
+ Extend install_dists to include extras support
145
+ """
146
+ return _itertools.chain(
147
+ orig.test.install_dists(dist), self.install_extra_dists(dist)
148
+ )
149
+
150
+ def install_extra_dists(self, dist):
151
+ """
152
+ Install extras that are indicated by markers or
153
+ install all extras if '--extras' is indicated.
154
+ """
155
+ extras_require = dist.extras_require or {}
156
+
157
+ spec_extras = (
158
+ (spec.partition(':'), reqs) for spec, reqs in extras_require.items()
159
+ )
160
+ matching_extras = (
161
+ reqs
162
+ for (name, sep, marker), reqs in spec_extras
163
+ # include unnamed extras or all if self.extras indicated
164
+ if (not name or self.extras)
165
+ # never include extras that fail to pass marker eval
166
+ and self.marker_passes(marker)
167
+ )
168
+ results = list(map(dist.fetch_build_eggs, matching_extras))
169
+ return _itertools.chain.from_iterable(results)
170
+
171
+ @staticmethod
172
+ def _warn_old_setuptools():
173
+ msg = (
174
+ "pytest-runner will stop working on this version of setuptools; "
175
+ "please upgrade to setuptools 30.4 or later or pin to "
176
+ "pytest-runner < 5."
177
+ )
178
+ ver_str = pkg_resources.get_distribution('setuptools').version
179
+ ver = pkg_resources.parse_version(ver_str)
180
+ if ver < pkg_resources.parse_version('30.4'):
181
+ _warnings.warn(msg)
182
+
183
+ def run(self):
184
+ """
185
+ Override run to ensure requirements are available in this session (but
186
+ don't install them anywhere).
187
+ """
188
+ self._warn_old_setuptools()
189
+ dist = CustomizedDist()
190
+ for attr in 'allow_hosts index_url'.split():
191
+ setattr(dist, attr, getattr(self, attr))
192
+ for attr in (
193
+ 'dependency_links install_requires tests_require extras_require '
194
+ ).split():
195
+ setattr(dist, attr, getattr(self.distribution, attr))
196
+ installed_dists = self.install_dists(dist)
197
+ if self.dry_run:
198
+ self.announce('skipping tests (dry run)')
199
+ return
200
+ paths = map(_operator.attrgetter('location'), installed_dists)
201
+ with self.paths_on_pythonpath(paths):
202
+ with self.project_on_sys_path():
203
+ return self.run_tests()
204
+
205
+ @property
206
+ def _argv(self):
207
+ return ['pytest'] + self.addopts
208
+
209
+ def run_tests(self):
210
+ """
211
+ Invoke pytest, replacing argv. Return result code.
212
+ """
213
+ with _save_argv(_sys.argv[:1] + self.addopts):
214
+ result_code = __import__('pytest').main()
215
+ if result_code:
216
+ raise SystemExit(result_code)
TensorFlowTTS/.gitattributes ADDED
@@ -0,0 +1 @@
+ *.ipynb linguist-language=Python
TensorFlowTTS/.github/stale.yml ADDED
@@ -0,0 +1,16 @@
+ # Number of days of inactivity before an issue becomes stale
+ daysUntilStale: 60
+ # Number of days of inactivity before a stale issue is closed
+ daysUntilClose: 7
+ # Issues with these labels will never be considered stale
+ exemptLabels:
+   - pinned
+   - security
+ # Label to use when marking an issue as stale
+ staleLabel: wontfix
+ # Comment to post when marking an issue as stale. Set to `false` to disable
+ markComment: >
+   This issue has been automatically marked as stale because it has not had
+   recent activity. It will be closed if no further activity occurs.
+ # Comment to post when closing a stale issue. Set to `false` to disable
+ closeComment: false
TensorFlowTTS/.github/workflows/ci.yaml ADDED
@@ -0,0 +1,50 @@
+ name: CI
+
+ on:
+   push:
+     branches:
+       - master
+   pull_request:
+     branches:
+       - master
+   schedule:
+     - cron: 0 0 * * 1
+
+ jobs:
+   linter_and_test:
+     runs-on: ubuntu-18.04
+     strategy:
+       max-parallel: 10
+       matrix:
+         python-version: [3.7]
+         tensorflow-version: [2.7.0]
+     steps:
+       - uses: actions/checkout@master
+       - uses: actions/setup-python@v1
+         with:
+           python-version: ${{ matrix.python-version }}
+           architecture: 'x64'
+       - uses: actions/cache@v1
+         with:
+           path: ~/.cache/pip
+           key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.pytorch-version }}-pip-${{ hashFiles('**/setup.py') }}
+           restore-keys: |
+             ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.tensorflow-version }}-pip-
+       - name: Install dependencies
+         run: |
+           # install python modules
+           python -m pip install --upgrade pip
+           pip install -q -U numpy
+           pip install git+https://github.com/repodiac/german_transliterate.git#egg=german_transliterate
+           pip install -q tensorflow-gpu==${{ matrix.tensorflow-version }}
+           pip install -q -e .
+           pip install -q -e .[test]
+           pip install typing_extensions
+           sudo apt-get install libsndfile1-dev
+           python -m pip install black
+       - name: black
+         run: |
+           python -m black .
+       - name: Pytest
+         run: |
+           pytest test
TensorFlowTTS/.gitignore ADDED
@@ -0,0 +1,46 @@
+
+ # general
+ *~
+ *.pyc
+ \#*\#
+ .\#*
+ *DS_Store
+ out.txt
+ TensorFlowTTS.egg-info/
+ doc/_build
+ slurm-*.out
+ tmp*
+ .eggs/
+ .hypothesis/
+ .idea
+ .backup/
+ .pytest_cache/
+ __pycache__/
+ .coverage*
+ coverage.xml*
+ .vscode*
+ .nfs*
+ .ipynb_checkpoints
+ ljspeech
+ *.h5
+ *.npy
+ ./*.wav
+ !docker-compose.yml
+ /Pipfile
+ /Pipfile.lock
+ /datasets
+ /examples/tacotron2/exp/
+ /temp/
+ LibriTTS/
+ dataset/
+ mfa/
+ kss/
+ baker/
+ libritts/
+ dump_baker/
+ dump_ljspeech/
+ dump_kss/
+ dump_libritts/
+ /notebooks/test_saved/
+ build/
+ dist/
TensorFlowTTS/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
TensorFlowTTS/README.md ADDED
@@ -0,0 +1,319 @@
+ <h2 align="center">
+ <p> :yum: TensorFlowTTS
+ <p align="center">
+ <a href="https://github.com/tensorspeech/TensorFlowTTS/actions">
+ <img alt="Build" src="https://github.com/tensorspeech/TensorFlowTTS/workflows/CI/badge.svg?branch=master">
+ </a>
+ <a href="https://github.com/tensorspeech/TensorFlowTTS/blob/master/LICENSE">
+ <img alt="GitHub" src="https://img.shields.io/github/license/tensorspeech/TensorflowTTS?color=red">
+ </a>
+ <a href="https://colab.research.google.com/drive/1akxtrLZHKuMiQup00tzO2olCaN-y3KiD?usp=sharing">
+ <img alt="Colab" src="https://colab.research.google.com/assets/colab-badge.svg">
+ </a>
+ </p>
+ </h2>
+ <h2 align="center">
+ <p>Real-Time State-of-the-art Speech Synthesis for TensorFlow 2
+ </h2>
+
+ :zany_face: TensorFlowTTS provides real-time, state-of-the-art speech synthesis architectures such as Tacotron-2, MelGAN, Multi-band MelGAN, FastSpeech, and FastSpeech2, based on TensorFlow 2. With TensorFlow 2, we can speed up training and inference, optimize further using [fake-quantize aware training](https://www.tensorflow.org/model_optimization/guide/quantization/training_comprehensive_guide) and [pruning](https://www.tensorflow.org/model_optimization/guide/pruning/pruning_with_keras), and make TTS models run faster than real time and deployable on mobile devices or embedded systems.
+
+ ## What's new
+ - 2021/08/18 (**NEW!**) Integrated into [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See the [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/TensorFlowTTS).
+ - 2021/08/12 (**NEW!**) Support French TTS (Tacotron2, Multiband MelGAN). Please see the [colab](https://colab.research.google.com/drive/1jd3u46g-fGQw0rre8fIwWM9heJvrV1c0?usp=sharing). Many thanks [Samuel Delalez](https://github.com/samuel-lunii)
+ - 2021/06/01 Integrated with the [Huggingface Hub](https://huggingface.co/tensorspeech). See the [PR](https://github.com/TensorSpeech/TensorFlowTTS/pull/555). Thanks [patrickvonplaten](https://github.com/patrickvonplaten) and [osanseviero](https://github.com/osanseviero)
+ - 2021/03/18 Support iOS for FastSpeech2 and MB MelGAN. Thanks [kewlbear](https://github.com/kewlbear). See [here](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/examples/ios)
+ - 2021/01/18 Support TFLite C++ inference. Thanks [luan78zaoha](https://github.com/luan78zaoha). See [here](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/examples/cpptflite)
+ - 2020/12/02 Support German TTS with the [Thorsten dataset](https://github.com/thorstenMueller/deep-learning-german-tts). See the [Colab](https://colab.research.google.com/drive/1W0nSFpsz32M0OcIkY9uMOiGrLTPKVhTy?usp=sharing). Thanks [thorstenMueller](https://github.com/thorstenMueller) and [monatis](https://github.com/monatis)
+ - 2020/11/24 Add HiFi-GAN vocoder. See [here](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/examples/hifigan)
+ - 2020/11/19 Add Multi-GPU gradient accumulator. See [here](https://github.com/TensorSpeech/TensorFlowTTS/pull/377)
+ - 2020/08/23 Add Parallel WaveGAN TensorFlow implementation. See [here](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/examples/parallel_wavegan)
+ - 2020/08/20 Add C++ inference code. Thanks [@ZDisket](https://github.com/ZDisket). See [here](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/examples/cppwin)
+ - 2020/08/18 Update the [new base processor](https://github.com/TensorSpeech/TensorFlowTTS/blob/master/tensorflow_tts/processor/base_processor.py). Add [AutoProcessor](https://github.com/TensorSpeech/TensorFlowTTS/blob/master/tensorflow_tts/inference/auto_processor.py) and [pretrained processor](https://github.com/TensorSpeech/TensorFlowTTS/blob/master/tensorflow_tts/processor/pretrained/) json files
+ - 2020/08/14 Support Chinese TTS. Please see the [colab](https://colab.research.google.com/drive/1YpSHRBRPBI7cnTkQn1UcVTWEQVbsUm1S?usp=sharing). Thanks [@azraelkuan](https://github.com/azraelkuan)
+ - 2020/08/05 Support Korean TTS. Please see the [colab](https://colab.research.google.com/drive/1ybWwOS5tipgPFttNulp77P6DAB5MtiuN?usp=sharing). Thanks [@crux153](https://github.com/crux153)
+ - 2020/07/17 Support multi-GPU for all trainers
+ - 2020/07/05 Support converting Tacotron-2 and FastSpeech to TFLite. Please see the [colab](https://colab.research.google.com/drive/1HudLLpT9CQdh2k04c06bHUwLubhGTWxA?usp=sharing). Thanks @jaeyoo from the TFLite team for his support
+ - 2020/06/20 [FastSpeech2](https://arxiv.org/abs/2006.04558) implementation with TensorFlow is supported.
+ - 2020/06/07 [Multi-band MelGAN (MB MelGAN)](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/multiband_melgan/) implementation with TensorFlow is supported
+
+
+ ## Features
+ - High performance on speech synthesis.
+ - Ability to fine-tune on other languages.
+ - Fast, scalable, and reliable.
+ - Suitable for deployment.
+ - Easy to implement a new model based on the abstract classes.
+ - Mixed precision to speed up training where possible.
+ - Single/multi-GPU gradient accumulation.
+ - Both single and multi GPU supported in the base trainer class.
+ - TFLite conversion for all supported models.
+ - Android example.
+ - Support for many languages (currently Chinese, Korean, English, French and German).
+ - C++ inference support.
+ - Weight conversion for some models from PyTorch to TensorFlow to speed things up.
+
+ ## Requirements
+ This repository is tested on Ubuntu 18.04 with:
+
+ - Python 3.7+
+ - CUDA 10.1
+ - CuDNN 7.6.5
+ - TensorFlow 2.2/2.3/2.4/2.5/2.6
+ - [TensorFlow Addons](https://github.com/tensorflow/addons) >= 0.10.0
+
+ Other TensorFlow versions should work but have not been tested yet. This repo will try to work with the latest stable TensorFlow version. **We recommend installing TensorFlow 2.6.0 for training in case you want to use multiple GPUs.**
+
+ ## Installation
+ ### With pip
+ ```bash
+ $ pip install TensorFlowTTS
+ ```
+ ### From source
+ Examples are included in the repository but are not shipped with the framework. Therefore, to run the latest version of the examples, you need to install from source as below.
+ ```bash
+ $ git clone https://github.com/TensorSpeech/TensorFlowTTS.git
+ $ cd TensorFlowTTS
+ $ pip install .
+ ```
+ If you want to upgrade the repository and its dependencies:
+ ```bash
+ $ git pull
+ $ pip install --upgrade .
+ ```
+
+ # Supported Model architectures
+ TensorFlowTTS currently provides the following architectures:
+
+ 1. **MelGAN** released with the paper [MelGAN: Generative Adversarial Networks for Conditional Waveform Synthesis](https://arxiv.org/abs/1910.06711) by Kundan Kumar, Rithesh Kumar, Thibault de Boissiere, Lucas Gestin, Wei Zhen Teoh, Jose Sotelo, Alexandre de Brebisson, Yoshua Bengio, Aaron Courville.
+ 2. **Tacotron-2** released with the paper [Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions](https://arxiv.org/abs/1712.05884) by Jonathan Shen, Ruoming Pang, Ron J. Weiss, Mike Schuster, Navdeep Jaitly, Zongheng Yang, Zhifeng Chen, Yu Zhang, Yuxuan Wang, RJ Skerry-Ryan, Rif A. Saurous, Yannis Agiomyrgiannakis, Yonghui Wu.
+ 3. **FastSpeech** released with the paper [FastSpeech: Fast, Robust, and Controllable Text to Speech](https://arxiv.org/abs/1905.09263) by Yi Ren, Yangjun Ruan, Xu Tan, Tao Qin, Sheng Zhao, Zhou Zhao, Tie-Yan Liu.
+ 4. **Multi-band MelGAN** released with the paper [Multi-band MelGAN: Faster Waveform Generation for High-Quality Text-to-Speech](https://arxiv.org/abs/2005.05106) by Geng Yang, Shan Yang, Kai Liu, Peng Fang, Wei Chen, Lei Xie.
+ 5. **FastSpeech2** released with the paper [FastSpeech 2: Fast and High-Quality End-to-End Text to Speech](https://arxiv.org/abs/2006.04558) by Yi Ren, Chenxu Hu, Xu Tan, Tao Qin, Sheng Zhao, Zhou Zhao, Tie-Yan Liu.
+ 6. **Parallel WaveGAN** released with the paper [Parallel WaveGAN: A fast waveform generation model based on generative adversarial networks with multi-resolution spectrogram](https://arxiv.org/abs/1910.11480) by Ryuichi Yamamoto, Eunwoo Song, Jae-Min Kim.
+ 7. **HiFi-GAN** released with the paper [HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis](https://arxiv.org/abs/2010.05646) by Jungil Kong, Jaehyeon Kim, Jaekyoung Bae.
+
+ We are also implementing some techniques to improve quality and convergence speed from the following papers:
+
+ 1. **Guided Attention Loss** released with the paper [Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention](https://arxiv.org/abs/1710.08969) by Hideyuki Tachibana, Katsuya Uenoyama, Shunsuke Aihara.
+
+
+ # Audio Samples
+ Here are some audio samples on the validation set: [tacotron-2](https://drive.google.com/open?id=1kaPXRdLg9gZrll9KtvH3-feOBMM8sn3_), [fastspeech](https://drive.google.com/open?id=1f69ujszFeGnIy7PMwc8AkUckhIaT2OD0), [melgan](https://drive.google.com/open?id=1mBwGVchwtNkgFsURl7g4nMiqx4gquAC2), [melgan.stft](https://drive.google.com/open?id=1xUkDjbciupEkM3N4obiJAYySTo6J9z6b), [fastspeech2](https://drive.google.com/drive/u/1/folders/1NG7oOfNuXSh7WyAoM1hI8P5BxDALY_mU), [multiband_melgan](https://drive.google.com/drive/folders/1DCV3sa6VTyoJzZmKATYvYVDUAFXlQ_Zp)
+
+ # Tutorial End-to-End
+
+ ## Prepare Dataset
+
+ Prepare a dataset in the following format:
+ ```
+ |- [NAME_DATASET]/
+ |   |- metadata.csv
+ |   |- wavs/
+ |       |- file1.wav
+ |       |- ...
+ ```
+
+ Where `metadata.csv` has the following format: `id|transcription`. This is an LJSpeech-like format; you can skip the preprocessing steps if your dataset uses a different format.
+
+ Note that `NAME_DATASET` should be one of `[ljspeech/kss/baker/libritts/synpaflex]`, for example.
+
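+ For illustration, a minimal `metadata.csv` in this format might contain lines such as the following (the IDs and transcriptions here are invented; only the `id|transcription` layout matters):
+ ```
+ utt_0001|Hello world, this is a test sentence.
+ utt_0002|TensorFlowTTS converts text to speech.
+ ```
+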
+ ## Preprocessing
+
+ The preprocessing has two steps:
+
+ 1. Preprocess audio features
+     - Convert characters to IDs
+     - Compute mel spectrograms
+     - Normalize mel spectrograms to the [-1, 1] range
+     - Split the dataset into train and validation sets
+     - Compute the mean and standard deviation of multiple features from the **training** split
+ 2. Standardize the mel spectrograms based on the computed statistics
+
+ To reproduce the steps above:
+ ```
+ tensorflow-tts-preprocess --rootdir ./[ljspeech/kss/baker/libritts/thorsten/synpaflex] --outdir ./dump_[ljspeech/kss/baker/libritts/thorsten/synpaflex] --config preprocess/[ljspeech/kss/baker/thorsten/synpaflex]_preprocess.yaml --dataset [ljspeech/kss/baker/libritts/thorsten/synpaflex]
+ tensorflow-tts-normalize --rootdir ./dump_[ljspeech/kss/baker/libritts/thorsten/synpaflex] --outdir ./dump_[ljspeech/kss/baker/libritts/thorsten/synpaflex] --config preprocess/[ljspeech/kss/baker/libritts/thorsten/synpaflex]_preprocess.yaml --dataset [ljspeech/kss/baker/libritts/thorsten/synpaflex]
+ ```
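+ For example, with LJSpeech placed in `./ljspeech`, the two commands above reduce to the following (the `./dump_ljspeech` output directory is just the naming convention used throughout this README):
+ ```
+ tensorflow-tts-preprocess --rootdir ./ljspeech --outdir ./dump_ljspeech --config preprocess/ljspeech_preprocess.yaml --dataset ljspeech
+ tensorflow-tts-normalize --rootdir ./dump_ljspeech --outdir ./dump_ljspeech --config preprocess/ljspeech_preprocess.yaml --dataset ljspeech
+ ```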
+
+ Right now we only support [`ljspeech`](https://keithito.com/LJ-Speech-Dataset/), [`kss`](https://www.kaggle.com/bryanpark/korean-single-speaker-speech-dataset), [`baker`](https://weixinxcxdb.oss-cn-beijing.aliyuncs.com/gwYinPinKu/BZNSYP.rar), [`libritts`](http://www.openslr.org/60/), [`thorsten`](https://github.com/thorstenMueller/deep-learning-german-tts) and [`synpaflex`](https://www.ortolang.fr/market/corpora/synpaflex-corpus/) for the dataset argument. In the future, we intend to support more datasets.
+
+ **Note**: To run `libritts` preprocessing, please first read the instructions in [examples/fastspeech2_libritts](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/examples/fastspeech2_libritts). The dataset needs to be reformatted before running preprocessing.
+
+ **Note**: To run `synpaflex` preprocessing, please first run the notebook [notebooks/prepare_synpaflex.ipynb](https://github.com/TensorSpeech/TensorFlowTTS/tree/master/notebooks/prepare_synpaflex.ipynb). The dataset needs to be reformatted before running preprocessing.
+
+ After preprocessing, the structure of the project folder should be:
+ ```
+ |- [NAME_DATASET]/
+ |   |- metadata.csv
+ |   |- wav/
+ |       |- file1.wav
+ |       |- ...
+ |- dump_[ljspeech/kss/baker/libritts/thorsten]/
+ |   |- train/
+ |   |   |- ids/
+ |   |   |   |- LJ001-0001-ids.npy
+ |   |   |   |- ...
+ |   |   |- raw-feats/
+ |   |   |   |- LJ001-0001-raw-feats.npy
+ |   |   |   |- ...
+ |   |   |- raw-f0/
+ |   |   |   |- LJ001-0001-raw-f0.npy
+ |   |   |   |- ...
+ |   |   |- raw-energies/
+ |   |   |   |- LJ001-0001-raw-energy.npy
+ |   |   |   |- ...
+ |   |   |- norm-feats/
+ |   |   |   |- LJ001-0001-norm-feats.npy
+ |   |   |   |- ...
+ |   |   |- wavs/
+ |   |   |   |- LJ001-0001-wave.npy
+ |   |   |   |- ...
+ |   |- valid/
+ |   |   |- ids/
+ |   |   |   |- LJ001-0009-ids.npy
+ |   |   |   |- ...
+ |   |   |- raw-feats/
+ |   |   |   |- LJ001-0009-raw-feats.npy
+ |   |   |   |- ...
+ |   |   |- raw-f0/
+ |   |   |   |- LJ001-0001-raw-f0.npy
+ |   |   |   |- ...
+ |   |   |- raw-energies/
+ |   |   |   |- LJ001-0001-raw-energy.npy
+ |   |   |   |- ...
+ |   |   |- norm-feats/
+ |   |   |   |- LJ001-0009-norm-feats.npy
+ |   |   |   |- ...
+ |   |   |- wavs/
+ |   |   |   |- LJ001-0009-wave.npy
+ |   |   |   |- ...
+ |   |- stats.npy
+ |   |- stats_f0.npy
+ |   |- stats_energy.npy
+ |   |- train_utt_ids.npy
+ |   |- valid_utt_ids.npy
+ |- examples/
+ |   |- melgan/
+ |   |- fastspeech/
+ |   |- tacotron2/
+ |   ...
+ ```
+
+ - `stats.npy` contains the mean and std of the mel spectrograms from the training split
+ - `stats_energy.npy` contains the mean and std of the energy values from the training split
+ - `stats_f0.npy` contains the mean and std of the F0 values in the training split
+ - `train_utt_ids.npy` / `valid_utt_ids.npy` contain the training and validation utterance IDs, respectively
+
+ We use a suffix (`ids`, `raw-feats`, `raw-energy`, `raw-f0`, `norm-feats`, and `wave`) for each input type.
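+
+ As a rough illustration of how these statistics are used (a sketch only — it assumes `stats.npy` stores the per-mel-bin mean and standard deviation as two rows, so check the files in your own dump folder):
+ ```python
+ import numpy as np
+
+ # Hypothetical example: standardize a raw mel spectrogram with the
+ # training-split statistics written by tensorflow-tts-preprocess.
+ mel = np.load("dump_ljspeech/train/raw-feats/LJ001-0001-raw-feats.npy")  # [T, num_mels]
+ mean, std = np.load("dump_ljspeech/train/stats.npy")                     # assumed layout: [2, num_mels]
+ norm_mel = (mel - mean) / std  # comparable to the *-norm-feats.npy files under this assumption
+ ```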
+
+
+ **IMPORTANT NOTES**:
+ - This preprocessing step is based on [ESPnet](https://github.com/espnet/espnet), so you can combine all models here with other models from the ESPnet repository.
+ - Regardless of how your dataset is formatted, the final structure of the `dump` folder **SHOULD** follow the above structure to be able to use the training scripts, or you can modify them by yourself 😄.
+
+ ## Training models
+
+ To learn how to train a model from scratch or fine-tune it with other datasets/languages, please see the details in the example directories.
+
+ - For the Tacotron-2 tutorial, please see [examples/tacotron2](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/tacotron2)
+ - For the FastSpeech tutorial, please see [examples/fastspeech](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/fastspeech)
+ - For the FastSpeech2 tutorial, please see [examples/fastspeech2](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/fastspeech2)
+ - For the FastSpeech2 + MFA tutorial, please see [examples/fastspeech2_libritts](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/fastspeech2_libritts)
+ - For the MelGAN tutorial, please see [examples/melgan](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/melgan)
+ - For the MelGAN + STFT Loss tutorial, please see [examples/melgan.stft](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/melgan.stft)
+ - For the Multiband-MelGAN tutorial, please see [examples/multiband_melgan](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/multiband_melgan)
+ - For the Parallel WaveGAN tutorial, please see [examples/parallel_wavegan](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/parallel_wavegan)
+ - For the Multiband-MelGAN Generator + HiFi-GAN tutorial, please see [examples/multiband_melgan_hf](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/multiband_melgan_hf)
+ - For the HiFi-GAN tutorial, please see [examples/hifigan](https://github.com/tensorspeech/TensorFlowTTS/tree/master/examples/hifigan)
+
+ # Abstract Class Explanation
+
+ ## Abstract DataLoader Tensorflow-based dataset
+
+ A detailed implementation of the abstract dataset class is in [tensorflow_tts/dataset/abstract_dataset](https://github.com/tensorspeech/TensorFlowTTS/blob/master/tensorflow_tts/datasets/abstract_dataset.py). There are some functions you need to override and understand (a minimal sketch follows the list):
+
+ 1. **get_args**: This function returns the arguments for the **generator** function, normally the utt_ids.
+ 2. **generator**: This function takes its inputs from the **get_args** function and returns the inputs for the models. **Note that all generator functions return a dictionary whose keys exactly match the model's parameters, because base_trainer uses model(\*\*batch) to do the forward step.**
+ 3. **get_output_dtypes**: This function returns the dtypes for each element produced by the **generator** function.
+ 4. **get_len_dataset**: Returns the length of the dataset, normally len(utt_ids).
+
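+ As a minimal, illustrative sketch (not one of the repository's real dataset classes — the utterance-ID list, folder names, and output keys below are made up), a custom dataset built on the four functions above could look like this:
+ ```python
+ import numpy as np
+ import tensorflow as tf
+
+ from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
+
+
+ class ToyCharMelDataset(AbstractDataset):
+     """Toy dataset yielding character IDs and normalized mel spectrograms."""
+
+     def __init__(self, utt_ids, ids_dir, mel_dir):
+         self.utt_ids = utt_ids      # e.g. ["LJ001-0001", ...]
+         self.ids_dir = ids_dir      # folder containing *-ids.npy
+         self.mel_dir = mel_dir      # folder containing *-norm-feats.npy
+
+     def get_args(self):
+         # Arguments passed to generator(), normally the utterance IDs.
+         return [self.utt_ids]
+
+     def generator(self, utt_ids):
+         for utt_id in utt_ids:
+             input_ids = np.load(f"{self.ids_dir}/{utt_id}-ids.npy")
+             mel = np.load(f"{self.mel_dir}/{utt_id}-norm-feats.npy")
+             # Keys must match the model's parameter names (model(**batch)).
+             yield {"input_ids": input_ids, "mel_gts": mel}
+
+     def get_output_dtypes(self):
+         return {"input_ids": tf.int32, "mel_gts": tf.float32}
+
+     def get_len_dataset(self):
+         return len(self.utt_ids)
+ ```
+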
+ **IMPORTANT NOTES**:
+
+ - A pipeline for creating a dataset should be: cache -> shuffle -> map_fn -> get_batch -> prefetch.
+ - If you shuffle before caching, the dataset won't be reshuffled when you re-iterate over it.
+ - You should apply map_fn so that each element returned by the **generator** function has the same length before batching and feeding it into a model.
+
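+ A sketch of that ordering in plain `tf.data` (the map function is a placeholder for whatever padding or length-fixing your dataset needs; the real dataset classes assemble an equivalent pipeline internally):
+ ```python
+ import tensorflow as tf
+
+
+ def format_example(example):
+     # Placeholder: pad or trim features here so every element has the same length.
+     return example
+
+
+ def build_pipeline(dataset: tf.data.Dataset, batch_size: int) -> tf.data.Dataset:
+     # cache -> shuffle -> map_fn -> get_batch -> prefetch, as recommended above.
+     dataset = dataset.cache()                      # cache first so each epoch is reshuffled
+     dataset = dataset.shuffle(buffer_size=1000)
+     dataset = dataset.map(format_example, num_parallel_calls=tf.data.AUTOTUNE)
+     dataset = dataset.padded_batch(batch_size)
+     dataset = dataset.prefetch(tf.data.AUTOTUNE)
+     return dataset
+ ```
+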
248
+ Some examples to use this **abstract_dataset** are [tacotron_dataset.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/tacotron2/tacotron_dataset.py), [fastspeech_dataset.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/fastspeech/fastspeech_dataset.py), [melgan_dataset.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/melgan/audio_mel_dataset.py), [fastspeech2_dataset.py](https://github.com/TensorSpeech/TensorFlowTTS/blob/master/examples/fastspeech2/fastspeech2_dataset.py)
249
+
250
+
251
+ ## Abstract Trainer Class
252
+
253
+ A detailed implementation of the base trainer is available at [tensorflow_tts/trainer/base_trainer.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/tensorflow_tts/trainers/base_trainer.py). It includes [Seq2SeqBasedTrainer](https://github.com/tensorspeech/TensorFlowTTS/blob/master/tensorflow_tts/trainers/base_trainer.py#L265) and [GanBasedTrainer](https://github.com/tensorspeech/TensorFlowTTS/blob/master/tensorflow_tts/trainers/base_trainer.py#L149), both of which inherit from [BasedTrainer](https://github.com/tensorspeech/TensorFlowTTS/blob/master/tensorflow_tts/trainers/base_trainer.py#L16). All trainers support both single and multi GPU training. There are some functions you **MUST** override when implementing a new trainer:
254
+
255
+ - **compile**: This function defines the models and losses.
256
+ - **generate_and_save_intermediate_result**: This function saves intermediate results such as alignment plots, generated audio, and mel-spectrogram plots.
257
+ - **compute_per_example_losses**: This function computes the per-example loss for the model; note that every element of the loss **MUST** have shape [batch_size].
258
+
259
+ All models in this repo are trained with either **GanBasedTrainer** (see [train_melgan.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/melgan/train_melgan.py), [train_melgan_stft.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/melgan.stft/train_melgan_stft.py), [train_multiband_melgan.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/multiband_melgan/train_multiband_melgan.py)) or **Seq2SeqBasedTrainer** (see [train_tacotron2.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/tacotron2/train_tacotron2.py), [train_fastspeech.py](https://github.com/tensorspeech/TensorFlowTTS/blob/master/examples/fastspeech/train_fastspeech.py)).
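+ 
+ As a rough, hypothetical sketch (the import path and method signatures below are assumptions based on the description above rather than a verbatim copy of the repo's trainers, and `ExampleTrainer` with its single mel loss is a placeholder), a new trainer overriding the three required functions might look like this:
+ 
+ ```python
+ import tensorflow as tf
+ 
+ # assumed import path; see base_trainer.py linked above for the actual classes
+ from tensorflow_tts.trainers import Seq2SeqBasedTrainer
+ 
+ 
+ class ExampleTrainer(Seq2SeqBasedTrainer):
+     """Hypothetical trainer showing only the three required overrides."""
+ 
+     def compile(self, model, optimizer):
+         # define the model, optimizer, and the losses used below
+         super().compile(model, optimizer)
+         self.mae = tf.keras.losses.MeanAbsoluteError(
+             reduction=tf.keras.losses.Reduction.NONE  # keep per-example values
+         )
+ 
+     def compute_per_example_losses(self, batch, outputs):
+         # assume the model returns predicted mels of shape [batch_size, T, n_mels]
+         mel_preds = outputs
+         # reduce over the time axis so every loss element has shape [batch_size]
+         mel_loss = tf.reduce_mean(self.mae(batch["mel_gts"], mel_preds), axis=-1)
+         per_example_losses = mel_loss
+         dict_metrics_losses = {"mel_loss": mel_loss}
+         return per_example_losses, dict_metrics_losses
+ 
+     def generate_and_save_intermediate_result(self, batch):
+         # plot alignments / mel-spectrograms or write generated audio here
+         pass
+ ```
+ 
+ See the training scripts linked above (e.g. train_fastspeech.py, train_tacotron2.py) for complete, working trainers.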
260
+
261
+ # End-to-End Examples
262
+ You can learn how to run inference with each model in the [notebooks](https://github.com/tensorspeech/TensorFlowTTS/tree/master/notebooks) or in a [colab](https://colab.research.google.com/drive/1akxtrLZHKuMiQup00tzO2olCaN-y3KiD?usp=sharing) (for English), [colab](https://colab.research.google.com/drive/1ybWwOS5tipgPFttNulp77P6DAB5MtiuN?usp=sharing) (for Korean), [colab](https://colab.research.google.com/drive/1YpSHRBRPBI7cnTkQn1UcVTWEQVbsUm1S?usp=sharing) (for Chinese), [colab](https://colab.research.google.com/drive/1jd3u46g-fGQw0rre8fIwWM9heJvrV1c0?usp=sharing) (for French), or [colab](https://colab.research.google.com/drive/1W0nSFpsz32M0OcIkY9uMOiGrLTPKVhTy?usp=sharing) (for German). Below is example code for end-to-end inference with FastSpeech2 and Multi-band MelGAN. All of our pretrained models are uploaded to the [HuggingFace Hub](https://huggingface.co/tensorspeech).
263
+
264
+ ```python
265
+ import numpy as np
266
+ import soundfile as sf
267
+ import yaml
268
+
269
+ import tensorflow as tf
270
+
271
+ from tensorflow_tts.inference import TFAutoModel
272
+ from tensorflow_tts.inference import AutoProcessor
273
+
274
+ # initialize fastspeech2 model.
275
+ fastspeech2 = TFAutoModel.from_pretrained("tensorspeech/tts-fastspeech2-ljspeech-en")
276
+
277
+
278
+ # initialize mb_melgan model
279
+ mb_melgan = TFAutoModel.from_pretrained("tensorspeech/tts-mb_melgan-ljspeech-en")
280
+
281
+
282
+ # inference
283
+ processor = AutoProcessor.from_pretrained("tensorspeech/tts-fastspeech2-ljspeech-en")
284
+
285
+ input_ids = processor.text_to_sequence("Recent research at Harvard has shown meditating for as little as 8 weeks, can actually increase the grey matter in the parts of the brain responsible for emotional regulation, and learning.")
286
+ # fastspeech inference
287
+
288
+ mel_before, mel_after, duration_outputs, _, _ = fastspeech2.inference(
289
+ input_ids=tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
290
+ speaker_ids=tf.convert_to_tensor([0], dtype=tf.int32),
291
+ speed_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
292
+ f0_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
293
+ energy_ratios=tf.convert_to_tensor([1.0], dtype=tf.float32),
294
+ )
295
+
296
+ # mb_melgan (vocoder) inference
297
+ audio_before = mb_melgan.inference(mel_before)[0, :, 0]
298
+ audio_after = mb_melgan.inference(mel_after)[0, :, 0]
299
+
300
+ # save to file
301
+ sf.write('./audio_before.wav', audio_before, 22050, "PCM_16")
302
+ sf.write('./audio_after.wav', audio_after, 22050, "PCM_16")
303
+ ```
304
+
305
+ # Contact
306
+ - [Minh Nguyen Quan Anh](https://github.com/tensorspeech): nguyenquananhminh@gmail.com
307
+ - [erogol](https://github.com/erogol): erengolge@gmail.com
308
+ - [Kuan Chen](https://github.com/azraelkuan): azraelkuan@gmail.com
309
+ - [Dawid Kobus](https://github.com/machineko): machineko@protonmail.com
310
+ - [Takuya Ebata](https://github.com/MokkeMeguru): meguru.mokke@gmail.com
311
+ - [Trinh Le Quang](https://github.com/l4zyf9x): trinhle.cse@gmail.com
312
+ - [Yunchao He](https://github.com/candlewill): yunchaohe@gmail.com
313
+ - [Alejandro Miguel Velasquez](https://github.com/ZDisket): xml506ok@gmail.com
314
+
315
+ # License
316
+ All models here are licensed under the [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0) license.
317
+
318
+ # Acknowledgement
319
+ We want to thank [Tomoki Hayashi](https://github.com/kan-bayashi), who discussed MelGAN, Multi-band MelGAN, FastSpeech, and Tacotron with us at length. This framework is based on his great open-source [ParallelWaveGAN](https://github.com/kan-bayashi/ParallelWaveGAN) project.
TensorFlowTTS/docker-compose.yml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: '2.6'
2
+ services:
3
+ tensorflowtts:
4
+ build: .
5
+ volumes:
6
+ - .:/workspace
7
+ runtime: nvidia
8
+ tty: true
9
+ command: /bin/bash
10
+ environment:
11
+ - CUDA_VISIBLE_DEVICES
TensorFlowTTS/dockerfile ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ FROM tensorflow/tensorflow:2.6.0-gpu
2
+ RUN apt-get update
3
+ RUN apt-get install -y zsh tmux wget git libsndfile1
4
+ RUN pip install ipython && \
5
+ pip install git+https://github.com/TensorSpeech/TensorflowTTS.git && \
6
+ pip install git+https://github.com/repodiac/german_transliterate.git#egg=german_transliterate
7
+ RUN mkdir /workspace
8
+ WORKDIR /workspace
TensorFlowTTS/examples/android/.gitignore ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Android Studio
2
+ *.iml
3
+ .gradle
4
+ /local.properties
5
+ /.idea
6
+ .DS_Store
7
+ /build
8
+ /captures
9
+
10
+ # Built application files
11
+ *.apk
12
+ !prebuiltapps/*.apk
13
+ *.ap_
14
+
15
+ # Files for the Dalvik VM
16
+ *.dex
17
+
18
+ # Java class files
19
+ *.class
20
+
21
+ # Generated files
22
+ bin/
23
+ gen/
24
+
25
+ # Gradle files
26
+ .gradle/
27
+ build/
28
+ */build/
29
+
30
+ # Local configuration file (sdk path, etc)
31
+ local.properties
32
+
33
+ # Proguard folder generated by Eclipse
34
+ proguard/
35
+
36
+ # Log Files
37
+ *.log
38
+
39
+ # project
40
+ project.properties
41
+ .classpath
42
+ .project
43
+ .settings/
44
+
45
+ # Intellij project files
46
+ *.ipr
47
+ *.iws
48
+ .idea/
49
+ app/.gradle/
50
+ .idea/libraries
51
+ .idea/workspace.xml
52
+ .idea/vcs.xml
53
+ .idea/scopes/scope_setting.xml
54
+ .idea/moudles.xml
55
+ .idea/misc.xml
56
+ .idea/inspectionProfiles/Project_Default.xml
57
+ .idea/inspectionProfiles/profiles_setting.xml
58
+ .idea/encodings.xml
59
+ .idea/.name
TensorFlowTTS/examples/android/README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Android Demo
2
+
3
+ This is a simple Android demo that loads converted FastSpeech2 and Multi-Band MelGAN modules to synthesize audio.
4
+ In order to optimize synthesis speed, two LinkedBlockingQueues have been implemented.
5
+
6
+
7
+ ### HOW-TO
8
+ 1. Import this project into Android Studio.
9
+ 2. Run the app!
10
+
11
+ ### LICENSE
12
+ The license used for this code is [CC BY-NC 3.0](https://creativecommons.org/licenses/by-nc/3.0/). Please read the license carefully before you use it.
13
+
14
+ ### Contributors
15
+ [Xuefeng Ding](https://github.com/mapledxf)
TensorFlowTTS/examples/android/app/.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ /build
TensorFlowTTS/examples/android/app/build.gradle ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apply plugin: 'com.android.application'
2
+
3
+ android {
4
+ compileSdkVersion 29
5
+ buildToolsVersion "29.0.2"
6
+ defaultConfig {
7
+ applicationId "com.tensorspeech.tensorflowtts"
8
+ minSdkVersion 21
9
+ targetSdkVersion 29
10
+ versionCode 1
11
+ versionName "1.0"
12
+ }
13
+ buildTypes {
14
+ release {
15
+ minifyEnabled false
16
+ proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
17
+ }
18
+ }
19
+ aaptOptions {
20
+ noCompress "tflite"
21
+ }
22
+ compileOptions {
23
+ sourceCompatibility = '1.8'
24
+ targetCompatibility = '1.8'
25
+ }
26
+ lintOptions {
27
+ abortOnError false
28
+ }
29
+ }
30
+
31
+ dependencies {
32
+ implementation fileTree(dir: 'libs', include: ['*.jar'])
33
+ implementation 'androidx.appcompat:appcompat:1.1.0'
34
+ implementation 'androidx.constraintlayout:constraintlayout:1.1.3'
35
+
36
+ implementation 'org.tensorflow:tensorflow-lite:0.0.0-nightly'
37
+ implementation 'org.tensorflow:tensorflow-lite-select-tf-ops:0.0.0-nightly'
38
+ implementation 'org.tensorflow:tensorflow-lite-support:0.0.0-nightly'
39
+ }
TensorFlowTTS/examples/android/app/proguard-rules.pro ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Add project specific ProGuard rules here.
2
+ # You can control the set of applied configuration files using the
3
+ # proguardFiles setting in build.gradle.
4
+ #
5
+ # For more details, see
6
+ # http://developer.android.com/guide/developing/tools/proguard.html
7
+
8
+ # If your project uses WebView with JS, uncomment the following
9
+ # and specify the fully qualified class name to the JavaScript interface
10
+ # class:
11
+ #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
12
+ # public *;
13
+ #}
14
+
15
+ # Uncomment this to preserve the line number information for
16
+ # debugging stack traces.
17
+ #-keepattributes SourceFile,LineNumberTable
18
+
19
+ # If you keep the line number information, uncomment this to
20
+ # hide the original source file name.
21
+ #-renamesourcefileattribute SourceFile
TensorFlowTTS/examples/android/app/src/androidTest/java/com/tensorspeech/tensorflowtts/ExampleInstrumentedTest.java ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts;
2
+
3
+ import android.content.Context;
4
+
5
+ import androidx.test.platform.app.InstrumentationRegistry;
6
+ import androidx.test.ext.junit.runners.AndroidJUnit4;
7
+
8
+ import org.junit.Test;
9
+ import org.junit.runner.RunWith;
10
+
11
+ import static org.junit.Assert.*;
12
+
13
+ /**
14
+ * Instrumented test, which will execute on an Android device.
15
+ *
16
+ * @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
17
+ */
18
+ @RunWith(AndroidJUnit4.class)
19
+ public class ExampleInstrumentedTest {
20
+ @Test
21
+ public void useAppContext() {
22
+ // Context of the app under test.
23
+ Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
24
+
25
+ assertEquals("com.tensorspeech.tensorflowtts", appContext.getPackageName());
26
+ }
27
+ }
TensorFlowTTS/examples/android/app/src/main/AndroidManifest.xml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="utf-8"?>
2
+ <manifest xmlns:android="http://schemas.android.com/apk/res/android"
3
+ package="com.tensorspeech.tensorflowtts">
4
+
5
+ <application
6
+ android:allowBackup="true"
7
+ android:icon="@mipmap/ic_launcher"
8
+ android:label="@string/app_name"
9
+ android:roundIcon="@mipmap/ic_launcher_round"
10
+ android:supportsRtl="true"
11
+ android:theme="@style/AppTheme">
12
+ <activity android:name=".MainActivity">
13
+ <intent-filter>
14
+ <action android:name="android.intent.action.MAIN" />
15
+
16
+ <category android:name="android.intent.category.LAUNCHER" />
17
+ </intent-filter>
18
+ </activity>
19
+ </application>
20
+
21
+ </manifest>
TensorFlowTTS/examples/android/app/src/main/assets/fastspeech2_quant.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68730c535f05b171195d173033d40d5c22c1b08bee7daf35df1b89788db52172
3
+ size 31015600
TensorFlowTTS/examples/android/app/src/main/assets/mbmelgan.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae1f7c0a3f97debe7ab438a36c230116e52fc161bb435a8890b70a598ae94070
3
+ size 10254100
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/MainActivity.java ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts;
2
+
3
+ import android.os.Bundle;
4
+ import android.text.TextUtils;
5
+ import android.view.View;
6
+ import android.widget.EditText;
7
+ import android.widget.RadioGroup;
8
+
9
+ import androidx.appcompat.app.AppCompatActivity;
10
+
11
+ import com.tensorspeech.tensorflowtts.dispatcher.OnTtsStateListener;
12
+ import com.tensorspeech.tensorflowtts.dispatcher.TtsStateDispatcher;
13
+ import com.tensorspeech.tensorflowtts.tts.TtsManager;
14
+ import com.tensorspeech.tensorflowtts.utils.ThreadPoolManager;
15
+
16
+ /**
17
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
18
+ * Created 2020-07-20 17:25
19
+ */
20
+ public class MainActivity extends AppCompatActivity {
21
+ private static final String DEFAULT_INPUT_TEXT = "Unless you work on a ship, it's unlikely that you use the word boatswain in everyday conversation, so it's understandably a tricky one. The word - which refers to a petty officer in charge of hull maintenance is not pronounced boats-wain Rather, it's bo-sun to reflect the salty pronunciation of sailors, as The Free Dictionary explains./Blue opinion poll conducted for the National Post.";
22
+
23
+ private View speakBtn;
24
+ private RadioGroup speedGroup;
25
+
26
+ @Override
27
+ protected void onCreate(Bundle savedInstanceState) {
28
+ super.onCreate(savedInstanceState);
29
+ setContentView(R.layout.activity_main);
30
+
31
+ TtsManager.getInstance().init(this);
32
+
33
+ TtsStateDispatcher.getInstance().addListener(new OnTtsStateListener() {
34
+ @Override
35
+ public void onTtsReady() {
36
+ speakBtn.setEnabled(true);
37
+ }
38
+
39
+ @Override
40
+ public void onTtsStart(String text) {
41
+ }
42
+
43
+ @Override
44
+ public void onTtsStop() {
45
+ }
46
+ });
47
+
48
+ EditText input = findViewById(R.id.input);
49
+ input.setHint(DEFAULT_INPUT_TEXT);
50
+
51
+ speedGroup = findViewById(R.id.speed_chooser);
52
+ speedGroup.check(R.id.normal);
53
+
54
+ speakBtn = findViewById(R.id.start);
55
+ speakBtn.setEnabled(false);
56
+ speakBtn.setOnClickListener(v ->
57
+ ThreadPoolManager.getInstance().execute(() -> {
58
+ float speed;
59
+ switch (speedGroup.getCheckedRadioButtonId()) {
60
+ case R.id.fast:
61
+ speed = 0.8F;
62
+ break;
63
+ case R.id.slow:
64
+ speed = 1.2F;
65
+ break;
66
+ case R.id.normal:
67
+ default:
68
+ speed = 1.0F;
69
+ break;
70
+ }
71
+
72
+ String inputText = input.getText().toString();
73
+ if (TextUtils.isEmpty(inputText)) {
74
+ inputText = DEFAULT_INPUT_TEXT;
75
+ }
76
+ TtsManager.getInstance().speak(inputText, speed, true);
77
+ }));
78
+
79
+ findViewById(R.id.stop).setOnClickListener(v ->
80
+ TtsManager.getInstance().stopTts());
81
+ }
82
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/dispatcher/OnTtsStateListener.java ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.dispatcher;
2
+
3
+ /**
4
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
5
+ * Created 2020-07-28 14:25
6
+ */
7
+ public interface OnTtsStateListener {
8
+ public void onTtsReady();
9
+
10
+ public void onTtsStart(String text);
11
+
12
+ public void onTtsStop();
13
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/dispatcher/TtsStateDispatcher.java ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.dispatcher;
2
+
3
+ import android.os.Handler;
4
+ import android.os.Looper;
5
+ import android.util.Log;
6
+
7
+ import java.util.concurrent.CopyOnWriteArrayList;
8
+
9
+ /**
10
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
11
+ * Created 2020-07-28 14:25
12
+ */
13
+ public class TtsStateDispatcher {
14
+ private static final String TAG = "TtsStateDispatcher";
15
+ private static volatile TtsStateDispatcher instance;
16
+ private static final Object INSTANCE_WRITE_LOCK = new Object();
17
+
18
+ public static TtsStateDispatcher getInstance() {
19
+ if (instance == null) {
20
+ synchronized (INSTANCE_WRITE_LOCK) {
21
+ if (instance == null) {
22
+ instance = new TtsStateDispatcher();
23
+ }
24
+ }
25
+ }
26
+ return instance;
27
+ }
28
+
29
+ private final Handler handler = new Handler(Looper.getMainLooper());
30
+
31
+ private CopyOnWriteArrayList<OnTtsStateListener> mListeners = new CopyOnWriteArrayList<>();
32
+
33
+ public void release() {
34
+ Log.d(TAG, "release: ");
35
+ mListeners.clear();
36
+ }
37
+
38
+ public void addListener(OnTtsStateListener listener) {
39
+ if (mListeners.contains(listener)) {
40
+ return;
41
+ }
42
+ Log.d(TAG, "addListener: " + listener.getClass());
43
+ mListeners.add(listener);
44
+ }
45
+
46
+ public void removeListener(OnTtsStateListener listener) {
47
+ if (mListeners.contains(listener)) {
48
+ Log.d(TAG, "removeListener: " + listener.getClass());
49
+ mListeners.remove(listener);
50
+ }
51
+ }
52
+
53
+ public void onTtsStart(String text){
54
+ Log.d(TAG, "onTtsStart: ");
55
+ if (!mListeners.isEmpty()) {
56
+ for (OnTtsStateListener listener : mListeners) {
57
+ handler.post(() -> listener.onTtsStart(text));
58
+ }
59
+ }
60
+ }
61
+
62
+ public void onTtsStop(){
63
+ Log.d(TAG, "onTtsStop: ");
64
+ if (!mListeners.isEmpty()) {
65
+ for (OnTtsStateListener listener : mListeners) {
66
+ handler.post(listener::onTtsStop);
67
+ }
68
+ }
69
+ }
70
+
71
+ public void onTtsReady(){
72
+ Log.d(TAG, "onTtsReady: ");
73
+ if (!mListeners.isEmpty()) {
74
+ for (OnTtsStateListener listener : mListeners) {
75
+ handler.post(listener::onTtsReady);
76
+ }
77
+ }
78
+ }
79
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/module/AbstractModule.java ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.module;
2
+
3
+ import org.tensorflow.lite.Interpreter;
4
+
5
+ /**
6
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
7
+ * Created 2020-07-20 17:25
8
+ *
9
+ */
10
+ abstract class AbstractModule {
11
+
12
+ Interpreter.Options getOption() {
13
+ Interpreter.Options options = new Interpreter.Options();
14
+ options.setNumThreads(5);
15
+ return options;
16
+ }
17
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/module/FastSpeech2.java ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.module;
2
+
3
+ import android.annotation.SuppressLint;
4
+ import android.util.Log;
5
+
6
+ import org.tensorflow.lite.DataType;
7
+ import org.tensorflow.lite.Interpreter;
8
+ import org.tensorflow.lite.Tensor;
9
+ import org.tensorflow.lite.support.tensorbuffer.TensorBuffer;
10
+
11
+ import java.io.File;
12
+ import java.nio.FloatBuffer;
13
+ import java.util.Arrays;
14
+ import java.util.HashMap;
15
+ import java.util.Map;
16
+
17
+ /**
18
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
19
+ * Created 2020-07-20 17:26
20
+ *
21
+ */
22
+ public class FastSpeech2 extends AbstractModule {
23
+ private static final String TAG = "FastSpeech2";
24
+ private Interpreter mModule;
25
+
26
+ public FastSpeech2(String modulePath) {
27
+ try {
28
+ mModule = new Interpreter(new File(modulePath), getOption());
29
+ int input = mModule.getInputTensorCount();
30
+ for (int i = 0; i < input; i++) {
31
+ Tensor inputTensor = mModule.getInputTensor(i);
32
+ Log.d(TAG, "input:" + i +
33
+ " name:" + inputTensor.name() +
34
+ " shape:" + Arrays.toString(inputTensor.shape()) +
35
+ " dtype:" + inputTensor.dataType());
36
+ }
37
+
38
+ int output = mModule.getOutputTensorCount();
39
+ for (int i = 0; i < output; i++) {
40
+ Tensor outputTensor = mModule.getOutputTensor(i);
41
+ Log.d(TAG, "output:" + i +
42
+ " name:" + outputTensor.name() +
43
+ " shape:" + Arrays.toString(outputTensor.shape()) +
44
+ " dtype:" + outputTensor.dataType());
45
+ }
46
+ Log.d(TAG, "successfully init");
47
+ } catch (Exception e) {
48
+ e.printStackTrace();
49
+ }
50
+ }
51
+
52
+ public TensorBuffer getMelSpectrogram(int[] inputIds, float speed) {
53
+ Log.d(TAG, "input id length: " + inputIds.length);
54
+ mModule.resizeInput(0, new int[]{1, inputIds.length});
55
+ mModule.allocateTensors();
56
+
57
+ @SuppressLint("UseSparseArrays")
58
+ Map<Integer, Object> outputMap = new HashMap<>();
59
+
60
+ FloatBuffer outputBuffer = FloatBuffer.allocate(350000);
61
+ outputMap.put(0, outputBuffer);
62
+
63
+ int[][] inputs = new int[1][inputIds.length];
64
+ inputs[0] = inputIds;
65
+
66
+ long time = System.currentTimeMillis();
67
+ mModule.runForMultipleInputsOutputs(
68
+ new Object[]{inputs, new int[1][1], new int[]{0}, new float[]{speed}, new float[]{1F}, new float[]{1F}},
69
+ outputMap);
70
+ Log.d(TAG, "time cost: " + (System.currentTimeMillis() - time));
71
+
72
+ int size = mModule.getOutputTensor(0).shape()[2];
73
+ int[] shape = {1, outputBuffer.position() / size, size};
74
+ TensorBuffer spectrogram = TensorBuffer.createFixedSize(shape, DataType.FLOAT32);
75
+ float[] outputArray = new float[outputBuffer.position()];
76
+ outputBuffer.rewind();
77
+ outputBuffer.get(outputArray);
78
+ spectrogram.loadArray(outputArray);
79
+
80
+ return spectrogram;
81
+ }
82
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/module/MBMelGan.java ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.module;
2
+
3
+ import android.util.Log;
4
+
5
+ import org.tensorflow.lite.Interpreter;
6
+ import org.tensorflow.lite.Tensor;
7
+ import org.tensorflow.lite.support.tensorbuffer.TensorBuffer;
8
+
9
+ import java.io.File;
10
+ import java.nio.FloatBuffer;
11
+ import java.util.Arrays;
12
+
13
+ /**
14
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
15
+ * Created 2020-07-20 17:26
16
+ *
17
+ */
18
+ public class MBMelGan extends AbstractModule {
19
+ private static final String TAG = "MBMelGan";
20
+ private Interpreter mModule;
21
+
22
+ public MBMelGan(String modulePath) {
23
+ try {
24
+ mModule = new Interpreter(new File(modulePath), getOption());
25
+ int input = mModule.getInputTensorCount();
26
+ for (int i = 0; i < input; i++) {
27
+ Tensor inputTensor = mModule.getInputTensor(i);
28
+ Log.d(TAG, "input:" + i
29
+ + " name:" + inputTensor.name()
30
+ + " shape:" + Arrays.toString(inputTensor.shape()) +
31
+ " dtype:" + inputTensor.dataType());
32
+ }
33
+
34
+ int output = mModule.getOutputTensorCount();
35
+ for (int i = 0; i < output; i++) {
36
+ Tensor outputTensor = mModule.getOutputTensor(i);
37
+ Log.d(TAG, "output:" + i
38
+ + " name:" + outputTensor.name()
39
+ + " shape:" + Arrays.toString(outputTensor.shape())
40
+ + " dtype:" + outputTensor.dataType());
41
+ }
42
+ Log.d(TAG, "successfully init");
43
+ } catch (Exception e) {
44
+ e.printStackTrace();
45
+ }
46
+ }
47
+
48
+
49
+ public float[] getAudio(TensorBuffer input) {
50
+ mModule.resizeInput(0, input.getShape());
51
+ mModule.allocateTensors();
52
+
53
+ FloatBuffer outputBuffer = FloatBuffer.allocate(350000);
54
+
55
+ long time = System.currentTimeMillis();
56
+ mModule.run(input.getBuffer(), outputBuffer);
57
+ Log.d(TAG, "time cost: " + (System.currentTimeMillis() - time));
58
+
59
+ float[] audioArray = new float[outputBuffer.position()];
60
+ outputBuffer.rewind();
61
+ outputBuffer.get(audioArray);
62
+ return audioArray;
63
+ }
64
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/tts/InputWorker.java ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.tts;
2
+
3
+ import android.util.Log;
4
+
5
+ import com.tensorspeech.tensorflowtts.dispatcher.TtsStateDispatcher;
6
+ import com.tensorspeech.tensorflowtts.module.FastSpeech2;
7
+ import com.tensorspeech.tensorflowtts.module.MBMelGan;
8
+ import com.tensorspeech.tensorflowtts.utils.Processor;
9
+ import com.tensorspeech.tensorflowtts.utils.ThreadPoolManager;
10
+
11
+ import org.tensorflow.lite.support.tensorbuffer.TensorBuffer;
12
+
13
+ import java.util.Arrays;
14
+ import java.util.concurrent.LinkedBlockingQueue;
15
+
16
+ /**
17
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
18
+ * Created 2020-07-28 14:25
19
+ */
20
+ class InputWorker {
21
+ private static final String TAG = "InputWorker";
22
+
23
+ private LinkedBlockingQueue<InputText> mInputQueue = new LinkedBlockingQueue<>();
24
+ private InputText mCurrentInputText;
25
+ private FastSpeech2 mFastSpeech2;
26
+ private MBMelGan mMBMelGan;
27
+ private Processor mProcessor;
28
+ private TtsPlayer mTtsPlayer;
29
+
30
+ InputWorker(String fastspeech, String vocoder) {
31
+ mFastSpeech2 = new FastSpeech2(fastspeech);
32
+ mMBMelGan = new MBMelGan(vocoder);
33
+ mProcessor = new Processor();
34
+ mTtsPlayer = new TtsPlayer();
35
+
36
+ ThreadPoolManager.getInstance().getSingleExecutor("worker").execute(() -> {
37
+ //noinspection InfiniteLoopStatement
38
+ while (true) {
39
+ try {
40
+ mCurrentInputText = mInputQueue.take();
41
+ Log.d(TAG, "processing: " + mCurrentInputText.INPUT_TEXT);
42
+ TtsStateDispatcher.getInstance().onTtsStart(mCurrentInputText.INPUT_TEXT);
43
+ mCurrentInputText.proceed();
44
+ TtsStateDispatcher.getInstance().onTtsStop();
45
+ } catch (Exception e) {
46
+ Log.e(TAG, "Exception: ", e);
47
+ }
48
+ }
49
+ });
50
+ }
51
+
52
+ void processInput(String inputText, float speed) {
53
+ Log.d(TAG, "add to queue: " + inputText);
54
+ mInputQueue.offer(new InputText(inputText, speed));
55
+ }
56
+
57
+ void interrupt() {
58
+ mInputQueue.clear();
59
+ if (mCurrentInputText != null) {
60
+ mCurrentInputText.interrupt();
61
+ }
62
+ mTtsPlayer.interrupt();
63
+ }
64
+
65
+
66
+ private class InputText {
67
+ private final String INPUT_TEXT;
68
+ private final float SPEED;
69
+ private boolean isInterrupt;
70
+
71
+ private InputText(String inputText, float speed) {
72
+ this.INPUT_TEXT = inputText;
73
+ this.SPEED = speed;
74
+ }
75
+
76
+ private void proceed() {
77
+ String[] sentences = INPUT_TEXT.split("[.,]");
78
+ Log.d(TAG, "speak: " + Arrays.toString(sentences));
79
+
80
+ for (String sentence : sentences) {
81
+
82
+ long time = System.currentTimeMillis();
83
+
84
+ int[] inputIds = mProcessor.textToIds(sentence);
85
+
86
+ TensorBuffer output = mFastSpeech2.getMelSpectrogram(inputIds, SPEED);
87
+
88
+ if (isInterrupt) {
89
+ Log.d(TAG, "proceed: interrupt");
90
+ return;
91
+ }
92
+
93
+ long encoderTime = System.currentTimeMillis();
94
+
95
+ float[] audioData = mMBMelGan.getAudio(output);
96
+
97
+ if (isInterrupt) {
98
+ Log.d(TAG, "proceed: interrupt");
99
+ return;
100
+ }
101
+
102
+ long vocoderTime = System.currentTimeMillis();
103
+
104
+ Log.d(TAG, "Time cost: " + (encoderTime - time) + "+" + (vocoderTime - encoderTime) + "=" + (vocoderTime - time));
105
+
106
+ mTtsPlayer.play(new TtsPlayer.AudioData(sentence, audioData));
107
+ }
108
+ }
109
+
110
+ private void interrupt() {
111
+ this.isInterrupt = true;
112
+ }
113
+ }
114
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/tts/TtsManager.java ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.tts;
2
+
3
+ import android.content.Context;
4
+ import android.util.Log;
5
+
6
+ import com.tensorspeech.tensorflowtts.dispatcher.TtsStateDispatcher;
7
+ import com.tensorspeech.tensorflowtts.utils.ThreadPoolManager;
8
+
9
+ import java.io.File;
10
+ import java.io.FileOutputStream;
11
+ import java.io.InputStream;
12
+ import java.io.OutputStream;
13
+
14
+ /**
15
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
16
+ * Created 2020-07-28 14:25
17
+ */
18
+ public class TtsManager {
19
+ private static final String TAG = "TtsManager";
20
+
21
+ private static final Object INSTANCE_WRITE_LOCK = new Object();
22
+
23
+ private static volatile TtsManager instance;
24
+
25
+ public static TtsManager getInstance() {
26
+ if (instance == null) {
27
+ synchronized (INSTANCE_WRITE_LOCK) {
28
+ if (instance == null) {
29
+ instance = new TtsManager();
30
+ }
31
+ }
32
+ }
33
+ return instance;
34
+ }
35
+
36
+ private InputWorker mWorker;
37
+
38
+ private final static String FASTSPEECH2_MODULE = "fastspeech2_quant.tflite";
39
+ private final static String MELGAN_MODULE = "mbmelgan.tflite";
40
+
41
+ public void init(Context context) {
42
+ ThreadPoolManager.getInstance().getSingleExecutor("init").execute(() -> {
43
+ try {
44
+ String fastspeech = copyFile(context, FASTSPEECH2_MODULE);
45
+ String vocoder = copyFile(context, MELGAN_MODULE);
46
+ mWorker = new InputWorker(fastspeech, vocoder);
47
+ } catch (Exception e) {
48
+ Log.e(TAG, "mWorker init failed", e);
49
+ }
50
+
51
+ TtsStateDispatcher.getInstance().onTtsReady();
52
+ });
53
+ }
54
+
55
+ private String copyFile(Context context, String strOutFileName) {
56
+ Log.d(TAG, "start copy file " + strOutFileName);
57
+ File file = context.getFilesDir();
58
+
59
+ String tmpFile = file.getAbsolutePath() + "/" + strOutFileName;
60
+ File f = new File(tmpFile);
61
+ if (f.exists()) {
62
+ Log.d(TAG, "file exists " + strOutFileName);
63
+ return f.getAbsolutePath();
64
+ }
65
+
66
+ try (OutputStream myOutput = new FileOutputStream(f);
67
+ InputStream myInput = context.getAssets().open(strOutFileName)) {
68
+ byte[] buffer = new byte[1024];
69
+ int length = myInput.read(buffer);
70
+ while (length > 0) {
71
+ myOutput.write(buffer, 0, length);
72
+ length = myInput.read(buffer);
73
+ }
74
+ myOutput.flush();
75
+ Log.d(TAG, "Copy task successful");
76
+ } catch (Exception e) {
77
+ Log.e(TAG, "copyFile: Failed to copy", e);
78
+ } finally {
79
+ Log.d(TAG, "end copy file " + strOutFileName);
80
+ }
81
+ return f.getAbsolutePath();
82
+ }
83
+
84
+ public void stopTts() {
85
+ mWorker.interrupt();
86
+ }
87
+
88
+ public void speak(String inputText, float speed, boolean interrupt) {
89
+ if (interrupt) {
90
+ stopTts();
91
+ }
92
+
93
+ ThreadPoolManager.getInstance().execute(() ->
94
+ mWorker.processInput(inputText, speed));
95
+ }
96
+
97
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/tts/TtsPlayer.java ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.tts;
2
+
3
+ import android.media.AudioAttributes;
4
+ import android.media.AudioFormat;
5
+ import android.media.AudioManager;
6
+ import android.media.AudioTrack;
7
+ import android.util.Log;
8
+
9
+ import com.tensorspeech.tensorflowtts.utils.ThreadPoolManager;
10
+
11
+ import java.util.concurrent.LinkedBlockingQueue;
12
+
13
+ /**
14
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
15
+ * Created 2020-07-20 18:22
16
+ */
17
+ class TtsPlayer {
18
+ private static final String TAG = "TtsPlayer";
19
+
20
+ private final AudioTrack mAudioTrack;
21
+
22
+ private final static int FORMAT = AudioFormat.ENCODING_PCM_FLOAT;
23
+ private final static int SAMPLERATE = 22050;
24
+ private final static int CHANNEL = AudioFormat.CHANNEL_OUT_MONO;
25
+ private final static int BUFFER_SIZE = AudioTrack.getMinBufferSize(SAMPLERATE, CHANNEL, FORMAT);
26
+ private LinkedBlockingQueue<AudioData> mAudioQueue = new LinkedBlockingQueue<>();
27
+ private AudioData mCurrentAudioData;
28
+
29
+ TtsPlayer() {
30
+ mAudioTrack = new AudioTrack(
31
+ new AudioAttributes.Builder()
32
+ .setUsage(AudioAttributes.USAGE_MEDIA)
33
+ .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
34
+ .build(),
35
+ new AudioFormat.Builder()
36
+ .setSampleRate(22050)
37
+ .setEncoding(FORMAT)
38
+ .setChannelMask(CHANNEL)
39
+ .build(),
40
+ BUFFER_SIZE,
41
+ AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE
42
+ );
43
+ mAudioTrack.play();
44
+
45
+ ThreadPoolManager.getInstance().getSingleExecutor("audio").execute(() -> {
46
+ //noinspection InfiniteLoopStatement
47
+ while (true) {
48
+ try {
49
+ mCurrentAudioData = mAudioQueue.take();
50
+ Log.d(TAG, "playing: " + mCurrentAudioData.text);
51
+ int index = 0;
52
+ while (index < mCurrentAudioData.audio.length && !mCurrentAudioData.isInterrupt) {
53
+ int buffer = Math.min(BUFFER_SIZE, mCurrentAudioData.audio.length - index);
54
+ mAudioTrack.write(mCurrentAudioData.audio, index, buffer, AudioTrack.WRITE_BLOCKING);
55
+ index += BUFFER_SIZE;
56
+ }
57
+ } catch (Exception e) {
58
+ Log.e(TAG, "Exception: ", e);
59
+ }
60
+ }
61
+ });
62
+ }
63
+
64
+ void play(AudioData audioData) {
65
+ Log.d(TAG, "add audio data to queue: " + audioData.text);
66
+ mAudioQueue.offer(audioData);
67
+ }
68
+
69
+ void interrupt() {
70
+ mAudioQueue.clear();
71
+ if (mCurrentAudioData != null) {
72
+ mCurrentAudioData.interrupt();
73
+ }
74
+ }
75
+
76
+ static class AudioData {
77
+ private String text;
78
+ private float[] audio;
79
+ private boolean isInterrupt;
80
+
81
+ AudioData(String text, float[] audio) {
82
+ this.text = text;
83
+ this.audio = audio;
84
+ }
85
+
86
+ private void interrupt() {
87
+ isInterrupt = true;
88
+ }
89
+ }
90
+
91
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/utils/NumberNorm.java ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.utils;
2
+
3
+ import java.util.HashMap;
4
+ import java.util.Map;
5
+
6
+ // Borrowed from https://rosettacode.org/wiki/Spelling_of_ordinal_numbers
7
+ public class NumberNorm {
8
+
9
+ private static Map<String,String> ordinalMap = new HashMap<>();
10
+ static {
11
+ ordinalMap.put("one", "first");
12
+ ordinalMap.put("two", "second");
13
+ ordinalMap.put("three", "third");
14
+ ordinalMap.put("five", "fifth");
15
+ ordinalMap.put("eight", "eighth");
16
+ ordinalMap.put("nine", "ninth");
17
+ ordinalMap.put("twelve", "twelfth");
18
+ }
19
+
20
+ public static String toOrdinal(long n) {
21
+ String spelling = numToString(n);
22
+ String[] split = spelling.split(" ");
23
+ String last = split[split.length - 1];
24
+ String replace;
25
+ if ( last.contains("-") ) {
26
+ String[] lastSplit = last.split("-");
27
+ String lastWithDash = lastSplit[1];
28
+ String lastReplace;
29
+ if ( ordinalMap.containsKey(lastWithDash) ) {
30
+ lastReplace = ordinalMap.get(lastWithDash);
31
+ }
32
+ else if ( lastWithDash.endsWith("y") ) {
33
+ lastReplace = lastWithDash.substring(0, lastWithDash.length() - 1) + "ieth";
34
+ }
35
+ else {
36
+ lastReplace = lastWithDash + "th";
37
+ }
38
+ replace = lastSplit[0] + "-" + lastReplace;
39
+ }
40
+ else {
41
+ if ( ordinalMap.containsKey(last) ) {
42
+ replace = ordinalMap.get(last);
43
+ }
44
+ else if ( last.endsWith("y") ) {
45
+ replace = last.substring(0, last.length() - 1) + "ieth";
46
+ }
47
+ else {
48
+ replace = last + "th";
49
+ }
50
+ }
51
+ split[split.length - 1] = replace;
52
+ return String.join(" ", split);
53
+ }
54
+
55
+ private static final String[] nums = new String[] {
56
+ "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine",
57
+ "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"
58
+ };
59
+
60
+ private static final String[] tens = new String[] {"zero", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"};
61
+
62
+ public static final String numToString(long n) {
63
+ return numToStringHelper(n);
64
+ }
65
+
66
+ private static final String numToStringHelper(long n) {
67
+ if ( n < 0 ) {
68
+ return "negative " + numToStringHelper(-n);
69
+ }
70
+ int index = (int) n;
71
+ if ( n <= 19 ) {
72
+ return nums[index];
73
+ }
74
+ if ( n <= 99 ) {
75
+ return tens[index/10] + (n % 10 > 0 ? "-" + numToStringHelper(n % 10) : "");
76
+ }
77
+ String label = null;
78
+ long factor = 0;
79
+ if ( n <= 999 ) {
80
+ label = "hundred";
81
+ factor = 100;
82
+ }
83
+ else if ( n <= 999999) {
84
+ label = "thousand";
85
+ factor = 1000;
86
+ }
87
+ else if ( n <= 999999999) {
88
+ label = "million";
89
+ factor = 1000000;
90
+ }
91
+ else if ( n <= 999999999999L) {
92
+ label = "billion";
93
+ factor = 1000000000;
94
+ }
95
+ else if ( n <= 999999999999999L) {
96
+ label = "trillion";
97
+ factor = 1000000000000L;
98
+ }
99
+ else if ( n <= 999999999999999999L) {
100
+ label = "quadrillion";
101
+ factor = 1000000000000000L;
102
+ }
103
+ else {
104
+ label = "quintillion";
105
+ factor = 1000000000000000000L;
106
+ }
107
+ return numToStringHelper(n / factor) + " " + label + (n % factor > 0 ? " " + numToStringHelper(n % factor ) : "");
108
+ }
109
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/utils/Processor.java ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.utils;
2
+
3
+
4
+ import android.util.Log;
5
+
6
+ import androidx.annotation.Nullable;
7
+
8
+ import java.nio.charset.StandardCharsets;
9
+ import java.util.ArrayList;
10
+ import java.util.HashMap;
11
+ import java.util.List;
12
+ import java.util.Map;
13
+ import java.util.regex.Matcher;
14
+ import java.util.regex.Pattern;
15
+
16
+ /**
17
+ * @author {@link "mailto:yusufsarigoz@gmail.com" "M. Yusuf Sarıgöz"}
18
+ * Created 2020-07-25 17:25
19
+ */
20
+ public class Processor {
21
+ private static final String TAG = "processor";
22
+
23
+ private static final String[] VALID_SYMBOLS = new String[]{
24
+ "AA",
25
+ "AA0",
26
+ "AA1",
27
+ "AA2",
28
+ "AE",
29
+ "AE0",
30
+ "AE1",
31
+ "AE2",
32
+ "AH",
33
+ "AH0",
34
+ "AH1",
35
+ "AH2",
36
+ "AO",
37
+ "AO0",
38
+ "AO1",
39
+ "AO2",
40
+ "AW",
41
+ "AW0",
42
+ "AW1",
43
+ "AW2",
44
+ "AY",
45
+ "AY0",
46
+ "AY1",
47
+ "AY2",
48
+ "B",
49
+ "CH",
50
+ "D",
51
+ "DH",
52
+ "EH",
53
+ "EH0",
54
+ "EH1",
55
+ "EH2",
56
+ "ER",
57
+ "ER0",
58
+ "ER1",
59
+ "ER2",
60
+ "EY",
61
+ "EY0",
62
+ "EY1",
63
+ "EY2",
64
+ "F",
65
+ "G",
66
+ "HH",
67
+ "IH",
68
+ "IH0",
69
+ "IH1",
70
+ "IH2",
71
+ "IY",
72
+ "IY0",
73
+ "IY1",
74
+ "IY2",
75
+ "JH",
76
+ "K",
77
+ "L",
78
+ "M",
79
+ "N",
80
+ "NG",
81
+ "OW",
82
+ "OW0",
83
+ "OW1",
84
+ "OW2",
85
+ "OY",
86
+ "OY0",
87
+ "OY1",
88
+ "OY2",
89
+ "P",
90
+ "R",
91
+ "S",
92
+ "SH",
93
+ "T",
94
+ "TH",
95
+ "UH",
96
+ "UH0",
97
+ "UH1",
98
+ "UH2",
99
+ "UW",
100
+ "UW0",
101
+ "UW1",
102
+ "UW2",
103
+ "V",
104
+ "W",
105
+ "Y",
106
+ "Z",
107
+ "ZH"
108
+ };
109
+
110
+ private static final Pattern CURLY_RE = Pattern.compile("(.*?)\\{(.+?)\\}(.*)");
111
+ private static final Pattern COMMA_NUMBER_RE = Pattern.compile("([0-9][0-9\\,]+[0-9])");
112
+ private static final Pattern DECIMAL_RE = Pattern.compile("([0-9]+\\.[0-9]+)");
113
+ private static final Pattern POUNDS_RE = Pattern.compile("£([0-9\\,]*[0-9]+)");
114
+ private static final Pattern DOLLARS_RE = Pattern.compile("\\$([0-9.\\,]*[0-9]+)");
115
+ private static final Pattern ORDINAL_RE = Pattern.compile("[0-9]+(st|nd|rd|th)");
116
+ private static final Pattern NUMBER_RE = Pattern.compile("[0-9]+");
117
+
118
+ private static final String PAD = "_";
119
+ private static final String EOS = "~";
120
+ private static final String SPECIAL = "-";
121
+
122
+ private static final String[] PUNCTUATION = "!'(),.:;? ".split("");
123
+ private static final String[] LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz".split("");
124
+
125
+ private static final List<String> SYMBOLS = new ArrayList<>();
126
+ private static final Map<String, String> ABBREVIATIONS = new HashMap<>();
127
+ private static final Map<String, Integer> SYMBOL_TO_ID = new HashMap<>();
128
+
129
+ public Processor() {
130
+ SYMBOLS.add(PAD);
131
+ SYMBOLS.add(SPECIAL);
132
+
133
+ for (String p : PUNCTUATION) {
134
+ if (!"".equals(p)) {
135
+ SYMBOLS.add(p);
136
+ }
137
+ }
138
+
139
+ for (String l : LETTERS) {
140
+ if (!"".equals(l)) {
141
+ SYMBOLS.add(l);
142
+ }
143
+ }
144
+
145
+ for (String validSymbol : VALID_SYMBOLS) {
146
+ SYMBOLS.add("@" + validSymbol);
147
+ }
148
+
149
+ SYMBOLS.add(EOS);
150
+
151
+ for (int i = 0; i < SYMBOLS.size(); ++i) {
152
+ SYMBOL_TO_ID.put(SYMBOLS.get(i), i);
153
+ }
154
+
155
+ ABBREVIATIONS.put("mrs", "misess");
156
+ ABBREVIATIONS.put("mr", "mister");
157
+ ABBREVIATIONS.put("dr", "doctor");
158
+ ABBREVIATIONS.put("st", "saint");
159
+ ABBREVIATIONS.put("co", "company");
160
+ ABBREVIATIONS.put("jr", "junior");
161
+ ABBREVIATIONS.put("maj", "major");
162
+ ABBREVIATIONS.put("gen", "general");
163
+ ABBREVIATIONS.put("drs", "doctors");
164
+ ABBREVIATIONS.put("rev", "reverend");
165
+ ABBREVIATIONS.put("lt", "lieutenant");
166
+ ABBREVIATIONS.put("hon", "honorable");
167
+ ABBREVIATIONS.put("sgt", "sergeant");
168
+ ABBREVIATIONS.put("capt", "captain");
169
+ ABBREVIATIONS.put("esq", "esquire");
170
+ ABBREVIATIONS.put("ltd", "limited");
171
+ ABBREVIATIONS.put("col", "colonel");
172
+ ABBREVIATIONS.put("ft", "fort");
173
+ }
174
+
175
+
176
+ private List<Integer> symbolsToSequence(String symbols) {
177
+ List<Integer> sequence = new ArrayList<>();
178
+
179
+ for (int i = 0; i < symbols.length(); ++i) {
180
+ Integer id = SYMBOL_TO_ID.get(String.valueOf(symbols.charAt(i)));
181
+ if (id == null) {
182
+ Log.e(TAG, "symbolsToSequence: id is not found for " + symbols.charAt(i));
183
+ } else {
184
+ sequence.add(id);
185
+ }
186
+ }
187
+
188
+ return sequence;
189
+ }
190
+
191
+ private List<Integer> arpabetToSequence(@Nullable String symbols) {
192
+ List<Integer> sequence = new ArrayList<>();
193
+ if (symbols != null) {
194
+ String[] as = symbols.split(" ");
195
+ for (String s : as) {
196
+ sequence.add(SYMBOL_TO_ID.get("@" + s));
197
+ }
198
+ }
199
+ return sequence;
200
+ }
201
+
202
+ private String convertToAscii(String text) {
203
+ byte[] bytes = text.getBytes(StandardCharsets.US_ASCII);
204
+ return new String(bytes);
205
+ }
206
+
207
+ private String collapseWhitespace(String text) {
208
+ return text.replaceAll("\\s+", " ");
209
+ }
210
+
211
+ private String expandAbbreviations(String text) {
212
+ for (Map.Entry<String, String> entry : ABBREVIATIONS.entrySet()) {
213
+ text = text.replaceAll("\\b" + entry.getKey() + "\\.", entry.getValue());
214
+ }
215
+ return text;
216
+ }
217
+
218
+ private String removeCommasFromNumbers(String text) {
219
+ Matcher m = COMMA_NUMBER_RE.matcher(text);
220
+ while (m.find()) {
221
+ String s = m.group().replaceAll(",", "");
222
+ text = text.replaceFirst(m.group(), s);
223
+ }
224
+ return text;
225
+ }
226
+
227
+ private String expandPounds(String text) {
228
+ Matcher m = POUNDS_RE.matcher(text);
229
+ while (m.find()) {
230
+ text = text.replaceFirst(m.group(), m.group() + " pounds");
231
+ }
232
+ return text;
233
+ }
234
+
235
+ private String expandDollars(String text) {
236
+ Matcher m = DOLLARS_RE.matcher(text);
237
+ while (m.find()) {
238
+ String dollars = "0";
239
+ String cents = "0";
240
+ String spelling = "";
241
+ String s = m.group().substring(1);
242
+ String[] parts = s.split("\\.");
243
+ if (!s.startsWith(".")) {
244
+ dollars = parts[0];
245
+ }
246
+ if (!s.endsWith(".") && parts.length > 1) {
247
+ cents = parts[1];
248
+ }
249
+ if (!"0".equals(dollars)) {
250
+ spelling += parts[0] + " dollars ";
251
+ }
252
+ if (!"0".equals(cents) && !"00".equals(cents)) {
253
+ spelling += parts[1] + " cents ";
254
+ }
255
+ text = text.replaceFirst("\\" + m.group(), spelling);
256
+ }
257
+ return text;
258
+ }
259
+
260
+ private String expandDecimals(String text) {
261
+ Matcher m = DECIMAL_RE.matcher(text);
262
+ while (m.find()) {
263
+ String s = m.group().replaceAll("\\.", " point ");
264
+ text = text.replaceFirst(m.group(), s);
265
+ }
266
+ return text;
267
+ }
268
+
269
+ private String expandOrdinals(String text) {
270
+ Matcher m = ORDINAL_RE.matcher(text);
271
+ while (m.find()) {
272
+ String s = m.group().substring(0, m.group().length() - 2);
273
+ long l = Long.valueOf(s);
274
+ String spelling = NumberNorm.toOrdinal(l);
275
+ text = text.replaceFirst(m.group(), spelling);
276
+ }
277
+ return text;
278
+ }
279
+
280
+ private String expandCardinals(String text) {
281
+ Matcher m = NUMBER_RE.matcher(text);
282
+ while (m.find()) {
283
+ long l = Long.valueOf(m.group());
284
+ String spelling = NumberNorm.numToString(l);
285
+ text = text.replaceFirst(m.group(), spelling);
286
+ }
287
+ return text;
288
+ }
289
+
290
+ private String expandNumbers(String text) {
291
+ text = removeCommasFromNumbers(text);
292
+ text = expandPounds(text);
293
+ text = expandDollars(text);
294
+ text = expandDecimals(text);
295
+ text = expandOrdinals(text);
296
+ text = expandCardinals(text);
297
+ return text;
298
+ }
299
+
300
+ private String cleanTextForEnglish(String text) {
301
+ text = convertToAscii(text);
302
+ text = text.toLowerCase();
303
+ text = expandAbbreviations(text);
304
+ try {
305
+ text = expandNumbers(text);
306
+ } catch (Exception e) {
307
+ Log.d(TAG, "Failed to convert numbers", e);
308
+ }
309
+ text = collapseWhitespace(text);
310
+ Log.d(TAG, "text preprocessed: " + text);
311
+ return text;
312
+ }
313
+
314
+ public int[] textToIds(String text) {
315
+ List<Integer> sequence = new ArrayList<>();
316
+ while (text!= null && text.length() > 0) {
317
+ Matcher m = CURLY_RE.matcher(text);
318
+ if (!m.find()) {
319
+ sequence.addAll(symbolsToSequence(cleanTextForEnglish(text)));
320
+ break;
321
+ }
322
+ sequence.addAll(symbolsToSequence(cleanTextForEnglish(m.group(1))));
323
+ sequence.addAll(arpabetToSequence(m.group(2)));
324
+ text = m.group(3);
325
+ }
326
+
327
+ int size = sequence.size();
328
+ Integer[] tmp = new Integer[size];
329
+ tmp = sequence.toArray(tmp);
330
+ int[] ids = new int[size];
331
+ for (int i = 0; i < size; ++i) {
332
+ ids[i] = tmp[i];
333
+ }
334
+ return ids;
335
+ }
336
+ }
TensorFlowTTS/examples/android/app/src/main/java/com/tensorspeech/tensorflowtts/utils/ThreadPoolManager.java ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ package com.tensorspeech.tensorflowtts.utils;
2
+
3
+ import android.os.Looper;
4
+ import android.os.Process;
5
+
6
+ import java.util.concurrent.LinkedBlockingQueue;
7
+ import java.util.concurrent.ScheduledThreadPoolExecutor;
8
+ import java.util.concurrent.ThreadFactory;
9
+ import java.util.concurrent.ThreadPoolExecutor;
10
+ import java.util.concurrent.TimeUnit;
11
+ import java.util.concurrent.atomic.AtomicInteger;
12
+
13
+ /**
14
+ * @author {@link "mailto:xuefeng.ding@outlook.com" "Xuefeng Ding"}
15
+ * Created 2020-07-20 17:25
16
+ */
17
+ @SuppressWarnings("unused")
18
+ public class ThreadPoolManager {
19
+
20
+ public static ThreadPoolManager getInstance() {
21
+ return ThreadPoolManager.Holder.INSTANCE;
22
+ }
23
+
24
+ private static final class Holder {
25
+ private static final ThreadPoolManager INSTANCE = new ThreadPoolManager();
26
+ }
27
+
28
+ private ThreadPoolExecutor mExecutor;
29
+
30
+ /**
31
+ * Constructor
32
+ */
33
+ private ThreadPoolManager() {
34
+ int corePoolSize = Runtime.getRuntime().availableProcessors() * 2 + 1;
35
+ ThreadFactory namedThreadFactory = new NamedThreadFactory("thread pool");
36
+
37
+ mExecutor = new ThreadPoolExecutor(
38
+ corePoolSize,
39
+ corePoolSize * 10,
40
+ 1,
41
+ TimeUnit.HOURS,
42
+ new LinkedBlockingQueue<>(),
43
+ namedThreadFactory,
44
+ new ThreadPoolExecutor.DiscardPolicy()
45
+ );
46
+ }
47
+
48
+ /**
49
+ * Execute a task
50
+ * @param runnable the asynchronous task to execute
51
+ */
52
+ public void execute(Runnable runnable) {
53
+ if (runnable == null) {
54
+ return;
55
+ }
56
+ mExecutor.execute(runnable);
57
+ }
58
+
59
+ /**
60
+ * single thread with name
61
+ * @param name thread name
62
+ * @return thread executor
63
+ */
64
+ public ScheduledThreadPoolExecutor getSingleExecutor(String name) {
65
+ return getSingleExecutor(name, Thread.NORM_PRIORITY);
66
+ }
67
+
68
+ /**
69
+ * single thread with name and priority
70
+ * @param name thread name
71
+ * @param priority thread priority
72
+ * @return Thread Executor
73
+ */
74
+ @SuppressWarnings("WeakerAccess")
75
+ public ScheduledThreadPoolExecutor getSingleExecutor(String name, int priority) {
76
+ return new ScheduledThreadPoolExecutor(
77
+ 1,
78
+ new NamedThreadFactory(name, priority));
79
+ }
80
+
81
+ /**
82
+ * Remove a task from the thread pool
83
+ * @param runnable the asynchronous task to remove
84
+ */
85
+ public void remove(Runnable runnable) {
86
+ if (runnable == null) {
87
+ return;
88
+ }
89
+ mExecutor.remove(runnable);
90
+ }
91
+
92
+ /**
93
+ * Factory class that names each thread in the pool
94
+ */
95
+ private static class NamedThreadFactory implements ThreadFactory {
96
+ private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1);
97
+ private final ThreadGroup group;
98
+ private final AtomicInteger threadNumber = new AtomicInteger(1);
99
+ private final String namePrefix;
100
+ private final int priority;
101
+
102
+ /**
103
+ * Constructor
104
+ * @param namePrefix thread name prefix
105
+ */
106
+ private NamedThreadFactory(String namePrefix) {
107
+ this(namePrefix, Thread.NORM_PRIORITY);
108
+ }
109
+
110
+ /**
111
+ * Constructor
112
+ * @param threadName thread name prefix
113
+ * @param priority thread priority
114
+ */
115
+ private NamedThreadFactory(String threadName, int priority) {
116
+ SecurityManager s = System.getSecurityManager();
117
+ group = (s != null) ? s.getThreadGroup() :
118
+ Thread.currentThread().getThreadGroup();
119
+ namePrefix = threadName + "-" + POOL_NUMBER.getAndIncrement();
120
+ this.priority = priority;
121
+ }
122
+
123
+ @Override
124
+ public Thread newThread(Runnable r) {
125
+ Thread t = new Thread(group, r,
126
+ namePrefix + threadNumber.getAndIncrement(),
127
+ 0);
128
+ if (t.isDaemon()) {
129
+ t.setDaemon(false);
130
+ }
131
+
132
+ t.setPriority(priority);
133
+
134
+ switch (priority) {
135
+ case Thread.MIN_PRIORITY:
136
+ Process.setThreadPriority(Process.THREAD_PRIORITY_LOWEST);
137
+ break;
138
+ case Thread.MAX_PRIORITY:
139
+ Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
140
+ break;
141
+ default:
142
+ Process.setThreadPriority(Process.THREAD_PRIORITY_FOREGROUND);
143
+ break;
144
+ }
145
+
146
+ return t;
147
+ }
148
+ }
149
+
150
+ /**
151
+ * Check whether the current thread is the main thread
152
+ * @return {@code true} if the current thread is main thread.
153
+ */
154
+ public static boolean isMainThread() {
155
+ return Looper.myLooper() == Looper.getMainLooper();
156
+ }
157
+ }
TensorFlowTTS/examples/android/app/src/main/res/drawable-v24/ic_launcher_foreground.xml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <vector xmlns:android="http://schemas.android.com/apk/res/android"
2
+ xmlns:aapt="http://schemas.android.com/aapt"
3
+ android:width="108dp"
4
+ android:height="108dp"
5
+ android:viewportWidth="108"
6
+ android:viewportHeight="108">
7
+ <path
8
+ android:fillType="evenOdd"
9
+ android:pathData="M32,64C32,64 38.39,52.99 44.13,50.95C51.37,48.37 70.14,49.57 70.14,49.57L108.26,87.69L108,109.01L75.97,107.97L32,64Z"
10
+ android:strokeWidth="1"
11
+ android:strokeColor="#00000000">
12
+ <aapt:attr name="android:fillColor">
13
+ <gradient
14
+ android:endX="78.5885"
15
+ android:endY="90.9159"
16
+ android:startX="48.7653"
17
+ android:startY="61.0927"
18
+ android:type="linear">
19
+ <item
20
+ android:color="#44000000"
21
+ android:offset="0.0" />
22
+ <item
23
+ android:color="#00000000"
24
+ android:offset="1.0" />
25
+ </gradient>
26
+ </aapt:attr>
27
+ </path>
28
+ <path
29
+ android:fillColor="#FFFFFF"
30
+ android:fillType="nonZero"
31
+ android:pathData="M66.94,46.02L66.94,46.02C72.44,50.07 76,56.61 76,64L32,64C32,56.61 35.56,50.11 40.98,46.06L36.18,41.19C35.45,40.45 35.45,39.3 36.18,38.56C36.91,37.81 38.05,37.81 38.78,38.56L44.25,44.05C47.18,42.57 50.48,41.71 54,41.71C57.48,41.71 60.78,42.57 63.68,44.05L69.11,38.56C69.84,37.81 70.98,37.81 71.71,38.56C72.44,39.3 72.44,40.45 71.71,41.19L66.94,46.02ZM62.94,56.92C64.08,56.92 65,56.01 65,54.88C65,53.76 64.08,52.85 62.94,52.85C61.8,52.85 60.88,53.76 60.88,54.88C60.88,56.01 61.8,56.92 62.94,56.92ZM45.06,56.92C46.2,56.92 47.13,56.01 47.13,54.88C47.13,53.76 46.2,52.85 45.06,52.85C43.92,52.85 43,53.76 43,54.88C43,56.01 43.92,56.92 45.06,56.92Z"
32
+ android:strokeWidth="1"
33
+ android:strokeColor="#00000000" />
34
+ </vector>
TensorFlowTTS/examples/android/app/src/main/res/drawable/ic_launcher_background.xml ADDED
@@ -0,0 +1,170 @@
+ <?xml version="1.0" encoding="utf-8"?>
+ <vector xmlns:android="http://schemas.android.com/apk/res/android"
+     android:width="108dp"
+     android:height="108dp"
+     android:viewportWidth="108"
+     android:viewportHeight="108">
+     <path
+         android:fillColor="#008577"
+         android:pathData="M0,0h108v108h-108z" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M9,0L9,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M19,0L19,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M29,0L29,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M39,0L39,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M49,0L49,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M59,0L59,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M69,0L69,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M79,0L79,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M89,0L89,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M99,0L99,108"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,9L108,9"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,19L108,19"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,29L108,29"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,39L108,39"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,49L108,49"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,59L108,59"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,69L108,69"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,79L108,79"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,89L108,89"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M0,99L108,99"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M19,29L89,29"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M19,39L89,39"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M19,49L89,49"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M19,59L89,59"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M19,69L89,69"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M19,79L89,79"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M29,19L29,89"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M39,19L39,89"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M49,19L49,89"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M59,19L59,89"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M69,19L69,89"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+     <path
+         android:fillColor="#00000000"
+         android:pathData="M79,19L79,89"
+         android:strokeWidth="0.8"
+         android:strokeColor="#33FFFFFF" />
+ </vector>
TensorFlowTTS/examples/android/app/src/main/res/layout/activity_main.xml ADDED
@@ -0,0 +1,77 @@
+ <?xml version="1.0" encoding="utf-8"?>
+ <LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+     xmlns:tools="http://schemas.android.com/tools"
+     android:layout_width="match_parent"
+     android:layout_height="match_parent"
+     android:gravity="center_horizontal"
+     android:orientation="vertical"
+     tools:context=".MainActivity">
+
+     <EditText
+         android:id="@+id/input"
+         android:inputType="textMultiLine"
+         android:layout_width="match_parent"
+         android:maxLines="5"
+         android:layout_height="wrap_content"
+         android:gravity="top" />
+
+     <LinearLayout
+         android:layout_width="wrap_content"
+         android:layout_height="wrap_content"
+         android:gravity="center"
+         android:orientation="horizontal">
+
+         <TextView
+             android:layout_width="wrap_content"
+             android:layout_height="wrap_content"
+             android:text="Speed Control:" />
+
+         <RadioGroup
+             android:id="@+id/speed_chooser"
+             android:layout_width="wrap_content"
+             android:layout_height="wrap_content"
+             android:orientation="horizontal">
+
+             <RadioButton
+                 android:id="@+id/fast"
+                 android:layout_width="wrap_content"
+                 android:layout_height="wrap_content"
+                 android:text="Fast" />
+
+             <RadioButton
+                 android:id="@+id/normal"
+                 android:layout_width="wrap_content"
+                 android:layout_height="wrap_content"
+                 android:text="Normal" />
+
+             <RadioButton
+                 android:id="@+id/slow"
+                 android:layout_width="wrap_content"
+                 android:layout_height="wrap_content"
+                 android:text="Slow" />
+
+         </RadioGroup>
+     </LinearLayout>
+
+     <LinearLayout
+         android:layout_width="match_parent"
+         android:layout_height="wrap_content"
+         android:gravity="center_horizontal"
+         android:orientation="horizontal">
+
+         <TextView
+             android:id="@+id/start"
+             android:layout_width="wrap_content"
+             android:layout_height="wrap_content"
+             android:padding="20dp"
+             android:text="Speak" />
+
+         <TextView
+             android:id="@+id/stop"
+             android:layout_width="wrap_content"
+             android:layout_height="wrap_content"
+             android:padding="20dp"
+             android:text="Stop" />
+
+     </LinearLayout>
+ </LinearLayout>
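
The layout above exposes four view IDs (`@+id/input`, `@+id/speed_chooser`, `@+id/start`, `@+id/stop`). For orientation only, here is a minimal Java sketch of how an activity could bind them; this is not the MainActivity.java shipped in this upload, and the speak()/stopTts() helpers and the speed constants are hypothetical placeholders.

```java
package com.tensorspeech.tensorflowtts;

import android.app.Activity;
import android.os.Bundle;
import android.widget.EditText;
import android.widget.RadioGroup;

// Illustrative sketch only: binds the views declared in activity_main.xml.
// The real MainActivity in this upload may wire things up differently.
public class MainActivitySketch extends Activity {

    // Assumed speed ratios for the FastSpeech2 speed control; the actual
    // values used by the example app are not shown here.
    private static final float SPEED_FAST = 0.8f;
    private static final float SPEED_NORMAL = 1.0f;
    private static final float SPEED_SLOW = 1.2f;

    private float speed = SPEED_NORMAL;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        EditText input = findViewById(R.id.input);

        // Update the requested speech speed when the radio selection changes.
        RadioGroup speedChooser = findViewById(R.id.speed_chooser);
        speedChooser.setOnCheckedChangeListener((group, checkedId) -> {
            if (checkedId == R.id.fast) {
                speed = SPEED_FAST;
            } else if (checkedId == R.id.slow) {
                speed = SPEED_SLOW;
            } else {
                speed = SPEED_NORMAL;
            }
        });

        // "Speak" hands the typed text to the TTS pipeline; "Stop" cancels it.
        findViewById(R.id.start).setOnClickListener(
                v -> speak(input.getText().toString(), speed));
        findViewById(R.id.stop).setOnClickListener(v -> stopTts());
    }

    // Hypothetical helpers standing in for whatever TTS entry points the app exposes.
    private void speak(String text, float speedRatio) { /* ... */ }

    private void stopTts() { /* ... */ }
}
```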
TensorFlowTTS/examples/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml ADDED
@@ -0,0 +1,5 @@
+ <?xml version="1.0" encoding="utf-8"?>
+ <adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
+     <background android:drawable="@drawable/ic_launcher_background" />
+     <foreground android:drawable="@drawable/ic_launcher_foreground" />
+ </adaptive-icon>
TensorFlowTTS/examples/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml ADDED
@@ -0,0 +1,5 @@
+ <?xml version="1.0" encoding="utf-8"?>
+ <adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
+     <background android:drawable="@drawable/ic_launcher_background" />
+     <foreground android:drawable="@drawable/ic_launcher_foreground" />
+ </adaptive-icon>
TensorFlowTTS/examples/android/app/src/main/res/mipmap-hdpi/ic_launcher.png ADDED
TensorFlowTTS/examples/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png ADDED
TensorFlowTTS/examples/android/app/src/main/res/mipmap-mdpi/ic_launcher.png ADDED
TensorFlowTTS/examples/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png ADDED
TensorFlowTTS/examples/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png ADDED
TensorFlowTTS/examples/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png ADDED