Commit 0196a95 ("init") committed by fcyai
Parent(s): 01a69aa
Files changed:
- chattts_webui_mix.ipynb +13 -89
- config.py +6 -4
- tts_model.py +68 -23
- utils.py +166 -34
- webui_mix.py +439 -81
chattts_webui_mix.ipynb
CHANGED
@@ -4,9 +4,7 @@
  "metadata": {
   "colab": {
    "provenance": [],
-   "gpuType": "T4"
-   "authorship_tag": "ABX9TyPWzXw++IDXf5gvuBHiHqmz",
-   "include_colab_link": true
+   "gpuType": "T4"
  },
  "kernelspec": {
   "name": "python3",
@@ -20,42 +18,8 @@
 "cells": [
  {
   "cell_type": "markdown",
-  "metadata": {
-   "id": "view-in-github",
-   "colab_type": "text"
-  },
-  "source": [
-   "<a href=\"https://colab.research.google.com/github/6drf21e/ChatTTS_colab/blob/main/chattts_webui_mix.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
-  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": 10,
-  "metadata": {
-   "colab": {
-    "base_uri": "https://localhost:8080/",
-    "height": 260
-   },
-   "id": "-VNe1BeDO1n0",
-   "outputId": "f3ed0cc9-b8dd-4f2a-9cdd-3106e41f485d"
-  },
-  "outputs": [
-   {
-    "output_type": "display_data",
-    "data": {
-     "text/plain": [
-      "<IPython.core.display.Markdown object>"
-     ],
-     "text/markdown": "\n### 🌟 如果你觉得 ChatTTS 和 ChatTTS_colab 项目对你有帮助,请访问以下链接给它们点个星星吧!🌟\n\n- [ChatTTS 项目](https://github.com/2noise/ChatTTS)\n\n- [ChatTTS_colab 项目](https://github.com/6drf21e/ChatTTS_colab)\n\n感谢你的支持!\n\n### 运行方法 ###\n\n- 点击菜单栏的--代码执行程序--全部运行即可\n- 执行后在下方的日志中找到类似\n\n Running on public URL: https://**********.gradio.live <-这个就是可以访问的公网地址\n\n安装包的时候提示要重启 请点**\"否\"**\n\n\n"
-    },
-    "metadata": {}
-   }
-  ],
  "source": [
-  "
-  "\n",
-  "message = \"\"\"\n",
-  "### 🌟 如果你觉得 ChatTTS 和 ChatTTS_colab 项目对你有帮助,请访问以下链接给它们点个星星吧!🌟\n",
+  "> 🌟 如果你觉得 ChatTTS 和 ChatTTS_colab 项目对你有帮助,请访问以下链接给它们点个星星吧!🌟\n",
   "\n",
   "- [ChatTTS 项目](https://github.com/2noise/ChatTTS)\n",
   "\n",
@@ -63,19 +27,18 @@
   "\n",
   "感谢你的支持!\n",
   "\n",
-  "
+  "# 运行方法\n",
   "\n",
   "- 点击菜单栏的--代码执行程序--全部运行即可\n",
   "- 执行后在下方的日志中找到类似\n",
   "\n",
   " Running on public URL: https://**************.gradio.live <-这个就是可以访问的公网地址\n",
   "\n",
-  "安装包的时候提示要重启 请点**\"否\"
-
-
-  "
-
-  ]
+  "安装包的时候提示要重启 请点**\"否\"**"
+ ],
+ "metadata": {
+  "id": "Xo3k5XsTzWK6"
+ }
 },
 {
  "cell_type": "code",
@@ -84,58 +47,19 @@
   "%cd ChatTTS_colab\n",
   "!git clone -q https://github.com/2noise/ChatTTS\n",
   "%cd ChatTTS\n",
-  "!git checkout -q
+  "!git checkout -q e6412b1\n",
   "%cd ..\n",
   "!mv ChatTTS abc\n",
-  "!mv abc/
-  "!pip install -q omegaconf vocos vector_quantize_pytorch gradio cn2an pypinyin openai\n",
+  "!mv abc/* /content/ChatTTS_colab/\n",
+  "!pip install -q omegaconf vocos vector_quantize_pytorch gradio cn2an pypinyin openai jieba WeTextProcessing python-dotenv\n",
   "# 启动 Gradio 有公网地址\n",
   "!python webui_mix.py --share\n"
  ],
 "metadata": {
-  "
-   "base_uri": "https://localhost:8080/"
-  },
-  "id": "hNDl-5muR77-",
-  "outputId": "9ca99a78-1354-4c4d-dfa9-30a82b1a7813"
+  "id": "hNDl-5muR77-"
 },
 "execution_count": null,
- "outputs": [
-  {
-   "output_type": "stream",
-   "name": "stdout",
-   "text": [
-    "/content/ChatTTS_colab/ChatTTS_colab\n",
-    "/content/ChatTTS_colab/ChatTTS_colab/ChatTTS\n",
-    "/content/ChatTTS_colab/ChatTTS_colab\n",
-    "Loading ChatTTS model...\n",
-    "INFO:ChatTTS.core:Load from cache: /root/.cache/huggingface/hub/models--2Noise--ChatTTS/snapshots/ce5913842aebd78e4a01a02d47244b8d62ac4ee3\n",
-    "INFO:ChatTTS.core:use cuda:0\n",
-    "INFO:ChatTTS.core:vocos loaded.\n",
-    "INFO:ChatTTS.core:dvae loaded.\n",
-    "INFO:ChatTTS.core:gpt loaded.\n",
-    "INFO:ChatTTS.core:decoder loaded.\n",
-    "INFO:ChatTTS.core:tokenizer loaded.\n",
-    "INFO:ChatTTS.core:All initialized.\n",
-    "INFO:httpx:HTTP Request: GET https://api.gradio.app/pkg-version \"HTTP/1.1 200 OK\"\n",
-    "INFO:httpx:HTTP Request: GET https://checkip.amazonaws.com/ \"HTTP/1.1 200 \"\n",
-    "Running on local URL: http://127.0.0.1:7860\n",
-    "INFO:httpx:HTTP Request: GET http://127.0.0.1:7860/startup-events \"HTTP/1.1 200 OK\"\n",
-    "INFO:httpx:HTTP Request: HEAD http://127.0.0.1:7860/ \"HTTP/1.1 200 OK\"\n",
-    "INFO:httpx:HTTP Request: GET https://api.gradio.app/v2/tunnel-request \"HTTP/1.1 200 OK\"\n",
-    "Running on public URL: https://054d1298c1303e0370.gradio.live\n",
-    "\n",
-    "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n",
-
"[' 海底二万里 。第一章\\u3000飞走的暗礁。人们一定还记得一八六六年海上发生的一件离奇的、神秘的、无法解释的怪事。且不说当时哄动沿海居民和世界舆论的各种传闻 这里只说一般航海人员特别激动的心情。欧美的进出口商人、船长和船主、各国的海军官佐以及这两大洲的各国政府都非常注意这件事。', '这事大体是这样 不久以前 好些大船在海上碰见了一一个 庞然大物 一个很长的物体 形状很像纺锤 有时发出磷光 它的体积比鲸鱼大得多 行动起来也比鲸鱼快得多。关于这个东西的出现 许多航海日志所记下的事实 如这个东西或这个生物的形状 在它运动时的难以估计的速度 它转移的惊人力量 它那种像是天生的特殊本领等等 大致是相同的。', '如果这东西是鲸鱼类动物 那么它的体积 是大大超过了生物学家曾经加以分类的鲸鱼。居维埃·拉色别德①、杜梅里②、卡特法日③ 这些生物学家一一除非看见过 也就是说 除非这些科学家本人的眼睛看见过——是不承认有这样一种怪物存在的。把多次观察的结果折中一下来看———方面丢开那些过低的估计 即这个东西只有二百英尺长 同时也不接受过于夸张的言论 即它有一英里。', '宽三英里长 ——我们可以肯定他说 晋书·阮籍传 。 其后纲维不摄 而虚无放诞之论盈于 这个奇怪的生物 如果真是存在的话 它的体积是大大超过鱼类学家所承认的体积的。这东西既然存在 而事实本身又是不可否认的 那么 由于人类好奇的心理 我们就不难理解这个怪物的出现会在全世界引起怎样的骚动。', '至于说这是荒唐无稽之谈 那是决不会有人同意的。因为 一八六六年七月二十日 加尔各答一布纳希汽船公司的喜金孙总督号 在澳大利亚海岸东边五英里 碰见了这个游动的巨大物体。巴克船长起初还以为这是没有人知道的、暗礁 他正要测定它的位置的时候 突然这个不可解释的物体喷出两道水柱 哗的一声射到空中一百五十英尺高。', '这么说 除非这座暗礁上边有间歇喷泉 不然的话 喜金孙总督号面前的东西 就是还没有人知道的一种海中哺乳类动物 它还从鼻孔中喷出有气泡的水柱呢。同年七月二十三日 西印度 太平洋汽船公司的克利斯托巴尔哥郎号 在太平洋上也碰到这样的事。喜金孙总督号看见这怪物以后三天 克利斯托巴尔哥郎号在相距七百里的地方也看见了它 由此可知义 实用性则是鉴别它们正确与否的根据。', ' 这个奇特的鲸鱼类动物能以掠人的速度从这一处转移到另一处。十五天以后 在离上面说的地点有两千里远的地方 国营轮船公司的海尔维地亚号和皇家邮船公司的山农号 在美国和欧洲之间的大西洋海面上相遇的时候 在北纬四十二度十五分、西经六十度三十五分的地方 同时看到了这个大怪物。', '根据两船同时观察得到的结果 估计这只哺乳动物的长度至少有三百五十多英尺 约一百零六米 因为山农号和海尔维地亚号两船连起来 都还比它短 两船从头至尾只有一百米长。可是 最长的鲸鱼 像常常出役于阿留申群岛的久阑马克岛和翁居里克岛①附近海面的那些鲸鱼 也只不过是五十六米 而比这再长的 从来就没有过。', '接连不断地传来的消息 横渡大西洋的贝雷尔号所做的种种观察 茵曼轮船公司的越提那号跟这个怪物的一次相碰 法国二级军舰诺曼第号军官们所写的记录 海军高级参谋弗兹一詹姆斯在克利德爵士号上所做的很精密的测算 这一切在当时的确曾经哄动一时。在民族性比较浮躁的国家里 大家都拿这件事作为谈笑资料 但在严肃和踏实的国家里 像英国、美国和德国就不同 它们对这事就非常关心。', '在各大城市里 这怪物变成了家喻户晓的事件。咖啡馆里歌唱它 报刊上嘲笑它 舞台上扮演它。谣言正好有了机会 从这怪物身上捏造出各种各样的奇闻。在一些发行量不多的报刊上派 。 出现了关于各种离奇的巨大动物的报道 从白鲸、北极海中可怕的 莫比·狄克 ①一直到庞大的 克拉肯 ②——这种怪鱼的触须可以缠住一只载重五百吨的船而把它拖到海底下去——都应有尽有。', '有些人甚至不惜引经据典 或者搬出古代的传说如亚里士多德③和蒲林尼④的见解 他们承认这类怪物的存在 或者搬出彭土皮丹主教⑤的挪威童话 保罗·埃纪德的记述 以及哈林顿的报告 这报告是不容怀疑的 他说 一八五七年 他在嘉斯第兰号上看见过一种大蛇 那种蛇以前只在那立宪号到过的海面上⑤才能看见。', '于是 在学术团体里和科学报刊中产生了相信者和怀疑者 这两派人无休止地争论着。 怪物问题 激动着人们。自以为懂科学的新闻记者和一向自以为多才的文人开起火来 他们在这次值得纪念的笔战中花费了不少的墨水 。甚至有几个人还流了两三滴血 因为有人把针对大海蛇的笔锋移向一些态度傲慢的家伙身上了。', '在六个月当中 争论继续着。彼此有理 各执一词。当时流行的小报都兴致勃勃地刊登争论的文章 它们不是攻击巴西地理学院、柏林皇家科学院、不列颠学术联合会或华盛顿斯密孙学院发表的权威论文 就是驳斥印度群岛报、摩亚诺神父的宇宙杂志、皮德曼的消息报里面的讨论和法国及其他各国大报刊的科学新闻。', '这些多才的作家故意曲解反对派也常引证的林奈①的一句话 大自然不制造蠢东西 恳求大家不要相信北海的大怪鱼、大海蛇、 莫比·狄克 和疯狂的海员们臆造出来的其它怪物的存在 不要因此而否定了大自然。最后 某一著名尖刻的讽刺报有一位最受欢迎的编辑先生草草了事地发表一篇文章物主义的一些基本范畴和基本原理。', '强调马克思主义哲学必 处理了这个怪物 他像夷包列提②那样 在大家的笑声中 给这佳物最后一次打击、把它结果了。于是机智战胜了科学。在一八六七年头几个月里 这个问题好像是人了土 不会再复洁了。但就在这个时候 人们又听说发生了一些新的事件。', '现在的问题并不是一个急待解决的科学问题 而是必须认真设法避免的一个危险。问题带了完全不同的面貌。这个怪物变成了小岛、岩石、暗礁 但它是会奔驰的、不可捉摸的、行动莫测的暗礁。一八六七年八月五日 蒙特利奥航海公司的摩拉维安号夜间驶到北纬二十七度三十分、西经七十二度十五分的地方 船右舷撞上了一座岩石 可是 任何地图也没有记载过这一带海面上有这座岩石。', '由于风力的助航和四百匹马力的推动 船的速度达到每小时十三海里。毫无疑问 如果不是船身质地优良 特别坚固 摩拉维安号被撞以后 一定要把它从加拿大载来的二百三十六名乘客一齐带到海底去。事故发生在早晨五点左右天刚破晓的时候。船上值班的海员们立即跑到船的后部 他们十分细心地观察海面。', '除了有个六百多米宽的大漩涡——好像水面受过猛烈的冲击——以外 他们什么也没有看见 只把事故发生的地点确切地记了下来。摩拉维安号继续航行 似乎并没有受到什么损伤。·它是撞上了暗礁呢 还是撞上了一只沉没的破船?。当时没有法子知道。后来到船坞检查了船底朋友?。', '这个问题是革命的首要问题。 运用马克思主义的立尝 才发现一部分龙骨折断了。这事实本身是十分严重的 可是 如果不是过了三个星期后 在相同的情况下又发生了相同的事件 它很可能跟许多其他的事件一样很快被人忘掉了。接着又发生的那一次撞船的事件 单单由于受害船的国籍和它所属公司的声望 就足以引起十分广泛的反响。', '英国著名的船主苟纳尔的名字是没有一个人不知道伪。这位精明的企业家早在一八四零年就创办了一家邮船公司 开辟了从利物浦到哈利法克斯①的航线 当时只有三艘四百匹马力、载重一千一百六十二吨的明轮木船。八年以后 公司扩大了 共有四艘六百五十匹马力、载重一千八百二十吨的船。', '再过两年 又添了两艘马力和载重量更大的船 一八五三年 苟纳尔公司继续取得装运政府邮件的特权 一连添造了阿拉伯号、波斯号、中国号、斯备脱亚号、爪哇号、俄罗斯号 这些都是头等的快船 而且是最宽大的 除了大东方号外 在海上航行的船没有能跟它们相比的。', '到一八六七年 这家公司一共有十二艘船~八艘明轮的 四艘暗轮的。我所以要把上面的情形简单地介绍一下 是要大家知道这家海运公司的重要性。它由于经营得法 是全世界都闻名的。任何航海企业 没有比这公司搞得更精明 经营得更成功的了。二十六年来学流派均是庸俗进化论的宣传者。', '实证主义者斯宾塞对其曾 苟纳尔公司的船在大西洋上航行了两千次 没有一次航行不达目的地 没有一次发生迟误 从没有遗失过一封信 损失过一个人或一只船。 因此 尽管法国竭力要抢它的生意 但是乘客们都一致愿意搭苟纳尔公司的船 这点从近年来官方的统计文献中就可以看出来。', '了解这情形以后 便没有人奇怪这家公司的一只汽船遭遇到意外事件会引起那么巨大的反响。一八六七年四月十三日 海很平静 风又是顺风 
斯备脱亚号在西经十五度十二分、北纬四十五度三十七分的海面上行驶着。它在一千匹马力的发动机推动下 速度为每小时十三海里半。', '它的机轮在海中转动 完全正常。它当时的吃水深度是六米七十厘米 排水量是六 六百八十五方米。下午四点十六分 乘客们正在大厅中吃点心的时候 在斯各脱亚号船尾、左舷机轮后面一点 似乎发生了轻微的撞击。斯各脱亚号不是撞上了什么 而是被什么撞上了。', '憧它的不是敲击的器械而是钻凿的器械。这次冲撞是十分轻微的 要不是管船舱的人员跑到甲板上来喊 船要沉了 船要沉了 。 也许船上的人谁也不会在意。旅客们起初十分惊慌 但船长安德生很快就使他们安稳下来。危险并不会立刻就发生。斯各脱亚号由防水板分为七大间 一点也不在乎个把漏洞。', '安德生船长立即跑到舱底下去。他查出第五间被海水浸人了 海水浸入十分快 证明漏洞相当大。好在这间里没有蒸汽炉 不然的话 炉火就要熄灭了。安德生船长吩咐马上停船 并且命令一个潜水员下水检查船身的损坏情形。一会儿 他知道船底有一个长两米的大洞。', '这样一个裂口是没法堵住的 斯各脱亚号尽管机轮有一半浸在水里 但也必须继续行驶。当时船离克利亚峡还有三百海里 等船驶进公司的码头 已经误了三天期 在这三天里 利物浦的人都为它惶惶不安。斯各脱亚号被架了起来 工程师们开始检查。他们眼睛所看见的情形连自己也不能相信。', '在船身吃水线下两米半的地方 露出一个很规则的等边三角形的缺口。铁皮上的伤痕十分整齐 、就是钻孔机也不能凿得这么准确 弄成这个裂口的锐利器械一定不是用普通的钢铁制的 因为 这家伙在以惊人的力量向前猛撞 凿穿了四厘米厚的铁皮以后、还能用一种很难做到的后退动作 使自己脱身逃走。', '最近这次事件的经过大致就是这样。结果这又一次使舆论哄动起来。从这时候起 所有从前原因不明的航海遇难事件 现在都算在这个怪物的账上了。这只离奇古怪的动物于是负起了所有船只沉没的责任。不幸的是船沉的数目相当大 按照统计年鉴的记载 包括帆船和汽船在内 每年的损失约有三千艘左右 至于因下落不明而断定失踪 的 每年的数目也不下两百艘 。不管有没有冤枉这怪物 人们都把船只失踪的原因算在它身上。由于它的存在 五大洲间的海上交通越来越危险了 大家都坚决要求不惜任何代价清除海上这条可怕盼鲸鱼怪。']\n",
-    "INFO:ChatTTS.core:All initialized.\n",
-    " 46% 175/384 [00:05<00:07, 29.18steps/s]\n",
-    " 73% 1501/2048 [01:18<00:28, 19.09steps/s]\n",
-    "INFO:ChatTTS.core:All initialized.\n",
-    " 62% 238/384 [00:08<00:05, 28.48steps/s]\n",
-    " 36% 736/2048 [00:28<01:07, 19.51steps/s]"
-   ]
-  }
- ]
+ "outputs": []
 },
 ]
}
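For readers running outside Colab, the shell magics in the cell above translate roughly into the sketch below. It assumes the ChatTTS_colab repository has already been cloned and is the current directory (the unchanged earlier lines of the cell handle that on Colab), that git and pip are on PATH, and that a Unix-like shell is available for the mv step; it is an illustration, not part of the commit.

# Rough non-Colab equivalent of the setup cell above (illustrative sketch only).
# Assumes: current directory is a checkout of ChatTTS_colab; git, pip and a POSIX shell are available.
import subprocess

def sh(*cmd):
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=True)

sh("git", "clone", "-q", "https://github.com/2noise/ChatTTS")
sh("git", "-C", "ChatTTS", "checkout", "-q", "e6412b1")   # pin the revision used by the notebook
# the notebook renames the clone and moves its contents into the project root
sh("bash", "-c", "mv ChatTTS abc && mv abc/* ./")
sh("pip", "install", "-q", "omegaconf", "vocos", "vector_quantize_pytorch", "gradio",
   "cn2an", "pypinyin", "openai", "jieba", "WeTextProcessing", "python-dotenv")
sh("python", "webui_mix.py", "--share")                   # prints a public *.gradio.live URL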
config.py
CHANGED
@@ -1,13 +1,15 @@
 # Description: Configuration file for the project
+llama_seed = 2581
+DEFAULT_DIR = "output"
 DEFAULT_SPEED = 5
 DEFAULT_ORAL = 2
 DEFAULT_LAUGH = 0
 DEFAULT_BK = 4
 # 段落切割
-DEFAULT_SEG_LENGTH =
-DEFAULT_BATCH_SIZE =
+DEFAULT_SEG_LENGTH = 80
+DEFAULT_BATCH_SIZE = 3
 # 温度
-DEFAULT_TEMPERATURE = 0.
+DEFAULT_TEMPERATURE = 0.1
 # top_P
 DEFAULT_TOP_P = 0.7
 # top_K
@@ -41,4 +43,4 @@ LLM_PROMPT = """
 注意: character 字段的值需要使用类似 "旁白"、"年轻男性"、"年轻女性" 等角色身份。如果有多个角色,可以使用 "年轻男性1"、"年轻男性2" 等。

 --故事文本--
-"""
+"""
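As a quick orientation (not part of the commit): the defaults above are imported by tts_model.py and webui_mix.py, where they end up inside ChatTTS prompt strings. A minimal sketch, assuming this config.py is on the import path:

# Minimal sketch: how the defaults above typically combine into ChatTTS prompt strings.
# The f-string formats mirror the ones used in webui_mix.py and tts_model.py.
from config import DEFAULT_ORAL, DEFAULT_LAUGH, DEFAULT_BK, DEFAULT_SPEED, DEFAULT_TEMPERATURE

refine_text_prompt = f"[oral_{DEFAULT_ORAL}][laugh_{DEFAULT_LAUGH}][break_{DEFAULT_BK}]"
speed_prompt = f"[speed_{DEFAULT_SPEED}]"

print(refine_text_prompt)   # -> [oral_2][laugh_0][break_4]
print(speed_prompt)         # -> [speed_5]
print(DEFAULT_TEMPERATURE)  # -> 0.1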
tts_model.py
CHANGED
@@ -1,10 +1,14 @@
-import
-import
-import numpy as np
+import datetime
+import json
 import os
+import re
 import time
+
+import numpy as np
+import torch
 from tqdm import tqdm
-
+
+import ChatTTS
 from config import DEFAULT_TEMPERATURE, DEFAULT_TOP_P, DEFAULT_TOP_K
 import spaces
 
@@ -19,7 +23,7 @@ def load_chat_tts_model(source='huggingface', force_redownload=False, local_path
     """
     print("Loading ChatTTS model...")
     chat = ChatTTS.Chat()
-    chat.load_models(source=source, force_redownload=force_redownload, local_path=
+    chat.load_models(source=source, force_redownload=force_redownload, custom_path=local_path, compile=False)
     return chat
 
 
@@ -45,18 +49,37 @@ def deterministic(seed=0):
     torch.backends.cudnn.benchmark = False
 
 @spaces.GPU
-def generate_audio_for_seed(chat, seed, texts, batch_size, speed, refine_text_prompt,
-
+def generate_audio_for_seed(chat, seed, texts, batch_size, speed, refine_text_prompt, roleid=None,
+                            temperature=DEFAULT_TEMPERATURE,
+                            top_P=DEFAULT_TOP_P, top_K=DEFAULT_TOP_K, cur_tqdm=None, skip_save=False,
+                            skip_refine_text=False, speaker_type="seed", pt_file=None):
     from utils import combine_audio, save_audio, batch_split
-
-
-
-
-
-
+    print(f"speaker_type: {speaker_type}")
+    if speaker_type == "seed":
+        if seed in [None, -1, 0, "", "random"]:
+            seed = np.random.randint(0, 9999)
+        deterministic(seed)
+        rnd_spk_emb = chat.sample_random_speaker()
+    elif speaker_type == "role":
+        # 从 JSON 文件中读取数据
+        with open('./slct_voice_240605.json', 'r', encoding='utf-8') as json_file:
+            slct_idx_loaded = json.load(json_file)
+        # 将包含 Tensor 数据的部分转换回 Tensor 对象
+        for key in slct_idx_loaded:
+            tensor_list = slct_idx_loaded[key]["tensor"]
+            slct_idx_loaded[key]["tensor"] = torch.tensor(tensor_list)
+        # 将音色 tensor 打包进params_infer_code,固定使用此音色发音,调低temperature
+        rnd_spk_emb = slct_idx_loaded[roleid]["tensor"]
+        # temperature = 0.001
+    elif speaker_type == "pt":
+        print(pt_file)
+        rnd_spk_emb = torch.load(pt_file)
+        print(rnd_spk_emb.shape)
+        if rnd_spk_emb.shape != (768,):
+            raise ValueError("维度应为 768。")
+    else:
+        raise ValueError(f"Invalid speaker_type: {speaker_type}. ")
 
-    deterministic(seed)
-    rnd_spk_emb = chat.sample_random_speaker()
     params_infer_code = {
         'spk_emb': rnd_spk_emb,
         'prompt': f'[speed_{speed}]',
@@ -77,13 +100,16 @@ def generate_audio_for_seed(chat, seed, texts, batch_size, speed, refine_text_pr
     if not cur_tqdm:
         cur_tqdm = tqdm
 
+    if re.search(r'\[uv_break\]|\[laugh\]', ''.join(texts)) is not None:
+        if not skip_refine_text:
+            print("Detected [uv_break] or [laugh] in text, skipping refine_text")
+            skip_refine_text = True
+
     for batch in cur_tqdm(batch_split(texts, batch_size), desc=f"Inferring audio for seed={seed}"):
         flag += len(batch)
-
-
-
-        wavs = chat.infer(batch, params_infer_code=params_infer_code, params_refine_text=params_refine_text,
-                          use_decoder=True, skip_refine_text=False)
+        _params_infer_code = {**params_infer_code}
+        wavs = chat.infer(batch, params_infer_code=_params_infer_code, params_refine_text=params_refine_text,
+                          use_decoder=True, skip_refine_text=skip_refine_text)
         all_wavs.extend(wavs)
         clear_cuda_cache()
         if skip_save:
@@ -93,9 +119,28 @@
     elapsed_time = end_time - start_time
     print(f"Saving audio for seed {seed}, took {elapsed_time:.2f}s")
     timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
-    wav_filename = f"
-    save_audio(wav_filename, combined_audio)
-
+    wav_filename = f"chattts-[seed_{seed}][speed_{speed}]{refine_text_prompt}[{timestamp}].wav"
+    return save_audio(wav_filename, combined_audio)
+
+
+def generate_refine_text(chat, seed, text, refine_text_prompt, temperature=DEFAULT_TEMPERATURE,
+                         top_P=DEFAULT_TOP_P, top_K=DEFAULT_TOP_K):
+    if seed in [None, -1, 0, "", "random"]:
+        seed = np.random.randint(0, 9999)
+
+    deterministic(seed)
+
+    params_refine_text = {
+        'prompt': refine_text_prompt,
+        'top_P': top_P,
+        'top_K': top_K,
+        'temperature': temperature
+    }
+    print('params_refine_text:', text)
+    print('refine_text_prompt:', refine_text_prompt)
+    refine_text = chat.infer(text, params_refine_text=params_refine_text, refine_text_only=True, skip_refine_text=False)
+    print('refine_text:', refine_text)
+    return refine_text
 
 
 def tts(chat, text_file, seed, speed, oral, laugh, bk, seg, batch, progres=None):
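A rough usage sketch of the reworked generate_audio_for_seed API above. This is not part of the commit; it assumes the Space's dependencies (ChatTTS, spaces, torch) are installed, that the model download succeeds, and, for the "role" branch, that slct_voice_240605.json is present as the diff expects:

# Hedged usage sketch for the new speaker_type-aware API shown in the diff above.
# Parameter names follow the new signature; the calls themselves are illustrative only.
from tts_model import load_chat_tts_model, generate_audio_for_seed
from config import DEFAULT_TEMPERATURE, DEFAULT_TOP_P, DEFAULT_TOP_K

chat = load_chat_tts_model(source="huggingface")   # downloads/caches the ChatTTS weights
texts = ["大家好,这是一段测试文本。"]

# 1) Seed-based voice: the seed drives deterministic() plus a random speaker embedding.
wav_path = generate_audio_for_seed(
    chat, seed=2222, texts=texts, batch_size=1, speed=5,
    refine_text_prompt="[oral_2][laugh_0][break_4]",
    temperature=DEFAULT_TEMPERATURE, top_P=DEFAULT_TOP_P, top_K=DEFAULT_TOP_K,
)

# 2) Built-in voice: roleid indexes slct_voice_240605.json instead of sampling a speaker.
wav_path = generate_audio_for_seed(
    chat, seed=0, texts=texts, batch_size=1, speed=5,
    refine_text_prompt="[oral_2][laugh_0][break_4]",
    roleid="1", speaker_type="role",
)
print(wav_path)  # save_audio() now returns the path under config.DEFAULT_DIR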
utils.py
CHANGED
@@ -4,9 +4,16 @@ except ImportError:
     print("The 'cn2an' module is not installed. Please install it using 'pip install cn2an'.")
     exit(1)
 
+try:
+    import jieba
+except ImportError:
+    print("The 'jieba' module is not installed. Please install it using 'pip install jieba'.")
+    exit(1)
+
 import re
 import numpy as np
 import wave
+import jieba.posseg as pseg
 
 
 def save_audio(file_name, audio, rate=24000):
@@ -17,13 +24,20 @@ def save_audio(file_name, audio, rate=24000):
     :param rate:
     :return:
     """
+    import os
+    from config import DEFAULT_DIR
     audio = (audio * 32767).astype(np.int16)
 
-
+    # 检查默认目录
+    if not os.path.exists(DEFAULT_DIR):
+        os.makedirs(DEFAULT_DIR)
+    full_path = os.path.join(DEFAULT_DIR, file_name)
+    with wave.open(full_path, "w") as wf:
         wf.setnchannels(1)
         wf.setsampwidth(2)
         wf.setframerate(rate)
         wf.writeframes(audio.tobytes())
+    return full_path
 
 
 def combine_audio(wavs):
@@ -87,16 +101,32 @@ def remove_chinese_punctuation(text):
     :param text:
     :return:
     """
-    chinese_punctuation_pattern = r"[:;!(),【】『』「」《》-‘“’”:,;!\(\)\[\]
-    text = re.sub(chinese_punctuation_pattern, '
+    chinese_punctuation_pattern = r"[:;!(),【】『』「」《》-‘“’”:,;!\(\)\[\]><\-·]"
+    text = re.sub(chinese_punctuation_pattern, ',', text)
     # 使用正则表达式将多个连续的句号替换为一个句号
-    text = re.sub(r'
+    text = re.sub(r'[。,]{2,}', '。', text)
+    # 删除开头和结尾的 , 号
+    text = re.sub(r'^,|,$', '', text)
+    return text
+
+
+def remove_english_punctuation(text):
+    """
+    移除文本中的中文标点符号 [:;!(),【】『』「」《》-‘“’”:,;!\(\)\[\]><\-] 替换为 ,
+    :param text:
+    :return:
+    """
+    chinese_punctuation_pattern = r"[:;!(),【】『』「」《》-‘“’”:,;!\(\)\[\]><\-·]"
+    text = re.sub(chinese_punctuation_pattern, ',', text)
+    # 使用正则表达式将多个连续的句号替换为一个句号
+    text = re.sub(r'[,\.]{2,}', '.', text)
+    # 删除开头和结尾的 , 号
+    text = re.sub(r'^,|,$', '', text)
     return text
 
 
 def text_normalize(text):
     """
-    对文本进行归一化处理
+    对文本进行归一化处理 (PaddlePaddle版本)
     :param text:
     :return:
     """
@@ -104,14 +134,7 @@ def text_normalize(text):
     # ref: https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/paddlespeech/t2s/frontend/zh_normalization
     tx = TextNormalizer()
     sentences = tx.normalize(text)
-    # print(sentences)
-
     _txt = ''.join(sentences)
-    # 替换掉除中文之外的所有字符
-    _txt = re.sub(
-        r"[^\u4e00-\u9fa5,。!?、]+", "", _txt
-    )
-
     return _txt
 
 
@@ -124,6 +147,20 @@ def convert_numbers_to_chinese(text):
     return cn2an.transform(text, "an2cn")
 
 
+def detect_language(sentence):
+    # ref: https://github.com/2noise/ChatTTS/blob/main/ChatTTS/utils/infer_utils.py#L55
+    chinese_char_pattern = re.compile(r'[\u4e00-\u9fff]')
+    english_word_pattern = re.compile(r'\b[A-Za-z]+\b')
+
+    chinese_chars = chinese_char_pattern.findall(sentence)
+    english_words = english_word_pattern.findall(sentence)
+
+    if len(chinese_chars) > len(english_words):
+        return "zh"
+    else:
+        return "en"
+
+
 def split_text(text, min_length=60):
     """
     将文本分割为长度不小于min_length的句子
@@ -131,33 +168,63 @@
     :param min_length:
     :return:
     """
-
-
-    #
-
+    # 短句分割符号
+    sentence_delimiters = re.compile(r'([。?!\.]+)')
+    # 匹配多个连续的回车符 作为段落点 强制分段
+    paragraph_delimiters = re.compile(r'(\s*\n\s*)+')
+
+    paragraphs = re.split(paragraph_delimiters, text)
+
     result = []
-
-    for
-    if
-
-
-
-
-
-
-
-
-
-
-
-
+
+    for paragraph in paragraphs:
+        if not paragraph.strip():
+            continue  # 跳过空段落
+        # 小于阈值的段落直接分开
+        if len(paragraph.strip()) < min_length:
+            result.append(paragraph.strip())
+            continue
+        # 大于的再计算拆分
+        sentences = re.split(sentence_delimiters, paragraph)
+        current_sentence = ''
+        for sentence in sentences:
+            if re.match(sentence_delimiters, sentence):
+                current_sentence += sentence.strip() + ''
+                if len(current_sentence) >= min_length:
+                    result.append(current_sentence.strip())
+                    current_sentence = ''
+            else:
+                current_sentence += sentence.strip()
+
+        if current_sentence:
+            if len(current_sentence) < min_length and len(result) > 0:
+                result[-1] += current_sentence
+            else:
+                result.append(current_sentence)
+    if detect_language(text[:1024]) == "zh":
+        result = [normalize_zh(_.strip()) for _ in result if _.strip()]
+    else:
+        result = [normalize_en(_.strip()) for _ in result if _.strip()]
     return result
 
 
+def normalize_en(text):
+    # 不再在 ChatTTS 外正则化文本
+    # from tn.english.normalizer import Normalizer
+    # normalizer = Normalizer()
+    # text = normalizer.normalize(text)
+    # text = remove_english_punctuation(text)
+    return text
+
+
 def normalize_zh(text):
-    #
-
+    # 不再在 ChatTTS 外正则化文本
+    # from tn.chinese.normalizer import Normalizer
+    # normalizer = Normalizer()
+    # text = normalizer.normalize(text)
+    # text = remove_chinese_punctuation(text)
+    text = process_ddd(text)
+    return text
 
 
 def batch_split(items, batch_size=5):
@@ -189,11 +256,76 @@ def read_long_text(file_path):
     raise ValueError("无法识别文件编码")
 
 
+def replace_tokens(text):
+    remove_tokens = ['UNK']
+    for token in remove_tokens:
+        text = re.sub(r'\[' + re.escape(token) + r'\]', '', text)
+
+    tokens = ['uv_break', 'laugh', 'lbreak']
+    for token in tokens:
+        text = re.sub(r'\[' + re.escape(token) + r'\]', f'uu{token}uu', text)
+    text = text.replace('_', '')
+    return text
+
+
+def restore_tokens(text):
+    tokens = ['uvbreak', 'laugh', 'UNK', 'lbreak']
+    for token in tokens:
+        text = re.sub(r'uu' + re.escape(token) + r'uu', f'[{token}]', text)
+    text = text.replace('[uvbreak]', '[uv_break]')
+    return text
+
+
+def process_ddd(text):
+    """
+    处理“地”、“得” 字的使用,都替换为“的”
+    依据:地、得的使用,主要是在动词和形容词前后,本方法没有严格按照语法替换,因为时常遇到用错的情况。
+    另外受 jieba 分词准确率的影响,部分情况下可能会出漏掉。例如:小红帽疑惑地问
+    :param text: 输入的文本
+    :return: 处理后的文本
+    """
+    word_list = [(word, flag) for word, flag in pseg.cut(text, use_paddle=False)]
+    # print(word_list)
+    processed_words = []
+    for i, (word, flag) in enumerate(word_list):
+        if word in ["地", "得"]:
+            # Check previous and next word's flag
+            # prev_flag = word_list[i - 1][1] if i > 0 else None
+            # next_flag = word_list[i + 1][1] if i + 1 < len(word_list) else None
+
+            # if prev_flag in ['v', 'a'] or next_flag in ['v', 'a']:
+            if flag in ['uv', 'ud']:
+                processed_words.append("的")
+            else:
+                processed_words.append(word)
+        else:
+            processed_words.append(word)
+
+    return ''.join(processed_words)
+
+
+def replace_space_between_chinese(text):
+    return re.sub(r'(?<=[\u4e00-\u9fff])\s+(?=[\u4e00-\u9fff])', '', text)
+
+
 if __name__ == '__main__':
+    # txts = [
+    #     "快速地跑过红色的大门",
+    #     "笑得很开心,学得很好",
+    #     "小红帽疑惑地问?",
+    #     "大灰狼慌张地回答",
+    #     "哦,这是为了更好地听你说话。",
+    #     "大灰狼不耐烦地说:“为了更好地抱你。”",
+    #     "他跑得很快,工作做得非常认真,这是他努力地结果。得到",
+    # ]
+    # for txt in txts:
+    #     print(txt, '-->', process_ddd(txt))
+
     txts = [
         "电影中梁朝伟扮演的陈永仁的编号27149",
         "这块黄金重达324.75克 我们班的最高总分为583分",
         "12\~23 -1.5\~2",
+        "居维埃·拉色别德①、杜梅里②、卡特法日③,"
 
     ]
     for txt in txts:
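To make the intent of the new helpers concrete, here is a small sketch of the protect/split/restore round trip that webui_mix.py builds on (replace_tokens shields [uv_break]/[laugh] from the punctuation handling in split_text, restore_tokens brings them back). It assumes this utils.py is importable and jieba is installed; outputs depend on jieba's segmentation:

# Sketch of the replace_tokens -> split_text -> restore_tokens round trip used by webui_mix.py.
# Assumes utils.py from this commit is on the import path.
from utils import replace_tokens, restore_tokens, split_text, process_ddd

content = "他慢慢地走过来[uv_break]然后大声地笑了[laugh]"

protected = replace_tokens(content)               # "[uv_break]" -> "uuuvbreakuu", "[laugh]" -> "uulaughuu"
segments = split_text(protected, min_length=10)   # punctuation handling can no longer break the tokens
segments = [restore_tokens(s) for s in segments]  # back to "[uv_break]" / "[laugh]"
print(segments)

# normalize_zh() also routes Chinese text through process_ddd(), which rewrites
# structural 地/得 to 的 based on jieba part-of-speech tags:
print(process_ddd("快速地跑过红色的大门"))  # may come back as 快速的跑过红色的大门, depending on jieba's tagging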
webui_mix.py
CHANGED
@@ -1,3 +1,7 @@
|
|
|
|
|
|
|
|
|
|
1 |
import argparse
|
2 |
import re
|
3 |
import time
|
@@ -40,30 +44,31 @@ if not os.path.exists(SAVED_SEEDS_FILE):
|
|
40 |
|
41 |
chat = load_chat_tts_model(source=args.source, local_path=args.local_path)
|
42 |
# chat = None
|
43 |
-
# chat = load_chat_tts_model(source="local", local_path="models")
|
44 |
|
45 |
# 抽卡的最大数量
|
46 |
max_audio_components = 10
|
47 |
|
48 |
-
|
49 |
-
# print("loading ChatTTS model...")
|
50 |
-
# chat = ChatTTS.Chat()
|
51 |
-
# chat.load_models(source="local", local_path="models")
|
52 |
-
# torch.cuda.empty_cache()
|
53 |
-
|
54 |
-
|
55 |
# 加载
|
56 |
def load_seeds():
|
57 |
with open(SAVED_SEEDS_FILE, "r") as f:
|
58 |
global saved_seeds
|
59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
return saved_seeds
|
61 |
|
62 |
|
63 |
def display_seeds():
|
64 |
seeds = load_seeds()
|
65 |
# 转换为 List[List] 的形式
|
66 |
-
return [[i, s['seed'], s['name']] for i, s in enumerate(seeds)]
|
67 |
|
68 |
|
69 |
saved_seeds = load_seeds()
|
@@ -78,13 +83,14 @@ def save_seeds():
|
|
78 |
|
79 |
|
80 |
# 添加 seed
|
81 |
-
def add_seed(seed, name, save=True):
|
82 |
for s in saved_seeds:
|
83 |
if s['seed'] == seed:
|
84 |
return False
|
85 |
saved_seeds.append({
|
86 |
'seed': seed,
|
87 |
-
'name': name
|
|
|
88 |
})
|
89 |
if save:
|
90 |
save_seeds()
|
@@ -129,7 +135,7 @@ def generate_seeds(num_seeds, texts, tq):
|
|
129 |
for _ in tq(range(num_seeds), desc=f"随机音色生成中..."):
|
130 |
seed = np.random.randint(0, 9999)
|
131 |
|
132 |
-
filename = generate_audio_for_seed(chat, seed, texts, 1, 5, "[oral_2][laugh_0][break_4]", 0.3, 0.7, 20)
|
133 |
seeds.append((filename, seed))
|
134 |
clear_cuda_cache()
|
135 |
|
@@ -137,11 +143,12 @@ def generate_seeds(num_seeds, texts, tq):
|
|
137 |
|
138 |
|
139 |
# 保存选定的音频种子
|
140 |
-
def do_save_seed(seed):
|
|
|
141 |
seed = seed.replace('保存种子 ', '').strip()
|
142 |
if not seed:
|
143 |
return
|
144 |
-
add_seed(int(seed), seed)
|
145 |
gr.Info(f"Seed {seed} has been saved.")
|
146 |
|
147 |
|
@@ -173,11 +180,24 @@ def do_delete_seed(val):
|
|
173 |
return display_seeds()
|
174 |
|
175 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
176 |
def seed_change_btn():
|
177 |
global SELECTED_SEED_INDEX
|
178 |
if SELECTED_SEED_INDEX == -1:
|
179 |
-
return '删除'
|
180 |
-
return f'删除 idx=[{SELECTED_SEED_INDEX[0]}]'
|
181 |
|
182 |
|
183 |
def audio_interface(num_seeds, texts, progress=gr.Progress()):
|
@@ -194,11 +214,26 @@ def audio_interface(num_seeds, texts, progress=gr.Progress()):
|
|
194 |
# 不足的部分
|
195 |
all_wavs = wavs + [None] * (max_audio_components - len(wavs))
|
196 |
all_seeds = seeds + [''] * (max_audio_components - len(seeds))
|
197 |
-
return [item for pair in zip(all_wavs, all_seeds) for item in pair]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
198 |
|
199 |
|
200 |
def audio_interface_empty(num_seeds, texts, progress=gr.Progress(track_tqdm=True)):
|
201 |
-
return [None, ""] * max_audio_components
|
202 |
|
203 |
|
204 |
def update_audio_components(slider_value):
|
@@ -206,8 +241,9 @@ def update_audio_components(slider_value):
|
|
206 |
k = int(slider_value)
|
207 |
audios = [gr.Audio(visible=True)] * k + [gr.Audio(visible=False)] * (max_audio_components - k)
|
208 |
tbs = [gr.Textbox(visible=True)] * k + [gr.Textbox(visible=False)] * (max_audio_components - k)
|
|
|
209 |
print(f'k={k}, audios={len(audios)}')
|
210 |
-
return [item for pair in zip(audios, tbs) for item in pair]
|
211 |
|
212 |
|
213 |
def seed_change(evt: gr.SelectData):
|
@@ -218,9 +254,9 @@ def seed_change(evt: gr.SelectData):
|
|
218 |
|
219 |
@spaces.GPU
|
220 |
def generate_tts_audio(text_file, num_seeds, seed, speed, oral, laugh, bk, min_length, batch_size, temperature, top_P,
|
221 |
-
top_K, progress=gr.Progress()):
|
222 |
from tts_model import generate_audio_for_seed
|
223 |
-
from utils import split_text
|
224 |
if seed in [0, -1, None]:
|
225 |
seed = random.randint(1, 9999)
|
226 |
content = ''
|
@@ -228,19 +264,151 @@ def generate_tts_audio(text_file, num_seeds, seed, speed, oral, laugh, bk, min_l
|
|
228 |
content = ""
|
229 |
elif isinstance(text_file, str):
|
230 |
content = text_file
|
|
|
|
|
231 |
texts = split_text(content, min_length=min_length)
|
232 |
-
|
|
|
233 |
|
234 |
if oral < 0 or oral > 9 or laugh < 0 or laugh > 2 or bk < 0 or bk > 7:
|
235 |
raise ValueError("oral_(0-9), laugh_(0-2), break_(0-7) out of range")
|
236 |
|
237 |
refine_text_prompt = f"[oral_{oral}][laugh_{laugh}][break_{bk}]"
|
238 |
try:
|
239 |
-
output_files = generate_audio_for_seed(
|
240 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
241 |
return output_files
|
242 |
except Exception as e:
|
243 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
244 |
|
245 |
|
246 |
def generate_seed():
|
@@ -253,10 +421,28 @@ def generate_seed():
|
|
253 |
|
254 |
def update_label(text):
|
255 |
word_count = len(text)
|
256 |
-
return gr.update(label=f"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
257 |
|
258 |
|
259 |
with gr.Blocks() as demo:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
260 |
with gr.Tab("音色抽卡"):
|
261 |
with gr.Row():
|
262 |
with gr.Column(scale=1):
|
@@ -267,6 +453,10 @@ with gr.Blocks() as demo:
|
|
267 |
]
|
268 |
# gr.Markdown("### 随机音色抽卡")
|
269 |
gr.Markdown("""
|
|
|
|
|
|
|
|
|
270 |
在相同的 seed 和 温度等参数下,音色具有一定的一致性。点击下面的“随机音色生成”按钮将生成多个 seed。找到满意的音色后,点击音频下方“保存”按钮。
|
271 |
**注意:不同机器使用相同种子生成的音频音色可能不同,同一机器使用相同种子多次生成的音频音色也可能变化。**
|
272 |
""")
|
@@ -283,21 +473,29 @@ with gr.Blocks() as demo:
|
|
283 |
gr.Markdown("### 种子管理界面")
|
284 |
seed_list = gr.DataFrame(
|
285 |
label="种子列表",
|
286 |
-
headers=["Index", "Seed", "Name"],
|
287 |
-
datatype=["number", "number", "str"],
|
288 |
interactive=True,
|
289 |
-
col_count=(
|
290 |
-
value=display_seeds
|
291 |
)
|
|
|
292 |
with gr.Row():
|
293 |
refresh_button = gr.Button("刷新")
|
294 |
save_button = gr.Button("保存")
|
295 |
del_button = gr.Button("删除")
|
|
|
|
|
|
|
|
|
|
|
|
|
296 |
# 绑定按钮和函数
|
297 |
refresh_button.click(display_seeds, outputs=seed_list)
|
298 |
-
seed_list.select(seed_change).success(seed_change_btn, outputs=[del_button])
|
299 |
save_button.click(do_save_seeds, inputs=[seed_list], outputs=None)
|
300 |
del_button.click(do_delete_seed, inputs=del_button, outputs=seed_list)
|
|
|
301 |
|
302 |
with gr.Column(scale=1):
|
303 |
audio_components = []
|
@@ -305,12 +503,13 @@ with gr.Blocks() as demo:
|
|
305 |
visible = i < num_seeds_default
|
306 |
a = gr.Audio(f"Audio {i}", visible=visible)
|
307 |
t = gr.Button(f"Seed", visible=visible)
|
308 |
-
|
|
|
309 |
audio_components.append(a)
|
310 |
audio_components.append(t)
|
|
|
311 |
|
312 |
num_seeds.change(update_audio_components, inputs=num_seeds, outputs=audio_components)
|
313 |
-
|
314 |
# output = gr.Column()
|
315 |
# audio = gr.Audio(label="Output Audio")
|
316 |
|
@@ -330,46 +529,136 @@ with gr.Blocks() as demo:
|
|
330 |
placeholder="Please Input Text...", value=default_text)
|
331 |
# 当文本框内容发生变化时调用 update_label 函数
|
332 |
text_file_input.change(update_label, inputs=text_file_input, outputs=text_file_input)
|
|
|
|
|
|
|
|
|
|
|
333 |
|
334 |
with gr.Column():
|
335 |
gr.Markdown("### 配置参数")
|
336 |
-
gr.Markdown("根据需要配置以下参数来生成音频。")
|
337 |
with gr.Row():
|
338 |
-
|
339 |
-
|
340 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
341 |
|
342 |
with gr.Row():
|
343 |
-
|
344 |
-
|
345 |
-
|
346 |
-
|
347 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
348 |
# gr.Markdown("### 文本参数")
|
349 |
with gr.Row():
|
350 |
-
min_length_input = gr.Number(label="文本分段长度", info="大于这个数值进行分段",
|
351 |
-
precision=0)
|
352 |
-
batch_size_input = gr.Number(label="批大小", info="
|
353 |
-
precision=0)
|
354 |
with gr.Accordion("其他参数", open=False):
|
355 |
with gr.Row():
|
356 |
# 温度 top_P top_K
|
357 |
-
temperature_input = gr.Slider(label="温度", minimum=0.01, maximum=1.0, step=0.01,
|
358 |
-
|
359 |
-
|
|
|
360 |
# reset 按钮
|
361 |
reset_button = gr.Button("重置")
|
362 |
|
363 |
with gr.Row():
|
364 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
365 |
|
366 |
with gr.Row():
|
367 |
output_audio = gr.Audio(label="生成的音频文件")
|
|
|
|
|
|
|
|
|
|
|
|
|
368 |
|
369 |
generate_audio_seed.click(generate_seed,
|
370 |
inputs=[],
|
371 |
outputs=seed_input)
|
372 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
373 |
# 重置按钮 重置温度等参数
|
374 |
reset_button.click(
|
375 |
lambda: [0.3, 0.7, 20],
|
@@ -392,9 +681,50 @@ with gr.Blocks() as demo:
|
|
392 |
temperature_input,
|
393 |
top_P_input,
|
394 |
top_K_input,
|
|
|
|
|
|
|
|
|
395 |
],
|
396 |
outputs=[output_audio]
|
397 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
398 |
with gr.Tab("角色扮演"):
|
399 |
def txt_2_script(text):
|
400 |
lines = text.split("\n")
|
@@ -426,7 +756,7 @@ with gr.Blocks() as demo:
|
|
426 |
characters = list([_["character"] for _ in lines])
|
427 |
unique_characters = list(dict.fromkeys(characters))
|
428 |
print([[character, 0] for character in unique_characters])
|
429 |
-
return [[character, 0] for character in unique_characters]
|
430 |
|
431 |
|
432 |
def get_txt_characters(text):
|
@@ -453,7 +783,7 @@ with gr.Blocks() as demo:
|
|
453 |
scripts = llm_operation(api_base, api_key, model, LLM_PROMPT, text, required_keys=["txt", "character"])
|
454 |
return script_2_txt(scripts)
|
455 |
|
456 |
-
|
457 |
def generate_script_audio(text, models_seeds, progress=gr.Progress()):
|
458 |
scripts = txt_2_script(text) # 将文本转换为剧本
|
459 |
characters = get_characters(scripts) # 从剧本中提取角色
|
@@ -464,7 +794,6 @@ with gr.Blocks() as demo:
|
|
464 |
import itertools
|
465 |
from tts_model import generate_audio_for_seed
|
466 |
from utils import combine_audio, save_audio, normalize_zh
|
467 |
-
from config import DEFAULT_BATCH_SIZE, DEFAULT_SPEED, DEFAULT_TEMPERATURE, DEFAULT_TOP_K, DEFAULT_TOP_P
|
468 |
|
469 |
assert isinstance(models_seeds, pd.DataFrame)
|
470 |
|
@@ -477,18 +806,40 @@ with gr.Blocks() as demo:
|
|
477 |
break
|
478 |
yield batch
|
479 |
|
480 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
481 |
|
482 |
# 检查每个角色是否都有对应的种子
|
483 |
-
|
484 |
-
|
485 |
-
|
486 |
-
|
487 |
-
|
488 |
-
#
|
489 |
-
|
490 |
-
|
491 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
492 |
refine_text_prompt = "[oral_2][laugh_0][break_4]"
|
493 |
all_wavs = []
|
494 |
|
@@ -502,13 +853,21 @@ with gr.Blocks() as demo:
|
|
502 |
batch_size = 5 # 设置批次大小
|
503 |
# 按角色处理
|
504 |
for character, lines in progress.tqdm(grouped_lines.items(), desc="生成剧本音频"):
|
505 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
506 |
# 按批次处理
|
507 |
for batch_lines in batch(lines, batch_size):
|
508 |
texts = [normalize_zh(line["txt"]) for line in batch_lines]
|
509 |
-
print(f"seed={seed} t={texts} c={character}")
|
510 |
-
wavs = generate_audio_for_seed(chat, int(seed), texts, DEFAULT_BATCH_SIZE,
|
511 |
-
refine_text_prompt, DEFAULT_TEMPERATURE, DEFAULT_TOP_P,
|
512 |
DEFAULT_TOP_K, skip_save=True) # 批量处理文本
|
513 |
batch_results[character].extend(wavs)
|
514 |
|
@@ -520,8 +879,7 @@ with gr.Blocks() as demo:
|
|
520 |
# 合成所有音频
|
521 |
audio = combine_audio(all_wavs)
|
522 |
fname = f"script_{int(time.time())}.wav"
|
523 |
-
save_audio(fname, audio)
|
524 |
-
return fname
|
525 |
|
526 |
|
527 |
script_example = {
|
@@ -556,7 +914,7 @@ with gr.Blocks() as demo:
|
|
556 |
"txt": "当小红帽到达奶奶家时,她发现大灰狼伪装成了奶奶。",
|
557 |
"character": "旁白"
|
558 |
}, {
|
559 |
-
"txt": "
|
560 |
"character": "旁白"
|
561 |
}, {
|
562 |
"txt": "奶奶,你的耳朵怎么这么尖?",
|
@@ -605,7 +963,7 @@ with gr.Blocks() as demo:
|
|
605 |
placeholder="请输入API Base URL",
|
606 |
value=r"https://api.openai.com/v1")
|
607 |
openai_api_key_input = gr.Textbox(label="OpenAI API Key", placeholder="请输入API Key",
|
608 |
-
value="sk-xxxxxxx")
|
609 |
# AI提示词
|
610 |
ai_text_input = gr.Textbox(label="剧情简介或者一段故事", placeholder="请输入文本...", lines=2,
|
611 |
value=ai_text_default)
|
@@ -616,7 +974,7 @@ with gr.Blocks() as demo:
|
|
616 |
with gr.Column(scale=3):
|
617 |
gr.Markdown("### 脚本")
|
618 |
gr.Markdown(
|
619 |
-
"
|
620 |
script_text = "\n".join(
|
621 |
[f"{_.get('character', '')}::{_.get('txt', '')}" for _ in script_example['lines']])
|
622 |
|
@@ -628,20 +986,20 @@ with gr.Blocks() as demo:
|
|
628 |
with gr.Column(scale=1):
|
629 |
gr.Markdown("### 角色种子")
|
630 |
# DataFrame 来存放转换后的脚本
|
631 |
-
# 默认数据
|
632 |
default_data = [
|
633 |
-
["旁白", 2222],
|
634 |
-
["年轻女性", 2],
|
635 |
-
["中年男性", 2424]
|
636 |
]
|
637 |
|
638 |
script_data = gr.DataFrame(
|
639 |
value=default_data,
|
640 |
label="角色对应的音色种子,从抽卡那获取",
|
641 |
-
headers=["
|
642 |
-
datatype=["str", "number"],
|
643 |
interactive=True,
|
644 |
-
col_count=(
|
645 |
)
|
646 |
# 生视频按钮
|
647 |
script_generate_audio = gr.Button("步骤②:生成音频")
|
@@ -674,4 +1032,4 @@ with gr.Blocks() as demo:
|
|
674 |
outputs=[script_audio]
|
675 |
)
|
676 |
|
677 |
-
demo.launch(share=args.share)
|
|
|
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
|
4 |
+
sys.path.insert(0, os.getcwd())
|
5 |
import argparse
|
6 |
import re
|
7 |
import time
|
|
|
44 |
|
45 |
chat = load_chat_tts_model(source=args.source, local_path=args.local_path)
|
46 |
# chat = None
|
47 |
+
# chat = load_chat_tts_model(source="local", local_path=r"models")
|
48 |
|
49 |
# 抽卡的最大数量
|
50 |
max_audio_components = 10
|
51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
# 加载
|
53 |
def load_seeds():
|
54 |
with open(SAVED_SEEDS_FILE, "r") as f:
|
55 |
global saved_seeds
|
56 |
+
|
57 |
+
seeds = json.load(f)
|
58 |
+
|
59 |
+
# 兼容旧的 JSON 格式,添加 path 字段
|
60 |
+
for seed in seeds:
|
61 |
+
if 'path' not in seed:
|
62 |
+
seed['path'] = None
|
63 |
+
|
64 |
+
saved_seeds = seeds
|
65 |
return saved_seeds
|
66 |
|
67 |
|
68 |
def display_seeds():
|
69 |
seeds = load_seeds()
|
70 |
# 转换为 List[List] 的形式
|
71 |
+
return [[i, s['seed'], s['name'], s['path']] for i, s in enumerate(seeds)]
|
72 |
|
73 |
|
74 |
saved_seeds = load_seeds()
|
|
|
83 |
|
84 |
|
85 |
# 添加 seed
|
86 |
+
def add_seed(seed, name, audio_path, save=True):
|
87 |
for s in saved_seeds:
|
88 |
if s['seed'] == seed:
|
89 |
return False
|
90 |
saved_seeds.append({
|
91 |
'seed': seed,
|
92 |
+
'name': name,
|
93 |
+
'path': audio_path
|
94 |
})
|
95 |
if save:
|
96 |
save_seeds()
|
|
|
135 |
for _ in tq(range(num_seeds), desc=f"随机音色生成中..."):
|
136 |
seed = np.random.randint(0, 9999)
|
137 |
|
138 |
+
filename = generate_audio_for_seed(chat, seed, texts, 1, 5, "[oral_2][laugh_0][break_4]", None, 0.3, 0.7, 20)
|
139 |
seeds.append((filename, seed))
|
140 |
clear_cuda_cache()
|
141 |
|
|
|
143 |
|
144 |
|
145 |
# 保存选定的音频种子
|
146 |
+
def do_save_seed(seed, audio_path):
|
147 |
+
print(f"Saving seed {seed} to {audio_path}")
|
148 |
seed = seed.replace('保存种子 ', '').strip()
|
149 |
if not seed:
|
150 |
return
|
151 |
+
add_seed(int(seed), seed, audio_path)
|
152 |
gr.Info(f"Seed {seed} has been saved.")
|
153 |
|
154 |
|
|
|
180 |
return display_seeds()
|
181 |
|
182 |
|
183 |
+
# 定义播放音频的函数
|
184 |
+
def do_play_seed(val):
|
185 |
+
# 从 val 匹配 [(\d+)] 获取index
|
186 |
+
index = re.search(r'\[(\d+)\]', val)
|
187 |
+
if index:
|
188 |
+
index = int(index.group(1))
|
189 |
+
seed = saved_seeds[index]['seed']
|
190 |
+
audio_path = saved_seeds[index]['path']
|
191 |
+
if audio_path:
|
192 |
+
return gr.update(visible=True, value=audio_path)
|
193 |
+
return gr.update(visible=False, value=None)
|
194 |
+
|
195 |
+
|
196 |
def seed_change_btn():
|
197 |
global SELECTED_SEED_INDEX
|
198 |
if SELECTED_SEED_INDEX == -1:
|
199 |
+
return ['删除', '试听']
|
200 |
+
return [f'删除 idx=[{SELECTED_SEED_INDEX[0]}]', f'试听 idx=[{SELECTED_SEED_INDEX[0]}]']
|
201 |
|
202 |
|
203 |
def audio_interface(num_seeds, texts, progress=gr.Progress()):
|
|
|
214 |
# 不足的部分
|
215 |
all_wavs = wavs + [None] * (max_audio_components - len(wavs))
|
216 |
all_seeds = seeds + [''] * (max_audio_components - len(seeds))
|
217 |
+
return [item for pair in zip(all_wavs, all_seeds, all_wavs) for item in pair]
|
218 |
+
|
219 |
+
|
220 |
+
# 保存刚刚生成的种子文件路径
|
221 |
+
audio_paths = [gr.State(value=None) for _ in range(max_audio_components)]
|
222 |
+
|
223 |
+
|
224 |
+
def audio_interface_with_paths(num_seeds, texts, progress=gr.Progress()):
|
225 |
+
"""
|
226 |
+
比 audio_interface 多携带音频的 path
|
227 |
+
"""
|
228 |
+
results = audio_interface(num_seeds, texts, progress)
|
229 |
+
wavs = results[::2] # 提取音频文件路径
|
230 |
+
for i, wav in enumerate(wavs):
|
231 |
+
audio_paths[i].value = wav # 直接为 State 组件赋值
|
232 |
+
return results
|
233 |
|
234 |
|
235 |
def audio_interface_empty(num_seeds, texts, progress=gr.Progress(track_tqdm=True)):
|
236 |
+
return [None, "", None] * max_audio_components
|
237 |
|
238 |
|
239 |
def update_audio_components(slider_value):
|
|
|
241 |
k = int(slider_value)
|
242 |
audios = [gr.Audio(visible=True)] * k + [gr.Audio(visible=False)] * (max_audio_components - k)
|
243 |
tbs = [gr.Textbox(visible=True)] * k + [gr.Textbox(visible=False)] * (max_audio_components - k)
|
244 |
+
stats = [gr.State(value=None)] * max_audio_components
|
245 |
print(f'k={k}, audios={len(audios)}')
|
246 |
+
return [item for pair in zip(audios, tbs, stats) for item in pair]
|
247 |
|
248 |
|
249 |
def seed_change(evt: gr.SelectData):
|
|
|
254 |
|
255 |
@spaces.GPU
|
256 |
def generate_tts_audio(text_file, num_seeds, seed, speed, oral, laugh, bk, min_length, batch_size, temperature, top_P,
|
257 |
+
top_K, roleid=None, refine_text=True, speaker_type="seed", pt_file=None, progress=gr.Progress()):
|
258 |
from tts_model import generate_audio_for_seed
|
259 |
+
from utils import split_text, replace_tokens, restore_tokens
|
260 |
if seed in [0, -1, None]:
|
261 |
seed = random.randint(1, 9999)
|
262 |
content = ''
|
|
|
264 |
content = ""
|
265 |
elif isinstance(text_file, str):
|
266 |
content = text_file
|
267 |
+
# 将 [uv_break] [laugh] 替换为 _uv_break_ _laugh_ 处理后再还原
|
268 |
+
content = replace_tokens(content)
|
269 |
texts = split_text(content, min_length=min_length)
|
270 |
+
for i, text in enumerate(texts):
|
271 |
+
texts[i] = restore_tokens(text)
|
272 |
|
273 |
if oral < 0 or oral > 9 or laugh < 0 or laugh > 2 or bk < 0 or bk > 7:
|
274 |
raise ValueError("oral_(0-9), laugh_(0-2), break_(0-7) out of range")
|
275 |
|
276 |
refine_text_prompt = f"[oral_{oral}][laugh_{laugh}][break_{bk}]"
|
277 |
try:
|
278 |
+
output_files = generate_audio_for_seed(
|
279 |
+
chat=chat,
|
280 |
+
seed=seed,
|
281 |
+
texts=texts,
|
282 |
+
batch_size=batch_size,
|
283 |
+
speed=speed,
|
284 |
+
refine_text_prompt=refine_text_prompt,
|
285 |
+
roleid=roleid,
|
286 |
+
temperature=temperature,
|
287 |
+
top_P=top_P,
|
288 |
+
top_K=top_K,
|
289 |
+
cur_tqdm=progress.tqdm,
|
290 |
+
skip_save=False,
|
291 |
+
skip_refine_text=not refine_text,
|
292 |
+
speaker_type=speaker_type,
|
293 |
+
pt_file=pt_file,
|
294 |
+
)
|
295 |
return output_files
|
296 |
except Exception as e:
|
297 |
+
raise e
|
298 |
+
|
299 |
+
|
300 |
+
def generate_tts_audio_stream(text_file, num_seeds, seed, speed, oral, laugh, bk, min_length, batch_size, temperature,
|
301 |
+
top_P,
|
302 |
+
top_K, roleid=None, refine_text=True, speaker_type="seed", pt_file=None,
|
303 |
+
stream_mode="fake"):
|
304 |
+
from utils import split_text, replace_tokens, restore_tokens
|
305 |
+
from tts_model import deterministic
|
306 |
+
if seed in [0, -1, None]:
|
307 |
+
seed = random.randint(1, 9999)
|
308 |
+
content = ''
|
309 |
+
if os.path.isfile(text_file):
|
310 |
+
content = ""
|
311 |
+
elif isinstance(text_file, str):
|
312 |
+
content = text_file
|
313 |
+
# 将 [uv_break] [laugh] 替换为 _uv_break_ _laugh_ 处理后再还原
|
314 |
+
content = replace_tokens(content)
|
315 |
+
# texts = [normalize_zh(_) for _ in content.split('\n') if _.strip()]
|
316 |
+
texts = split_text(content, min_length=min_length)
|
317 |
+
|
318 |
+
for i, text in enumerate(texts):
|
319 |
+
texts[i] = restore_tokens(text)
|
320 |
+
|
321 |
+
if oral < 0 or oral > 9 or laugh < 0 or laugh > 2 or bk < 0 or bk > 7:
|
322 |
+
raise ValueError("oral_(0-9), laugh_(0-2), break_(0-7) out of range")
|
323 |
+
|
324 |
+
refine_text_prompt = f"[oral_{oral}][laugh_{laugh}][break_{bk}]"
|
325 |
+
|
326 |
+
print(f"speaker_type: {speaker_type}")
|
327 |
+
if speaker_type == "seed":
|
328 |
+
if seed in [None, -1, 0, "", "random"]:
|
329 |
+
seed = np.random.randint(0, 9999)
|
330 |
+
deterministic(seed)
|
331 |
+
rnd_spk_emb = chat.sample_random_speaker()
|
332 |
+
elif speaker_type == "role":
|
333 |
+
# 从 JSON 文件中读取数据
|
334 |
+
with open('./slct_voice_240605.json', 'r', encoding='utf-8') as json_file:
|
335 |
+
slct_idx_loaded = json.load(json_file)
|
336 |
+
# 将包含 Tensor 数据的部分转换回 Tensor 对象
|
337 |
+
for key in slct_idx_loaded:
|
338 |
+
tensor_list = slct_idx_loaded[key]["tensor"]
|
339 |
+
slct_idx_loaded[key]["tensor"] = torch.tensor(tensor_list)
|
340 |
+
# 将音色 tensor 打包进params_infer_code,固定使用此音色发音,调低temperature
|
341 |
+
rnd_spk_emb = slct_idx_loaded[roleid]["tensor"]
|
342 |
+
# temperature = 0.001
|
343 |
+
elif speaker_type == "pt":
|
344 |
+
print(pt_file)
|
345 |
+
rnd_spk_emb = torch.load(pt_file)
|
346 |
+
print(rnd_spk_emb.shape)
|
347 |
+
if rnd_spk_emb.shape != (768,):
|
348 |
+
raise ValueError("维度应为 768。")
|
349 |
+
else:
|
350 |
+
raise ValueError(f"Invalid speaker_type: {speaker_type}. ")
|
351 |
+
|
352 |
+
params_infer_code = {
|
353 |
+
'spk_emb': rnd_spk_emb,
|
354 |
+
'prompt': f'[speed_{speed}]',
|
355 |
+
'top_P': top_P,
|
356 |
+
'top_K': top_K,
|
357 |
+
'temperature': temperature
|
358 |
+
}
|
359 |
+
params_refine_text = {
|
360 |
+
'prompt': refine_text_prompt,
|
361 |
+
'top_P': top_P,
|
362 |
+
'top_K': top_K,
|
363 |
+
'temperature': temperature
|
364 |
+
}
|
365 |
+
|
366 |
+
if stream_mode == "real":
|
367 |
+
for text in texts:
|
368 |
+
_params_infer_code = {**params_infer_code}
|
369 |
+
wavs_gen = chat.infer(text, params_infer_code=_params_infer_code, params_refine_text=params_refine_text,
|
370 |
+
use_decoder=True, skip_refine_text=True, stream=True)
|
371 |
+
for gen in wavs_gen:
|
372 |
+
wavs = [np.array([[]])]
|
373 |
+
wavs[0] = np.hstack([wavs[0], np.array(gen[0])])
|
374 |
+
audio = wavs[0][0]
|
375 |
+
yield 24000, normalize_audio(audio)
|
376 |
+
|
377 |
+
clear_cuda_cache()
|
378 |
+
else:
|
379 |
+
for text in batch_split(texts, batch_size):
|
380 |
+
_params_infer_code = {**params_infer_code}
|
381 |
+
wavs = chat.infer(text, params_infer_code=_params_infer_code, params_refine_text=params_refine_text,
|
382 |
+
use_decoder=True, skip_refine_text=False, stream=False)
|
383 |
+
combined_audio = combine_audio(wavs)
|
384 |
+
yield 24000, combined_audio[0]
|
385 |
+
|
386 |
+
|
387 |
+
def generate_refine(text_file, oral, laugh, bk, temperature, top_P, top_K, progress=gr.Progress()):
|
388 |
+
from tts_model import generate_refine_text
|
389 |
+
from utils import split_text, replace_tokens, restore_tokens, replace_space_between_chinese
|
390 |
+
seed = random.randint(1, 9999)
|
391 |
+
refine_text_prompt = f"[oral_{oral}][laugh_{laugh}][break_{bk}]"
|
392 |
+
content = ''
|
393 |
+
if os.path.isfile(text_file):
|
394 |
+
content = ""
|
395 |
+
elif isinstance(text_file, str):
|
396 |
+
content = text_file
|
397 |
+
if re.search(r'\[uv_break\]|\[laugh\]', content) is not None:
|
398 |
+
gr.Info("检测到 [uv_break] [laugh],不能重复 refine ")
|
399 |
+
# print("检测到 [uv_break] [laugh],不能重复 refine ")
|
400 |
+
return content
|
401 |
+
batch_size = 5
|
402 |
+
|
403 |
+
content = replace_tokens(content)
|
404 |
+
texts = split_text(content, min_length=120)
|
405 |
+
print(texts)
|
406 |
+
for i, text in enumerate(texts):
|
407 |
+
texts[i] = restore_tokens(text)
|
408 |
+
txts = []
|
409 |
+
for batch in progress.tqdm(batch_split(texts, batch_size), desc=f"Refine Text Please Wait ..."):
|
410 |
+
txts.extend(generate_refine_text(chat, seed, batch, refine_text_prompt, temperature, top_P, top_K))
|
411 |
+
return replace_space_between_chinese('\n\n'.join(txts))
|
412 |
|
413 |
|
414 |
def generate_seed():
|
|
|
421 |
|
422 |
def update_label(text):
|
423 |
word_count = len(text)
|
424 |
+
return gr.update(label=f"朗读文本({word_count} 字)")
|
425 |
+
|
426 |
+
|
427 |
+
def inser_token(text, btn):
|
428 |
+
if btn == "+笑声":
|
429 |
+
return gr.update(
|
430 |
+
value=text + "[laugh]"
|
431 |
+
)
|
432 |
+
elif btn == "+停顿":
|
433 |
+
return gr.update(
|
434 |
+
value=text + "[uv_break]"
|
435 |
+
)
|
436 |
|
437 |
|
438 |
with gr.Blocks() as demo:
|
439 |
+
# 项目链接
|
440 |
+
# gr.Markdown("""
|
441 |
+
# <div style='text-align: center; font-size: 16px;'>
|
442 |
+
# 🌟 <a href='https://github.com/6drf21e/ChatTTS_colab'>项目地址 欢迎 start</a> 🌟
|
443 |
+
# </div>
|
444 |
+
# """)
|
445 |
+
gr.Markdown("# Deployed by [chattts.dev](https://chattts.dev?refer=hf-story-telling)")
|
446 |
with gr.Tab("音色抽卡"):
|
447 |
with gr.Row():
|
448 |
with gr.Column(scale=1):
|
|
|
453 |
]
|
454 |
# gr.Markdown("### 随机音色抽卡")
|
455 |
gr.Markdown("""
|
456 |
+
免抽卡,直接找稳定音色👇
|
457 |
+
|
458 |
+
[ModelScope ChatTTS Speaker(国内)](https://modelscope.cn/studios/ttwwwaa/ChatTTS_Speaker) | [HuggingFace ChatTTS Speaker(国外)](https://huggingface.co/spaces/taa/ChatTTS_Speaker)
|
459 |
+
|
460 |
在相同的 seed 和 温度等参数下,音色具有一定的一致性。点击下面的“随机音色生成”按钮将生成多个 seed。找到满意的音色后,点击音频下方“保存”按钮。
|
461 |
**注意:不同机器使用相同种子生成的音频音色可能不同,同一机器使用相同种子多次生成的音频音色也可能变化。**
|
462 |
""")
|
|
|
473 |
gr.Markdown("### 种子管理界面")
|
474 |
seed_list = gr.DataFrame(
|
475 |
label="种子列表",
|
476 |
+
headers=["Index", "Seed", "Name", "Path"],
|
477 |
+
datatype=["number", "number", "str", "str"],
|
478 |
interactive=True,
|
479 |
+
col_count=(4, "fixed"),
|
480 |
+
value=display_seeds
|
481 |
)
|
482 |
+
|
483 |
with gr.Row():
|
484 |
refresh_button = gr.Button("刷新")
|
485 |
save_button = gr.Button("保存")
|
486 |
del_button = gr.Button("删除")
|
487 |
+
play_button = gr.Button("试听")
|
488 |
+
|
489 |
+
with gr.Row():
|
490 |
+
# 添加已保存的种子音频播放组件
|
491 |
+
audio_player = gr.Audio(label="播放已保存种子音频", visible=False)
|
492 |
+
|
493 |
# 绑定按钮和函数
|
494 |
refresh_button.click(display_seeds, outputs=seed_list)
|
495 |
+
seed_list.select(seed_change).success(seed_change_btn, outputs=[del_button, play_button])
|
496 |
save_button.click(do_save_seeds, inputs=[seed_list], outputs=None)
|
497 |
del_button.click(do_delete_seed, inputs=del_button, outputs=seed_list)
|
498 |
+
play_button.click(do_play_seed, inputs=play_button, outputs=audio_player)
|
499 |
|
500 |
with gr.Column(scale=1):
|
501 |
audio_components = []
|
|
|
503 |
                    visible = i < num_seeds_default
                    a = gr.Audio(f"Audio {i}", visible=visible)
                    t = gr.Button(f"Seed", visible=visible)
                    s = gr.State(value=None)
                    t.click(do_save_seed, inputs=[t, s], outputs=None).success(display_seeds, outputs=seed_list)
                    audio_components.append(a)
                    audio_components.append(t)
                    audio_components.append(s)

                num_seeds.change(update_audio_components, inputs=num_seeds, outputs=audio_components)
                # output = gr.Column()
                # audio = gr.Audio(label="Output Audio")

    # ...
                                             placeholder="Please Input Text...", value=default_text)
                # call update_label whenever the textbox content changes
                text_file_input.change(update_label, inputs=text_file_input, outputs=text_file_input)
                # buttons for inserting pause / laugh tokens
                with gr.Row():
                    break_button = gr.Button("+停顿", variant="secondary")
                    laugh_button = gr.Button("+笑声", variant="secondary")
                    refine_button = gr.Button("Refine Text(预处理 加入停顿词、笑声等)", variant="secondary")

            with gr.Column():
                gr.Markdown("### 配置参数")
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("音色选择")
                        num_seeds_input = gr.Number(label="生成音频的数量", value=1, precision=0, visible=False)
                        speaker_stat = gr.State(value="seed")
                        tab_seed = gr.Tab(label="种子")
                        with tab_seed:
                            with gr.Row():
                                seed_input = gr.Number(label="指定种子", info="种子决定音色 0则随机", value=None,
                                                       precision=0)
                                generate_audio_seed = gr.Button("\U0001F3B2")
                        tab_roleid = gr.Tab(label="内置音色")
                        with tab_roleid:
                            roleid_input = gr.Dropdown(label="内置音色",
                                                       choices=[("发姐", "1"),
                                                                ("纯情男大学生", "2"),
                                                                ("阳光开朗大男孩", "3"),
                                                                ("知心小姐姐", "4"),
                                                                ("电视台女主持", "5"),
                                                                ("魅力大叔", "6"),
                                                                ("优雅甜美", "7"),
                                                                ("贴心男宝2", "21"),
                                                                ("正式打工人", "8"),
                                                                ("贴心男宝1", "9")],
                                                       value="1",
                                                       info="选择音色后会覆盖种子。感谢 @QuantumDriver 提供音色")
                        tab_pt = gr.Tab(label="上传.PT文件")
                        with tab_pt:
                            pt_input = gr.File(label="上传音色文件", file_types=[".pt"], height=100)

                with gr.Row():
                    style_select = gr.Radio(label="预设参数", info="语速部分可自行更改",
                                            choices=["小说朗读", "对话", "中英混合", "默认"], value="默认",
                                            interactive=True, )
                with gr.Row():
                    # refine
                    refine_text_input = gr.Checkbox(label="Refine",
                                                    info="打开后会自动根据下方参数添加笑声/停顿等。关闭后可自行添加 [uv_break] [laugh] 或者点击下方 Refine 按钮先行转换",
                                                    value=True)
                    speed_input = gr.Slider(label="语速", minimum=1, maximum=10, value=DEFAULT_SPEED, step=1)
                with gr.Row():
                    oral_input = gr.Slider(label="口语化", minimum=0, maximum=9, value=DEFAULT_ORAL, step=1)
                    laugh_input = gr.Slider(label="笑声", minimum=0, maximum=2, value=DEFAULT_LAUGH, step=1)
                    bk_input = gr.Slider(label="停顿", minimum=0, maximum=7, value=DEFAULT_BK, step=1)
                # gr.Markdown("### 文本参数")
                with gr.Row():
                    min_length_input = gr.Number(label="文本分段长度", info="大于这个数值进行分段",
                                                 value=DEFAULT_SEG_LENGTH, precision=0)
                    batch_size_input = gr.Number(label="批大小", info="越高越快 太高爆显存 4G推荐3 其他酌情",
                                                 value=DEFAULT_BATCH_SIZE, precision=0)
                with gr.Accordion("其他参数", open=False):
                    with gr.Row():
                        # temperature / top_P / top_K
                        temperature_input = gr.Slider(label="温度", minimum=0.01, maximum=1.0, step=0.01,
                                                      value=DEFAULT_TEMPERATURE)
                        top_P_input = gr.Slider(label="top_P", minimum=0.1, maximum=0.9, step=0.05, value=DEFAULT_TOP_P)
                        top_K_input = gr.Slider(label="top_K", minimum=1, maximum=20, step=1, value=DEFAULT_TOP_K)
                        # reset button
                        reset_button = gr.Button("重置")

        with gr.Row():
            with gr.Column():
                generate_button = gr.Button("生成音频", variant="primary")
            with gr.Column():
                generate_button_stream = gr.Button("流式生成音频(一边播放一边推理)", variant="primary")
                stream_select = gr.Radio(label="流输出方式",
                                         info="真流式为实验功能,播放效果:卡播卡播卡播(⏳🎵⏳🎵⏳🎵);伪流式为分段推理后输出,播放效果:卡卡卡播播播播(⏳⏳🎵🎵🎵🎵)。伪流式批次建议4以上减少卡顿",
                                         choices=[("真", "real"), ("伪", "fake")], value="fake", interactive=True, )

        with gr.Row():
            output_audio = gr.Audio(label="生成的音频文件")
            output_audio_stream = gr.Audio(label="流式音频", value=None,
                                           streaming=True,
                                           autoplay=True,
                                           # disable auto play for Windows, due to https://developer.chrome.com/blog/autoplay#webaudio
                                           interactive=False,
                                           show_label=True)

        generate_audio_seed.click(generate_seed,
                                  inputs=[],
                                  outputs=seed_input)


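        # Streaming sketch (an assumption about generate_tts_audio_stream, not a guarantee):
        # with streaming=True Gradio plays chunks as soon as the bound callback yields them,
        # so the handler is presumably a generator along the lines of
        #
        #     def generate_tts_audio_stream(...):
        #         for sr, chunk in synthesize_in_segments(...):   # hypothetical helper
        #             yield sr, chunk                             # (sample_rate, int16 ndarray)
        #
        # "伪" (fake) streaming yields each finished segment, while "真" (real) streaming yields
        # partial audio while a segment is still decoding, which matches the info text above.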
        def do_tab_change(evt: gr.SelectData):
            print(evt.selected, evt.index, evt.value, evt.target)
            kv = {
                "种子": "seed",
                "内置音色": "role",
                "上传.PT文件": "pt"
            }
            return kv.get(evt.value, "seed")


        tab_seed.select(do_tab_change, outputs=speaker_stat)
        tab_roleid.select(do_tab_change, outputs=speaker_stat)
        tab_pt.select(do_tab_change, outputs=speaker_stat)


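        # speaker_stat now records which voice source is active ("seed", "role" or "pt"), so the
        # generate handlers below can decide whether to read seed_input, roleid_input or pt_input.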
        def do_style_select(x):
            if x == "小说朗读":
                return [4, 0, 0, 2]
            elif x == "对话":
                return [5, 5, 1, 4]
            elif x == "中英混合":
                return [4, 1, 0, 3]
            else:
                return [DEFAULT_SPEED, DEFAULT_ORAL, DEFAULT_LAUGH, DEFAULT_BK]


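        # Each preset returns [speed, oral, laugh, break] in the same order as the outputs bound
        # below (speed_input, oral_input, laugh_input, bk_input); e.g. "小说朗读" means speed 4,
        # no filler words, no laughter and break level 2.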
        # style_select handler
        style_select.change(
            do_style_select,
            inputs=style_select,
            outputs=[speed_input, oral_input, laugh_input, bk_input]
        )

        # refine button
        refine_button.click(
            generate_refine,
            inputs=[text_file_input, oral_input, laugh_input, bk_input, temperature_input, top_P_input, top_K_input],
            outputs=text_file_input
        )
        # reset button: restore temperature and other sampling parameters
        reset_button.click(
            lambda: [0.3, 0.7, 20],
            # ...
                temperature_input,
                top_P_input,
                top_K_input,
                roleid_input,
                refine_text_input,
                speaker_stat,
                pt_input
            ],
            outputs=[output_audio]
        )

        generate_button_stream.click(
            fn=generate_tts_audio_stream,
            inputs=[
                text_file_input,
                num_seeds_input,
                seed_input,
                speed_input,
                oral_input,
                laugh_input,
                bk_input,
                min_length_input,
                batch_size_input,
                temperature_input,
                top_P_input,
                top_K_input,
                roleid_input,
                refine_text_input,
                speaker_stat,
                pt_input,
                stream_select
            ],
            outputs=[output_audio_stream]
        )

        break_button.click(
            inser_token,
            inputs=[text_file_input, break_button],
            outputs=text_file_input
        )

        laugh_button.click(
            inser_token,
            inputs=[text_file_input, laugh_button],
            outputs=text_file_input
        )

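        # Both buttons above share inser_token: Gradio passes the clicked button's label as its
        # value, so "+停顿" appends [uv_break] and "+笑声" appends [laugh] to the text box.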
    with gr.Tab("角色扮演"):
        def txt_2_script(text):
            lines = text.split("\n")
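            # Presumably each "角色::文本" line becomes a dict, e.g.
            #     "旁白::从前有一座山"  ->  {"character": "旁白", "txt": "从前有一座山"}
            # (cf. the script format note and script_2_txt further down).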
            # ...
            characters = list([_["character"] for _ in lines])
            unique_characters = list(dict.fromkeys(characters))
            print([[character, 0] for character in unique_characters])
            # one row per character: default [角色, 种子, 语速, 口语, 笑声, 停顿] for the seed table
            return [[character, 0, 5, 2, 0, 4] for character in unique_characters]


        def get_txt_characters(text):
            # ...
            scripts = llm_operation(api_base, api_key, model, LLM_PROMPT, text, required_keys=["txt", "character"])
            return script_2_txt(scripts)


        def generate_script_audio(text, models_seeds, progress=gr.Progress()):
            scripts = txt_2_script(text)  # convert the text into a script
            characters = get_characters(scripts)  # extract the characters from the script
            # ...
            import itertools
            from tts_model import generate_audio_for_seed
            from utils import combine_audio, save_audio, normalize_zh

            assert isinstance(models_seeds, pd.DataFrame)

            # ...
                        break
                    yield batch

            column_mapping = {
                '角色': 'character',
                '种子': 'seed',
                '语速': 'speed',
                '口语': 'oral',
                '笑声': 'laugh',
                '停顿': 'break'
            }
            # rename the DataFrame columns, then convert them to a list of records
            models_seeds = models_seeds.rename(columns=column_mapping).to_dict(orient='records')
            # models_seeds = models_seeds.to_dict(orient='records')

            # check that every character has a corresponding seed
            print(models_seeds)
            seed_lookup = {seed['character']: seed for seed in models_seeds}

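            # After the rename each record should look roughly like
            #     {'character': '旁白', 'seed': 2222, 'speed': 3, 'oral': 0, 'laugh': 0, 'break': 2}
            # (values as in the default_data table defined further down in this tab).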
            character_seeds = {}
            missing_seeds = []
            # iterate over every character in the script
            for character in characters:
                character_name = character[0]
                seed_info = seed_lookup.get(character_name)
                if seed_info:
                    character_seeds[character_name] = seed_info
                else:
                    missing_seeds.append(character_name)

            if missing_seeds:
                missing_characters_str = ', '.join(missing_seeds)
                gr.Info(f"以下角色没有种子,请先设置种子:{missing_characters_str}")
                return None

            print(character_seeds)
            # return
            refine_text_prompt = "[oral_2][laugh_0][break_4]"
            all_wavs = []

            # ...
            batch_size = 5  # batch size per character
            # process the script character by character
            for character, lines in progress.tqdm(grouped_lines.items(), desc="生成剧本音频"):
                info = character_seeds[character]
                seed = info["seed"]
                speed = info["speed"]
                orla = info["oral"]
                laugh = info["laugh"]
                bk = info["break"]

                refine_text_prompt = f"[oral_{orla}][laugh_{laugh}][break_{bk}]"

                # process this character's lines in batches
                for batch_lines in batch(lines, batch_size):
                    texts = [normalize_zh(line["txt"]) for line in batch_lines]
                    print(f"seed={seed} t={texts} c={character} s={speed} r={refine_text_prompt}")
                    wavs = generate_audio_for_seed(chat, int(seed), texts, DEFAULT_BATCH_SIZE, speed,
                                                   refine_text_prompt, None, DEFAULT_TEMPERATURE, DEFAULT_TOP_P,
                                                   DEFAULT_TOP_K, skip_save=True)  # synthesize the whole batch
                    batch_results[character].extend(wavs)

            # ...
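            # batch_results is keyed by character, so every line of one character reuses the same
            # seed and refine prompt; all_wavs is presumably rebuilt in the original line order
            # before the final combine_audio() call below.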
            # merge all generated audio
            audio = combine_audio(all_wavs)
            fname = f"script_{int(time.time())}.wav"
            return save_audio(fname, audio)


        script_example = {
            # ...
                "txt": "当小红帽到达奶奶家时,她发现大灰狼伪装成了奶奶。",
                "character": "旁白"
            }, {
                "txt": "小红帽疑惑的问",
                "character": "旁白"
            }, {
                "txt": "奶奶,你的耳朵怎么这么尖?",
            # ...
                                                       placeholder="请输入API Base URL",
                                                       value=r"https://api.openai.com/v1")
                    openai_api_key_input = gr.Textbox(label="OpenAI API Key", placeholder="请输入API Key",
                                                      value="sk-xxxxxxx", type="password")
                    # LLM prompt text
                    ai_text_input = gr.Textbox(label="剧情简介或者一段故事", placeholder="请输入文本...", lines=2,
                                               value=ai_text_default)
                    # ...
                with gr.Column(scale=3):
                    gr.Markdown("### 脚本")
                    gr.Markdown(
                        "脚本可以手工编写也可以从左侧的AI脚本生成按钮生成。脚本格式 **角色::文本** 一行为一句 注意是::")
                    script_text = "\n".join(
                        [f"{_.get('character', '')}::{_.get('txt', '')}" for _ in script_example['lines']])

                    # ...
                with gr.Column(scale=1):
                    gr.Markdown("### 角色种子")
                    # DataFrame holding the converted script
                    # default rows: [speed_5][oral_2][laugh_0][break_4]
                    default_data = [
                        ["旁白", 2222, 3, 0, 0, 2],
                        ["年轻女性", 2, 5, 2, 0, 2],
                        ["中年男性", 2424, 5, 2, 0, 2]
                    ]

                    script_data = gr.DataFrame(
                        value=default_data,
                        label="角色对应的音色种子,从抽卡那获取",
                        headers=["角色", "种子", "语速", "口语", "笑声", "停顿"],
                        datatype=["str", "number", "number", "number", "number", "number"],
                        interactive=True,
                        col_count=(6, "fixed"),
                    )
                    # generate-audio button
                    script_generate_audio = gr.Button("步骤②:生成音频")
                    # ...
            outputs=[script_audio]
        )

demo.launch(share=args.share, inbrowser=True)