kevinwang676 committed on
Commit
051c72a
1 Parent(s): 7a0fd5b

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .dockerignore +8 -0
  2. .gitattributes +2 -0
  3. .gitignore +12 -0
  4. Docker/damo.sha256 +3 -0
  5. Docker/download.py +5 -0
  6. Docker/download.sh +11 -0
  7. Docker/links.sha256 +12 -0
  8. Docker/links.txt +34 -0
  9. Dockerfile +45 -0
  10. GPT_SoVITS/AR/__init__.py +0 -0
  11. GPT_SoVITS/AR/data/__init__.py +0 -0
  12. GPT_SoVITS/AR/data/bucket_sampler.py +163 -0
  13. GPT_SoVITS/AR/data/data_module.py +76 -0
  14. GPT_SoVITS/AR/data/dataset.py +321 -0
  15. GPT_SoVITS/AR/models/__init__.py +0 -0
  16. GPT_SoVITS/AR/models/t2s_lightning_module.py +141 -0
  17. GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py +107 -0
  18. GPT_SoVITS/AR/models/t2s_model.py +448 -0
  19. GPT_SoVITS/AR/models/t2s_model_onnx.py +338 -0
  20. GPT_SoVITS/AR/models/utils.py +229 -0
  21. GPT_SoVITS/AR/modules/__init__.py +0 -0
  22. GPT_SoVITS/AR/modules/activation.py +428 -0
  23. GPT_SoVITS/AR/modules/activation_onnx.py +178 -0
  24. GPT_SoVITS/AR/modules/embedding.py +81 -0
  25. GPT_SoVITS/AR/modules/embedding_onnx.py +63 -0
  26. GPT_SoVITS/AR/modules/lr_schedulers.py +83 -0
  27. GPT_SoVITS/AR/modules/optim.py +622 -0
  28. GPT_SoVITS/AR/modules/patched_mha_with_cache.py +465 -0
  29. GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py +92 -0
  30. GPT_SoVITS/AR/modules/scaling.py +335 -0
  31. GPT_SoVITS/AR/modules/transformer.py +378 -0
  32. GPT_SoVITS/AR/modules/transformer_onnx.py +292 -0
  33. GPT_SoVITS/AR/text_processing/__init__.py +0 -0
  34. GPT_SoVITS/AR/text_processing/phonemizer.py +79 -0
  35. GPT_SoVITS/AR/text_processing/symbols.py +10 -0
  36. GPT_SoVITS/AR/utils/__init__.py +37 -0
  37. GPT_SoVITS/AR/utils/initialize.py +38 -0
  38. GPT_SoVITS/AR/utils/io.py +34 -0
  39. GPT_SoVITS/configs/s1.yaml +31 -0
  40. GPT_SoVITS/configs/s1big.yaml +31 -0
  41. GPT_SoVITS/configs/s1big2.yaml +31 -0
  42. GPT_SoVITS/configs/s1longer.yaml +31 -0
  43. GPT_SoVITS/configs/s1mq.yaml +77 -0
  44. GPT_SoVITS/configs/s2.json +90 -0
  45. GPT_SoVITS/configs/train.yaml +32 -0
  46. GPT_SoVITS/feature_extractor/__init__.py +6 -0
  47. GPT_SoVITS/feature_extractor/cnhubert.py +104 -0
  48. GPT_SoVITS/feature_extractor/whisper_enc.py +25 -0
  49. GPT_SoVITS/inference_gui.py +340 -0
  50. GPT_SoVITS/inference_webui.py +690 -0
.dockerignore ADDED
@@ -0,0 +1,8 @@
+ docs
+ logs
+ output
+ reference
+ SoVITS_weights
+ GPT_weights
+ TEMP
+ .git
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ jay_speech.wav filter=lfs diff=lfs merge=lfs -text
+ tools/damo_asr/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,12 @@
+ .DS_Store
+ __pycache__
+ *.pyc
+ env
+ runtime
+ .idea
+ output
+ logs
+ reference
+ GPT_weights
+ SoVITS_weights
+ TEMP
Docker/damo.sha256 ADDED
@@ -0,0 +1,3 @@
+ 5bba782a5e9196166233b9ab12ba04cadff9ef9212b4ff6153ed9290ff679025 /workspace/tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/model.pb
+ b3be75be477f0780277f3bae0fe489f48718f585f3a6e45d7dd1fbb1a4255fc5 /workspace/tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch/model.pb
+ a5818bb9d933805a916eebe41eb41648f7f9caad30b4bd59d56f3ca135421916 /workspace/tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/model.pb
Docker/download.py ADDED
@@ -0,0 +1,5 @@
+ # Download damo ASR related models
+ from modelscope import snapshot_download
+ model_dir = snapshot_download('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',revision="v2.0.4")
+ model_dir = snapshot_download('damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',revision="v2.0.4")
+ model_dir = snapshot_download('damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',revision="v2.0.4")
Docker/download.sh ADDED
@@ -0,0 +1,11 @@
+ #!/usr/bin/env bash
+
+ set -Eeuo pipefail
+
+ echo "Downloading models..."
+
+ aria2c --disable-ipv6 --input-file /workspace/Docker/links.txt --dir /workspace --continue
+
+ echo "Checking SHA256..."
+
+ parallel --will-cite -a /workspace/Docker/links.sha256 "echo -n {} | sha256sum -c"
Docker/links.sha256 ADDED
@@ -0,0 +1,12 @@
+ b1c1e17e9c99547a89388f72048cd6e1b41b5a18b170e86a46dfde0324d63eb1 /workspace/GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
+ fc579c1db3c1e21b721001cf99d7a584214280df19b002e200b630a34fa06eb8 /workspace/GPT_SoVITS/pretrained_models/s2D488k.pth
+ 020a014e1e01e550e510f2f61fae5e5f5b6aab40f15c22f1f12f724df507e835 /workspace/GPT_SoVITS/pretrained_models/s2G488k.pth
+ 24164f129c66499d1346e2aa55f183250c223161ec2770c0da3d3b08cf432d3c /workspace/GPT_SoVITS/pretrained_models/chinese-hubert-base/pytorch_model.bin
+ e53a693acc59ace251d143d068096ae0d7b79e4b1b503fa84c9dcf576448c1d8 /workspace/GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/pytorch_model.bin
+ 39796caa5db18d7f9382d8ac997ac967bfd85f7761014bb807d2543cc844ef05 /workspace/tools/uvr5/uvr5_weights/HP2_all_vocals.pth
+ 45e6b65199e781b4a6542002699be9f19cd3d1cb7d1558bc2bfbcd84674dfe28 /workspace/tools/uvr5/uvr5_weights/HP3_all_vocals.pth
+ 5908891829634926119720241e8573d97cbeb8277110a7512bdb0bd7563258ee /workspace/tools/uvr5/uvr5_weights/HP5_only_main_vocal.pth
+ 8c8fd1582f9aabc363e47af62ddb88df6cae7e064cae75bbf041a067a5e0aee2 /workspace/tools/uvr5/uvr5_weights/VR-DeEchoAggressive.pth
+ 01376dd2a571bf3cb9cced680732726d2d732609d09216a610b0d110f133febe /workspace/tools/uvr5/uvr5_weights/VR-DeEchoDeReverb.pth
+ 56aba59db3bcdd14a14464e62f3129698ecdea62eee0f003b9360923eb3ac79e /workspace/tools/uvr5/uvr5_weights/VR-DeEchoNormal.pth
+ 233bb5c6aaa365e568659a0a81211746fa881f8f47f82d9e864fce1f7692db80 /workspace/tools/uvr5/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
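
For reference, the same integrity check that Docker/download.sh performs with GNU parallel and sha256sum can be done with only the Python standard library. A minimal sketch (not part of the commit), assuming it is run inside the image where the /workspace paths listed in links.sha256 exist:

import hashlib
import sys

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large model checkpoints do not have to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

all_ok = True
with open("/workspace/Docker/links.sha256", encoding="utf-8") as f:
    for line in f:
        expected, path = line.split()  # each line: "<sha256> <path>"
        ok = sha256_of(path) == expected
        all_ok = all_ok and ok
        print(f"{path}: {'OK' if ok else 'FAILED'}")
sys.exit(0 if all_ok else 1)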
Docker/links.txt ADDED
@@ -0,0 +1,34 @@
+ # GPT-SoVITS models
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s1bert25hz-2kh-longer-epoch%3D68e-step%3D50232.ckpt
+ out=GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s2D488k.pth
+ out=GPT_SoVITS/pretrained_models/s2D488k.pth
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/s2G488k.pth
+ out=GPT_SoVITS/pretrained_models/s2G488k.pth
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/config.json
+ out=GPT_SoVITS/pretrained_models/chinese-hubert-base/config.json
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/preprocessor_config.json
+ out=GPT_SoVITS/pretrained_models/chinese-hubert-base/preprocessor_config.json
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-hubert-base/pytorch_model.bin
+ out=GPT_SoVITS/pretrained_models/chinese-hubert-base/pytorch_model.bin
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/config.json
+ out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/config.json
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/pytorch_model.bin
+ out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/pytorch_model.bin
+ https://huggingface.co/lj1995/GPT-SoVITS/resolve/main/chinese-roberta-wwm-ext-large/tokenizer.json
+ out=GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large/tokenizer.json
+ # UVR5
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2_all_vocals.pth
+ out=tools/uvr5/uvr5_weights/HP2_all_vocals.pth
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP3_all_vocals.pth
+ out=tools/uvr5/uvr5_weights/HP3_all_vocals.pth
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5_only_main_vocal.pth
+ out=tools/uvr5/uvr5_weights/HP5_only_main_vocal.pth
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoAggressive.pth
+ out=tools/uvr5/uvr5_weights/VR-DeEchoAggressive.pth
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoDeReverb.pth
+ out=tools/uvr5/uvr5_weights/VR-DeEchoDeReverb.pth
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/VR-DeEchoNormal.pth
+ out=tools/uvr5/uvr5_weights/VR-DeEchoNormal.pth
+ https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
+ out=tools/uvr5/uvr5_weights/onnx_dereverb_By_FoxJoy/vocals.onnx
Dockerfile ADDED
@@ -0,0 +1,45 @@
+ # Base CUDA image
+ FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04
+
+ LABEL maintainer="breakstring@hotmail.com"
+ LABEL version="dev-20240209"
+ LABEL description="Docker image for GPT-SoVITS"
+
+
+ # Install 3rd party apps
+ ENV DEBIAN_FRONTEND=noninteractive
+ ENV TZ=Etc/UTC
+ RUN apt-get update && \
+     apt-get install -y --no-install-recommends tzdata ffmpeg libsox-dev parallel aria2 git git-lfs && \
+     git lfs install && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Copy only requirements.txt initially to leverage Docker cache
+ WORKDIR /workspace
+ COPY requirements.txt /workspace/
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Define a build-time argument for image type
+ ARG IMAGE_TYPE=full
+
+ # Conditional logic based on the IMAGE_TYPE argument
+ # Always copy the Docker directory, but only use it if IMAGE_TYPE is not "elite"
+ COPY ./Docker /workspace/Docker
+ # The "elite" image type does not bundle the extra models
+ RUN if [ "$IMAGE_TYPE" != "elite" ]; then \
+     chmod +x /workspace/Docker/download.sh && \
+     /workspace/Docker/download.sh && \
+     python /workspace/Docker/download.py && \
+     python -m nltk.downloader averaged_perceptron_tagger cmudict; \
+     fi
+
+
+ # Copy the rest of the application
+ COPY . /workspace
+
+ # Copy the rest of the application
+ COPY . /workspace
+
+ EXPOSE 9871 9872 9873 9874 9880
+
+ CMD ["python", "webui.py"]
GPT_SoVITS/AR/__init__.py ADDED
File without changes
GPT_SoVITS/AR/data/__init__.py ADDED
File without changes
GPT_SoVITS/AR/data/bucket_sampler.py ADDED
@@ -0,0 +1,163 @@
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/bucket_sampler.py
+ # reference: https://github.com/lifeiteng/vall-e
+ import itertools
+ import math
+ import random
+ from random import shuffle
+ from typing import Iterator
+ from typing import Optional
+ from typing import TypeVar
+
+ import torch
+ import torch.distributed as dist
+ from torch.utils.data import Dataset
+ from torch.utils.data import Sampler
+
+ __all__ = [
+     "DistributedBucketSampler",
+ ]
+
+ T_co = TypeVar("T_co", covariant=True)
+
+
+ class DistributedBucketSampler(Sampler[T_co]):
+     r"""
+     sort the dataset wrt. input length
+     divide samples into buckets
+     sort within buckets
+     divide buckets into batches
+     sort batches
+     """
+
+     def __init__(
+         self,
+         dataset: Dataset,
+         num_replicas: Optional[int] = None,
+         rank: Optional[int] = None,
+         shuffle: bool = True,
+         seed: int = 0,
+         drop_last: bool = False,
+         batch_size: int = 32,
+     ) -> None:
+         if num_replicas is None:
+             if not dist.is_available():
+                 raise RuntimeError("Requires distributed package to be available")
+             num_replicas = dist.get_world_size() if torch.cuda.is_available() else 1
+         if rank is None:
+             if not dist.is_available():
+                 raise RuntimeError("Requires distributed package to be available")
+             rank = dist.get_rank() if torch.cuda.is_available() else 0
+         if torch.cuda.is_available():
+             torch.cuda.set_device(rank)
+         if rank >= num_replicas or rank < 0:
+             raise ValueError(
+                 "Invalid rank {}, rank should be in the interval"
+                 " [0, {}]".format(rank, num_replicas - 1)
+             )
+         self.dataset = dataset
+         self.num_replicas = num_replicas
+         self.rank = rank
+         self.epoch = 0
+         self.drop_last = drop_last
+         # If the dataset length is evenly divisible by # of replicas, then there
+         # is no need to drop any data, since the dataset will be split equally.
+         if (
+             self.drop_last and len(self.dataset) % self.num_replicas != 0
+         ):  # type: ignore[arg-type]
+             # Split to nearest available length that is evenly divisible.
+             # This is to ensure each rank receives the same amount of data when
+             # using this Sampler.
+             self.num_samples = math.ceil(
+                 (len(self.dataset) - self.num_replicas)
+                 / self.num_replicas  # type: ignore[arg-type]
+             )
+         else:
+             self.num_samples = math.ceil(
+                 len(self.dataset) / self.num_replicas
+             )  # type: ignore[arg-type]
+         self.total_size = self.num_samples * self.num_replicas
+         self.shuffle = shuffle
+         self.seed = seed
+         self.batch_size = batch_size
+         self.id_with_length = self._get_sample_lengths()
+         self.id_buckets = self.make_buckets(bucket_width=2.0)
+
+     def _get_sample_lengths(self):
+         id_with_lengths = []
+         for i in range(len(self.dataset)):
+             id_with_lengths.append((i, self.dataset.get_sample_length(i)))
+         id_with_lengths.sort(key=lambda x: x[1])
+         return id_with_lengths
+
+     def make_buckets(self, bucket_width: float = 2.0):
+         buckets = []
+         cur = []
+         max_sec = bucket_width
+         for id, sec in self.id_with_length:
+             if sec < max_sec:
+                 cur.append(id)
+             else:
+                 buckets.append(cur)
+                 cur = [id]
+                 max_sec += bucket_width
+         if len(cur) > 0:
+             buckets.append(cur)
+         return buckets
+
+     def __iter__(self) -> Iterator[T_co]:
+         if self.shuffle:
+             # deterministically shuffle based on epoch and seed
+             g = torch.Generator()
+             g.manual_seed(self.seed + self.epoch)
+             random.seed(self.epoch + self.seed)
+             shuffled_bucket = []
+             for buc in self.id_buckets:
+                 buc_copy = buc.copy()
+                 shuffle(buc_copy)
+                 shuffled_bucket.append(buc_copy)
+             grouped_batch_size = self.batch_size * self.num_replicas
+             shuffled_bucket = list(itertools.chain(*shuffled_bucket))
+             n_batch = int(math.ceil(len(shuffled_bucket) / grouped_batch_size))
+             batches = [
+                 shuffled_bucket[b * grouped_batch_size : (b + 1) * grouped_batch_size]
+                 for b in range(n_batch)
+             ]
+             shuffle(batches)
+             indices = list(itertools.chain(*batches))
+         else:
+             # type: ignore[arg-type]
+             indices = list(range(len(self.dataset)))
+
+         if not self.drop_last:
+             # add extra samples to make it evenly divisible
+             padding_size = self.total_size - len(indices)
+             if padding_size <= len(indices):
+                 indices += indices[:padding_size]
+             else:
+                 indices += (indices * math.ceil(padding_size / len(indices)))[
+                     :padding_size
+                 ]
+         else:
+             # remove tail of data to make it evenly divisible.
+             indices = indices[: self.total_size]
+         assert len(indices) == self.total_size
+
+         # subsample
+         indices = indices[self.rank : self.total_size : self.num_replicas]
+         assert len(indices) == self.num_samples
+
+         return iter(indices)
+
+     def __len__(self) -> int:
+         return self.num_samples
+
+     def set_epoch(self, epoch: int) -> None:
+         r"""
+         Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
+         use a different random ordering for each epoch. Otherwise, the next iteration of this
+         sampler will yield the same ordering.
+
+         Args:
+             epoch (int): Epoch number.
+         """
+         self.epoch = epoch
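
As a rough usage sketch (not part of the commit): the sampler only needs a dataset that exposes __len__ and get_sample_length, and it can be driven single-process by passing num_replicas and rank explicitly, so no torch.distributed process group is required. The ToyDataset below is a hypothetical stand-in for Text2SemanticDataset, and AR.data.bucket_sampler is assumed to be importable (GPT_SoVITS on PYTHONPATH):

from torch.utils.data import Dataset
from AR.data.bucket_sampler import DistributedBucketSampler

class ToyDataset(Dataset):  # hypothetical stand-in for Text2SemanticDataset
    def __init__(self, seconds):
        self.seconds = seconds
    def __len__(self):
        return len(self.seconds)
    def __getitem__(self, idx):
        return self.seconds[idx]
    def get_sample_length(self, idx):  # the only extra method the sampler relies on
        return self.seconds[idx]

dataset = ToyDataset([0.5, 1.2, 3.1, 2.4, 7.8, 4.0, 0.9, 5.5])
# num_replicas/rank are passed explicitly, so no distributed init is needed.
sampler = DistributedBucketSampler(dataset, num_replicas=1, rank=0, batch_size=4)
print(list(iter(sampler)))  # indices grouped so clips of similar length share a batch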
GPT_SoVITS/AR/data/data_module.py ADDED
@@ -0,0 +1,76 @@
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/data_module.py
+ # reference: https://github.com/lifeiteng/vall-e
+ from pytorch_lightning import LightningDataModule
+ from AR.data.bucket_sampler import DistributedBucketSampler
+ from AR.data.dataset import Text2SemanticDataset
+ from torch.utils.data import DataLoader
+
+
+ class Text2SemanticDataModule(LightningDataModule):
+     def __init__(
+         self,
+         config,
+         train_semantic_path,
+         train_phoneme_path,
+         dev_semantic_path=None,
+         dev_phoneme_path=None,
+     ):
+         super().__init__()
+         self.config = config
+         self.train_semantic_path = train_semantic_path
+         self.train_phoneme_path = train_phoneme_path
+         self.dev_semantic_path = dev_semantic_path
+         self.dev_phoneme_path = dev_phoneme_path
+         self.num_workers = self.config["data"]["num_workers"]
+
+     def prepare_data(self):
+         pass
+
+     def setup(self, stage=None, output_logs=False):
+         self._train_dataset = Text2SemanticDataset(
+             phoneme_path=self.train_phoneme_path,
+             semantic_path=self.train_semantic_path,
+             max_sec=self.config["data"]["max_sec"],
+             pad_val=self.config["data"]["pad_val"],
+         )
+         self._dev_dataset = self._train_dataset
+         # self._dev_dataset = Text2SemanticDataset(
+         #     phoneme_path=self.dev_phoneme_path,
+         #     semantic_path=self.dev_semantic_path,
+         #     max_sample=self.config['data']['max_eval_sample'],
+         #     max_sec=self.config['data']['max_sec'],
+         #     pad_val=self.config['data']['pad_val'])
+
+     def train_dataloader(self):
+         batch_size = self.config["train"]["batch_size"] // 2 if self.config["train"].get("if_dpo", False) == True else self.config["train"]["batch_size"]
+         batch_size = max(min(batch_size, len(self._train_dataset) // 4), 1)  # guard so checkpoints do not fail to be saved
+         sampler = DistributedBucketSampler(self._train_dataset, batch_size=batch_size)
+         return DataLoader(
+             self._train_dataset,
+             batch_size=batch_size,
+             sampler=sampler,
+             collate_fn=self._train_dataset.collate,
+             num_workers=self.num_workers,
+             persistent_workers=True,
+             prefetch_factor=16,
+         )
+
+     def val_dataloader(self):
+         return DataLoader(
+             self._dev_dataset,
+             batch_size=1,
+             shuffle=False,
+             collate_fn=self._train_dataset.collate,
+             num_workers=max(self.num_workers, 12),
+             persistent_workers=True,
+             prefetch_factor=16,
+         )
+
+     # Is this ever actually used?
+     def test_dataloader(self):
+         return DataLoader(
+             self._dev_dataset,
+             batch_size=1,
+             shuffle=False,
+             collate_fn=self._train_dataset.collate,
+         )
GPT_SoVITS/AR/data/dataset.py ADDED
@@ -0,0 +1,321 @@
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/data/dataset.py
+ # reference: https://github.com/lifeiteng/vall-e
+ import pdb
+ import sys
+
+ # sys.path.append("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert")
+ import traceback, os
+ from typing import Dict
+ from typing import List
+
+ import numpy as np
+ import pandas as pd
+ import torch, json
+ from torch.utils.data import DataLoader
+ from torch.utils.data import Dataset
+ from transformers import AutoTokenizer
+
+ from text import cleaned_text_to_sequence
+
+ # from config import exp_dir
+
+
+ def batch_sequences(sequences: List[np.array], axis: int = 0, pad_value: int = 0):
+     seq = sequences[0]
+     ndim = seq.ndim
+     if axis < 0:
+         axis += ndim
+     dtype = seq.dtype
+     pad_value = dtype.type(pad_value)
+     seq_lengths = [seq.shape[axis] for seq in sequences]
+     max_length = np.max(seq_lengths)
+
+     padded_sequences = []
+     for seq, length in zip(sequences, seq_lengths):
+         padding = (
+             [(0, 0)] * axis + [(0, max_length - length)] + [(0, 0)] * (ndim - axis - 1)
+         )
+         padded_seq = np.pad(seq, padding, mode="constant", constant_values=pad_value)
+         padded_sequences.append(padded_seq)
+     batch = np.stack(padded_sequences)
+     return batch
+
+
+ class Text2SemanticDataset(Dataset):
+     """dataset class for text tokens to semantic model training."""
+
+     def __init__(
+         self,
+         phoneme_path: str,
+         semantic_path: str,
+         max_sample: int = None,
+         max_sec: int = 100,
+         pad_val: int = 1024,
+         # min value of phoneme/sec
+         min_ps_ratio: int = 3,
+         # max value of phoneme/sec
+         max_ps_ratio: int = 25,
+     ) -> None:
+         super().__init__()
+
+         self.semantic_data = pd.read_csv(
+             semantic_path, delimiter="\t", encoding="utf-8"
+         )
+         # get dict
+         self.path2 = phoneme_path  # "%s/2-name2text.txt"%exp_dir#phoneme_path
+         self.path3 = "%s/3-bert" % (
+             os.path.basename(phoneme_path)
+         )  # "%s/3-bert"%exp_dir#bert_dir
+         self.path6 = semantic_path  # "%s/6-name2semantic.tsv"%exp_dir#semantic_path
+         assert os.path.exists(self.path2)
+         assert os.path.exists(self.path6)
+         self.phoneme_data = {}
+         with open(self.path2, "r", encoding="utf8") as f:
+             lines = f.read().strip("\n").split("\n")
+
+         for line in lines:
+             tmp = line.split("\t")
+             if len(tmp) != 4:
+                 continue
+             self.phoneme_data[tmp[0]] = [tmp[1], tmp[2], tmp[3]]
+
+         # self.phoneme_data = np.load(phoneme_path, allow_pickle=True).item()
+         # pad for semantic tokens
+         self.PAD: int = pad_val
+         # self.hz = 25
+         # with open("/data/docker/liujing04/gpt-vits/mq-vits-s1bert_no_bert/configs/s2.json", "r") as f:data = f.read()
+         # data=json.loads(data)["model"]["semantic_frame_rate"]#50hz
+         # self.hz=int(data[:-2])#
+         self.hz = int(os.environ.get("hz", "25hz")[:-2])
+
+         # max seconds of semantic token
+         self.max_sec = max_sec
+         self.min_ps_ratio = min_ps_ratio
+         self.max_ps_ratio = max_ps_ratio
+
+         if max_sample is not None:
+             self.semantic_data = self.semantic_data[:max_sample]
+
+         # {idx: (semantic, phoneme)}
+         # semantic list, phoneme list
+         self.semantic_phoneme = []
+         self.item_names = []
+
+         self.inited = False
+
+         if not self.inited:
+             # run the initialization routine
+             self.init_batch()
+             self.inited = True
+             del self.semantic_data
+             del self.phoneme_data
+         # self.tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
+         # self.tokenizer = AutoTokenizer.from_pretrained("/data/docker/liujing04/bert-vits2/Bert-VITS2-master20231106/bert/chinese-roberta-wwm-ext-large")
+
+     def init_batch(self):
+         semantic_data_len = len(self.semantic_data)
+         phoneme_data_len = len(self.phoneme_data.keys())
+         print("semantic_data_len:", semantic_data_len)
+         print("phoneme_data_len:", phoneme_data_len)
+         print(self.semantic_data)
+         idx = 0
+         num_not_in = 0
+         num_deleted_bigger = 0
+         num_deleted_ps = 0
+         for i in range(semantic_data_len):
+             # walk the rows one by one
+             # get str
+             item_name = self.semantic_data.iloc[i, 0]
+             # print(self.phoneme_data)
+             try:
+                 phoneme, word2ph, text = self.phoneme_data[item_name]
+             except Exception:
+                 traceback.print_exc()
+                 # print(f"{item_name} not in self.phoneme_data !")
+                 num_not_in += 1
+                 continue
+
+             semantic_str = self.semantic_data.iloc[i, 1]
+             # get token list
+             semantic_ids = [int(idx) for idx in semantic_str.split(" ")]
+             # (T); no need to reshape to (1, T), since len() is what is needed
+             # filter out samples that are too long
+             if (
+                 len(semantic_ids) > self.max_sec * self.hz
+             ):  # 1: estimate the total duration from the token count and drop clips longer than max_sec (set in the config); 40*25=1k
+                 num_deleted_bigger += 1
+                 continue
+             # (T,); this is cheap, so do it once up front instead of per item in __getitem__
+             phoneme = phoneme.split(" ")
+
+             try:
+                 phoneme_ids = cleaned_text_to_sequence(phoneme)
+             except:
+                 traceback.print_exc()
+                 # print(f"{item_name} not in self.phoneme_data !")
+                 num_not_in += 1
+                 continue
+             # if len(phoneme_ids) > 400:  # 2: replaced by a fixed limit of semantic length / 2.5
+             if (
+                 len(phoneme_ids) > self.max_sec * self.hz / 2.5
+             ):  # 2: fixed limit of semantic length / 2.5
+                 num_deleted_ps += 1
+                 continue
+             # if len(semantic_ids) > 1000:  # 3
+             #     num_deleted_bigger += 1
+             #     continue
+
+             ps_ratio = len(phoneme_ids) / (len(semantic_ids) / self.hz)
+
+             if (
+                 ps_ratio > self.max_ps_ratio or ps_ratio < self.min_ps_ratio
+             ):  # 4: phonemes per second must stay within 3~25
+                 num_deleted_ps += 1
+                 # print(item_name)
+                 continue
+
+             self.semantic_phoneme.append((semantic_ids, phoneme_ids))
+             idx += 1
+             self.item_names.append(item_name)
+
+         min_num = 100  # at 20 the data is not duplicated; at 30 it is duplicated but no ckpt would be saved
+         leng = len(self.semantic_phoneme)
+         if leng < min_num:
+             tmp1 = self.semantic_phoneme
+             tmp2 = self.item_names
+             self.semantic_phoneme = []
+             self.item_names = []
+             for _ in range(max(2, int(min_num / leng))):
+                 self.semantic_phoneme += tmp1
+                 self.item_names += tmp2
+         if num_not_in > 0:
+             print(f"there are {num_not_in} semantic datas not in phoneme datas")
+         if num_deleted_bigger > 0:
+             print(
+                 f"deleted {num_deleted_bigger} audios who's duration are bigger than {self.max_sec} seconds"
+             )
+         if num_deleted_ps > 0:
+             # 4702 for LibriTTS; LibriTTS is annotated data -- does it still need filtering? => yes, there are extreme values of 100
+             print(
+                 f"deleted {num_deleted_ps} audios who's phoneme/sec are bigger than {self.max_ps_ratio} or smaller than {self.min_ps_ratio}"
+             )
+         """
+         there are 31 semantic datas not in phoneme datas
+         deleted 34 audios who's duration are bigger than 54 seconds
+         deleted 3190 audios who's phoneme/sec are bigger than 25 or smaller than 3
+         dataset.__len__(): 366463
+
+         """
+         # 345410 for LibriTTS
+         print("dataset.__len__():", self.__len__())
+
+     def __get_item_names__(self) -> List[str]:
+         return self.item_names
+
+     def __len__(self) -> int:
+         return len(self.semantic_phoneme)
+
+     def __getitem__(self, idx: int) -> Dict:
+         semantic_ids, phoneme_ids = self.semantic_phoneme[idx]
+         item_name = self.item_names[idx]
+         phoneme_ids_len = len(phoneme_ids)
+         # semantic tokens target
+         semantic_ids_len = len(semantic_ids)
+
+         flag = 0
+         path_bert = "%s/%s.pt" % (self.path3, item_name)
+         if os.path.exists(path_bert) == True:
+             bert_feature = torch.load(path_bert, map_location="cpu")
+         else:
+             flag = 1
+         if flag == 1:
+             # bert_feature=torch.zeros_like(phoneme_ids,dtype=torch.float32)
+             bert_feature = None
+         else:
+             assert bert_feature.shape[-1] == len(phoneme_ids)
+         return {
+             "idx": idx,
+             "phoneme_ids": phoneme_ids,
+             "phoneme_ids_len": phoneme_ids_len,
+             "semantic_ids": semantic_ids,
+             "semantic_ids_len": semantic_ids_len,
+             "bert_feature": bert_feature,
+         }
+
+     def get_sample_length(self, idx: int):
+         semantic_ids = self.semantic_phoneme[idx][0]
+         sec = 1.0 * len(semantic_ids) / self.hz
+         return sec
+
+     def collate(self, examples: List[Dict]) -> Dict:
+         sample_index: List[int] = []
+         phoneme_ids: List[torch.Tensor] = []
+         phoneme_ids_lens: List[int] = []
+         semantic_ids: List[torch.Tensor] = []
+         semantic_ids_lens: List[int] = []
+         # return
+
+         for item in examples:
+             sample_index.append(item["idx"])
+             phoneme_ids.append(np.array(item["phoneme_ids"], dtype=np.int64))
+             semantic_ids.append(np.array(item["semantic_ids"], dtype=np.int64))
+             phoneme_ids_lens.append(item["phoneme_ids_len"])
+             semantic_ids_lens.append(item["semantic_ids_len"])
+
+         # pad 0
+         phoneme_ids = batch_sequences(phoneme_ids)
+         semantic_ids = batch_sequences(semantic_ids, pad_value=self.PAD)
+
+         # # convert each batch to torch.tensor
+         phoneme_ids = torch.tensor(phoneme_ids)
+         semantic_ids = torch.tensor(semantic_ids)
+         phoneme_ids_lens = torch.tensor(phoneme_ids_lens)
+         semantic_ids_lens = torch.tensor(semantic_ids_lens)
+         bert_padded = torch.FloatTensor(len(examples), 1024, max(phoneme_ids_lens))
+         bert_padded.zero_()
+
+         for idx, item in enumerate(examples):
+             bert = item["bert_feature"]
+             if bert != None:
+                 bert_padded[idx, :, : bert.shape[-1]] = bert
+
+         return {
+             # List[int]
+             "ids": sample_index,
+             # torch.Tensor (B, max_phoneme_length)
+             "phoneme_ids": phoneme_ids,
+             # torch.Tensor (B)
+             "phoneme_ids_len": phoneme_ids_lens,
+             # torch.Tensor (B, max_semantic_ids_length)
+             "semantic_ids": semantic_ids,
+             # torch.Tensor (B)
+             "semantic_ids_len": semantic_ids_lens,
+             # torch.Tensor (B, 1024, max_phoneme_length)
+             "bert_feature": bert_padded,
+         }
+
+
+ if __name__ == "__main__":
+     root_dir = "/data/docker/liujing04/gpt-vits/prepare/dump_mix/"
+     dataset = Text2SemanticDataset(
+         phoneme_path=root_dir + "phoneme_train.npy",
+         semantic_path=root_dir + "semantic_train.tsv",
+     )
+
+     batch_size = 12
+     dataloader = DataLoader(
+         dataset, batch_size=batch_size, collate_fn=dataset.collate, shuffle=False
+     )
+     for i, batch in enumerate(dataloader):
+         if i % 1000 == 0:
+             print(i)
+         # if i == 0:
+         #     print('batch["ids"]:', batch["ids"])
+         #     print('batch["phoneme_ids"]:', batch["phoneme_ids"],
+         #           batch["phoneme_ids"].shape)
+         #     print('batch["phoneme_ids_len"]:', batch["phoneme_ids_len"],
+         #           batch["phoneme_ids_len"].shape)
+         #     print('batch["semantic_ids"]:', batch["semantic_ids"],
+         #           batch["semantic_ids"].shape)
+         #     print('batch["semantic_ids_len"]:', batch["semantic_ids_len"],
+         #           batch["semantic_ids_len"].shape)
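
The collate method above relies on batch_sequences to right-pad ragged phoneme/semantic id arrays into a rectangular batch (phoneme ids are padded with 0, semantic ids with pad_val=1024). A self-contained sketch of that behaviour (not part of the commit), restated with just numpy for illustration:

import numpy as np

def batch_sequences(sequences, axis=0, pad_value=0):
    # Trimmed restatement of the helper above: pad every array to the longest one along `axis`.
    ndim = sequences[0].ndim
    if axis < 0:
        axis += ndim
    pad_value = sequences[0].dtype.type(pad_value)
    lengths = [s.shape[axis] for s in sequences]
    max_len = max(lengths)
    padded = []
    for s, n in zip(sequences, lengths):
        pad = [(0, 0)] * axis + [(0, max_len - n)] + [(0, 0)] * (ndim - axis - 1)
        padded.append(np.pad(s, pad, mode="constant", constant_values=pad_value))
    return np.stack(padded)

phoneme_ids = [np.array([3, 7, 9], dtype=np.int64), np.array([5, 2], dtype=np.int64)]
print(batch_sequences(phoneme_ids))                  # [[3 7 9] [5 2 0]]  -- padded with 0
print(batch_sequences(phoneme_ids, pad_value=1024))  # semantic ids would use PAD = 1024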
GPT_SoVITS/AR/models/__init__.py ADDED
File without changes
GPT_SoVITS/AR/models/t2s_lightning_module.py ADDED
@@ -0,0 +1,141 @@
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
+ # reference: https://github.com/lifeiteng/vall-e
+ import os, sys
+
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+ from typing import Dict
+
+ import torch
+ from pytorch_lightning import LightningModule
+ from AR.models.t2s_model import Text2SemanticDecoder
+ from AR.modules.lr_schedulers import WarmupCosineLRSchedule
+ from AR.modules.optim import ScaledAdam
+
+ class Text2SemanticLightningModule(LightningModule):
+     def __init__(self, config, output_dir, is_train=True):
+         super().__init__()
+         self.config = config
+         self.top_k = 3
+         self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
+         pretrained_s1 = config.get("pretrained_s1")
+         if pretrained_s1 and is_train:
+             # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
+             print(
+                 self.load_state_dict(
+                     torch.load(pretrained_s1, map_location="cpu")["weight"]
+                 )
+             )
+         if is_train:
+             self.automatic_optimization = False
+             self.save_hyperparameters()
+             self.eval_dir = output_dir / "eval"
+             self.eval_dir.mkdir(parents=True, exist_ok=True)
+
+     def training_step(self, batch: Dict, batch_idx: int):
+         opt = self.optimizers()
+         scheduler = self.lr_schedulers()
+         forward = self.model.forward if self.config["train"].get("if_dpo", False) == True else self.model.forward_old
+         loss, acc = forward(
+             batch["phoneme_ids"],
+             batch["phoneme_ids_len"],
+             batch["semantic_ids"],
+             batch["semantic_ids_len"],
+             batch["bert_feature"],
+         )
+         self.manual_backward(loss)
+         if batch_idx > 0 and batch_idx % 4 == 0:
+             opt.step()
+             opt.zero_grad()
+             scheduler.step()
+
+         self.log(
+             "total_loss",
+             loss,
+             on_step=True,
+             on_epoch=True,
+             prog_bar=True,
+             sync_dist=True,
+         )
+         self.log(
+             "lr",
+             scheduler.get_last_lr()[0],
+             on_epoch=True,
+             prog_bar=True,
+             sync_dist=True,
+         )
+         self.log(
+             f"top_{self.top_k}_acc",
+             acc,
+             on_step=True,
+             on_epoch=True,
+             prog_bar=True,
+             sync_dist=True,
+         )
+
+     def validation_step(self, batch: Dict, batch_idx: int):
+         return
+
+     # # get loss
+     # loss, acc = self.model.forward(
+     #     batch['phoneme_ids'], batch['phoneme_ids_len'],
+     #     batch['semantic_ids'], batch['semantic_ids_len'],
+     #     batch['bert_feature']
+     # )
+     #
+     # self.log(
+     #     "val_total_loss",
+     #     loss,
+     #     on_step=True,
+     #     on_epoch=True,
+     #     prog_bar=True,
+     #     sync_dist=True)
+     # self.log(
+     #     f"val_top_{self.top_k}_acc",
+     #     acc,
+     #     on_step=True,
+     #     on_epoch=True,
+     #     prog_bar=True,
+     #     sync_dist=True)
+     #
+     # # get infer output
+     # semantic_len = batch['semantic_ids'].size(1)
+     # prompt_len = min(int(semantic_len * 0.5), 150)
+     # prompt = batch['semantic_ids'][:, :prompt_len]
+     # pred_semantic = self.model.infer(batch['phoneme_ids'],
+     #                                  batch['phoneme_ids_len'], prompt,
+     #                                  batch['bert_feature']
+     #                                  )
+     # save_name = f'semantic_toks_{batch_idx}.pt'
+     # save_path = os.path.join(self.eval_dir, save_name)
+     # torch.save(pred_semantic.detach().cpu(), save_path)
+
+     def configure_optimizers(self):
+         model_parameters = self.model.parameters()
+         parameters_names = []
+         parameters_names.append(
+             [name_param_pair[0] for name_param_pair in self.model.named_parameters()]
+         )
+         lm_opt = ScaledAdam(
+             model_parameters,
+             lr=0.01,
+             betas=(0.9, 0.95),
+             clipping_scale=2.0,
+             parameters_names=parameters_names,
+             show_dominant_parameters=False,
+             clipping_update_period=1000,
+         )
+
+         return {
+             "optimizer": lm_opt,
+             "lr_scheduler": {
+                 "scheduler": WarmupCosineLRSchedule(
+                     lm_opt,
+                     init_lr=self.config["optimizer"]["lr_init"],
+                     peak_lr=self.config["optimizer"]["lr"],
+                     end_lr=self.config["optimizer"]["lr_end"],
+                     warmup_steps=self.config["optimizer"]["warmup_steps"],
+                     total_steps=self.config["optimizer"]["decay_steps"],
+                 )
+             },
+         }
GPT_SoVITS/AR/models/t2s_lightning_module_onnx.py ADDED
@@ -0,0 +1,107 @@
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_lightning_module.py
+ # reference: https://github.com/lifeiteng/vall-e
+ import os, sys
+
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+ from typing import Dict
+
+ import torch
+ from pytorch_lightning import LightningModule
+ from AR.models.t2s_model_onnx import Text2SemanticDecoder
+ from AR.modules.lr_schedulers import WarmupCosineLRSchedule
+ from AR.modules.optim import ScaledAdam
+
+
+ class Text2SemanticLightningModule(LightningModule):
+     def __init__(self, config, output_dir, is_train=True):
+         super().__init__()
+         self.config = config
+         self.top_k = 3
+         self.model = Text2SemanticDecoder(config=config, top_k=self.top_k)
+         pretrained_s1 = config.get("pretrained_s1")
+         if pretrained_s1 and is_train:
+             # print(self.load_state_dict(torch.load(pretrained_s1,map_location="cpu")["state_dict"]))
+             print(
+                 self.load_state_dict(
+                     torch.load(pretrained_s1, map_location="cpu")["weight"]
+                 )
+             )
+         if is_train:
+             self.automatic_optimization = False
+             self.save_hyperparameters()
+             self.eval_dir = output_dir / "eval"
+             self.eval_dir.mkdir(parents=True, exist_ok=True)
+
+     def training_step(self, batch: Dict, batch_idx: int):
+         opt = self.optimizers()
+         scheduler = self.lr_schedulers()
+         loss, acc = self.model.forward(
+             batch["phoneme_ids"],
+             batch["phoneme_ids_len"],
+             batch["semantic_ids"],
+             batch["semantic_ids_len"],
+             batch["bert_feature"],
+         )
+         self.manual_backward(loss)
+         if batch_idx > 0 and batch_idx % 4 == 0:
+             opt.step()
+             opt.zero_grad()
+             scheduler.step()
+
+         self.log(
+             "total_loss",
+             loss,
+             on_step=True,
+             on_epoch=True,
+             prog_bar=True,
+             sync_dist=True,
+         )
+         self.log(
+             "lr",
+             scheduler.get_last_lr()[0],
+             on_epoch=True,
+             prog_bar=True,
+             sync_dist=True,
+         )
+         self.log(
+             f"top_{self.top_k}_acc",
+             acc,
+             on_step=True,
+             on_epoch=True,
+             prog_bar=True,
+             sync_dist=True,
+         )
+
+     def validation_step(self, batch: Dict, batch_idx: int):
+         return
+
+     def configure_optimizers(self):
+         model_parameters = self.model.parameters()
+         parameters_names = []
+         parameters_names.append(
+             [name_param_pair[0] for name_param_pair in self.model.named_parameters()]
+         )
+         lm_opt = ScaledAdam(
+             model_parameters,
+             lr=0.01,
+             betas=(0.9, 0.95),
+             clipping_scale=2.0,
+             parameters_names=parameters_names,
+             show_dominant_parameters=False,
+             clipping_update_period=1000,
+         )
+
+         return {
+             "optimizer": lm_opt,
+             "lr_scheduler": {
+                 "scheduler": WarmupCosineLRSchedule(
+                     lm_opt,
+                     init_lr=self.config["optimizer"]["lr_init"],
+                     peak_lr=self.config["optimizer"]["lr"],
+                     end_lr=self.config["optimizer"]["lr_end"],
+                     warmup_steps=self.config["optimizer"]["warmup_steps"],
+                     total_steps=self.config["optimizer"]["decay_steps"],
+                 )
+             },
+         }
GPT_SoVITS/AR/models/t2s_model.py ADDED
@@ -0,0 +1,448 @@
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
+ # reference: https://github.com/lifeiteng/vall-e
+ import torch
+ from tqdm import tqdm
+
+ from AR.models.utils import make_pad_mask
+ from AR.models.utils import (
+     topk_sampling,
+     sample,
+     logits_to_probs,
+     multinomial_sample_one_no_sync,
+     dpo_loss,
+     make_reject_y,
+     get_batch_logps
+ )
+ from AR.modules.embedding import SinePositionalEmbedding
+ from AR.modules.embedding import TokenEmbedding
+ from AR.modules.transformer import LayerNorm
+ from AR.modules.transformer import TransformerEncoder
+ from AR.modules.transformer import TransformerEncoderLayer
+ from torch import nn
+ from torch.nn import functional as F
+ from torchmetrics.classification import MulticlassAccuracy
+
+ default_config = {
+     "embedding_dim": 512,
+     "hidden_dim": 512,
+     "num_head": 8,
+     "num_layers": 12,
+     "num_codebook": 8,
+     "p_dropout": 0.0,
+     "vocab_size": 1024 + 1,
+     "phoneme_vocab_size": 512,
+     "EOS": 1024,
+ }
+
+
+ class Text2SemanticDecoder(nn.Module):
+     def __init__(self, config, norm_first=False, top_k=3):
+         super(Text2SemanticDecoder, self).__init__()
+         self.model_dim = config["model"]["hidden_dim"]
+         self.embedding_dim = config["model"]["embedding_dim"]
+         self.num_head = config["model"]["head"]
+         self.num_layers = config["model"]["n_layer"]
+         self.norm_first = norm_first
+         self.vocab_size = config["model"]["vocab_size"]
+         self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
+         self.p_dropout = config["model"]["dropout"]
+         self.EOS = config["model"]["EOS"]
+         self.norm_first = norm_first
+         assert self.EOS == self.vocab_size - 1
+         # should be same as num of kmeans bin
+         # assert self.EOS == 1024
+         self.bert_proj = nn.Linear(1024, self.embedding_dim)
+         self.ar_text_embedding = TokenEmbedding(
+             self.embedding_dim, self.phoneme_vocab_size, self.p_dropout
+         )
+         self.ar_text_position = SinePositionalEmbedding(
+             self.embedding_dim, dropout=0.1, scale=False, alpha=True
+         )
+         self.ar_audio_embedding = TokenEmbedding(
+             self.embedding_dim, self.vocab_size, self.p_dropout
+         )
+         self.ar_audio_position = SinePositionalEmbedding(
+             self.embedding_dim, dropout=0.1, scale=False, alpha=True
+         )
+
+         self.h = TransformerEncoder(
+             TransformerEncoderLayer(
+                 d_model=self.model_dim,
+                 nhead=self.num_head,
+                 dim_feedforward=self.model_dim * 4,
+                 dropout=0.1,
+                 batch_first=True,
+                 norm_first=norm_first,
+             ),
+             num_layers=self.num_layers,
+             norm=LayerNorm(self.model_dim) if norm_first else None,
+         )
+
+         self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
+         self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
+
+         self.ar_accuracy_metric = MulticlassAccuracy(
+             self.vocab_size,
+             top_k=top_k,
+             average="micro",
+             multidim_average="global",
+             ignore_index=self.EOS,
+         )
+
+     def make_input_data(self, x, x_lens, y, y_lens, bert_feature):
+         x = self.ar_text_embedding(x)
+         x = x + self.bert_proj(bert_feature.transpose(1, 2))
+         x = self.ar_text_position(x)
+         x_mask = make_pad_mask(x_lens)
+
+         y_mask = make_pad_mask(y_lens)
+         y_mask_int = y_mask.type(torch.int64)
+         codes = y.type(torch.int64) * (1 - y_mask_int)
+
+         # Training
+         # AR Decoder
+         y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
+         x_len = x_lens.max()
+         y_len = y_lens.max()
+         y_emb = self.ar_audio_embedding(y)
+         y_pos = self.ar_audio_position(y_emb)
+
+         xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
+
+         ar_xy_padding_mask = xy_padding_mask
+
+         x_attn_mask = F.pad(
+             torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
+             (0, y_len),
+             value=True,
+         )
+
+         y_attn_mask = F.pad(
+             torch.triu(
+                 torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
+                 diagonal=1,
+             ),
+             (x_len, 0),
+             value=False,
+         )
+
+         xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
+         bsz, src_len = x.shape[0], x_len + y_len
+         _xy_padding_mask = (
+             ar_xy_padding_mask.view(bsz, 1, 1, src_len)
+             .expand(-1, self.num_head, -1, -1)
+             .reshape(bsz * self.num_head, 1, src_len)
+         )
+         xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
+         new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
+         new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
+         xy_attn_mask = new_attn_mask
+         # x and the full y are fed to the model in a single pass
+         xy_pos = torch.concat([x, y_pos], dim=1)
+
+         return xy_pos, xy_attn_mask, targets
+
+     def forward(self, x, x_lens, y, y_lens, bert_feature):
+         """
+         x: phoneme_ids
+         y: semantic_ids
+         """
+
+         reject_y, reject_y_lens = make_reject_y(y, y_lens)
+
+         xy_pos, xy_attn_mask, targets = self.make_input_data(x, x_lens, y, y_lens, bert_feature)
+
+         xy_dec, _ = self.h(
+             (xy_pos, None),
+             mask=xy_attn_mask,
+         )
+         x_len = x_lens.max()
+         logits = self.ar_predict_layer(xy_dec[:, x_len:])
+
+         ###### DPO #############
+         reject_xy_pos, reject_xy_attn_mask, reject_targets = self.make_input_data(x, x_lens, reject_y, reject_y_lens, bert_feature)
+
+         reject_xy_dec, _ = self.h(
+             (reject_xy_pos, None),
+             mask=reject_xy_attn_mask,
+         )
+         x_len = x_lens.max()
+         reject_logits = self.ar_predict_layer(reject_xy_dec[:, x_len:])
+
+         # loss
+         # from feiteng: the longer the duration, the larger the gradient update should be, hence the sum reduction
+
+         loss_1 = F.cross_entropy(logits.permute(0, 2, 1), targets, reduction="sum")
+         acc = self.ar_accuracy_metric(logits.permute(0, 2, 1).detach(), targets).item()
+
+         A_logits, R_logits = get_batch_logps(logits, reject_logits, targets, reject_targets)
+         loss_2, _, _ = dpo_loss(A_logits, R_logits, 0, 0, 0.2, reference_free=True)
+
+         loss = loss_1 + loss_2
+
+         return loss, acc
+
+     def forward_old(self, x, x_lens, y, y_lens, bert_feature):
+         """
+         x: phoneme_ids
+         y: semantic_ids
+         """
+         x = self.ar_text_embedding(x)
+         x = x + self.bert_proj(bert_feature.transpose(1, 2))
+         x = self.ar_text_position(x)
+         x_mask = make_pad_mask(x_lens)
+
+         y_mask = make_pad_mask(y_lens)
+         y_mask_int = y_mask.type(torch.int64)
+         codes = y.type(torch.int64) * (1 - y_mask_int)
+
+         # Training
+         # AR Decoder
+         y, targets = self.pad_y_eos(codes, y_mask_int, eos_id=self.EOS)
+         x_len = x_lens.max()
+         y_len = y_lens.max()
+         y_emb = self.ar_audio_embedding(y)
+         y_pos = self.ar_audio_position(y_emb)
+
+         xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)
+         ar_xy_padding_mask = xy_padding_mask
+
+         x_attn_mask = F.pad(
+             torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),
+             (0, y_len),
+             value=True,
+         )
+         y_attn_mask = F.pad(
+             torch.triu(
+                 torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),
+                 diagonal=1,
+             ),
+             (x_len, 0),
+             value=False,
+         )
+         xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)
+         bsz, src_len = x.shape[0], x_len + y_len
+         _xy_padding_mask = (
+             ar_xy_padding_mask.view(bsz, 1, 1, src_len)
+             .expand(-1, self.num_head, -1, -1)
+             .reshape(bsz * self.num_head, 1, src_len)
+         )
+         xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)
+         new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)
+         new_attn_mask.masked_fill_(xy_attn_mask, float("-inf"))
+         xy_attn_mask = new_attn_mask
+         # x and the full y are fed to the model in a single pass
+         xy_pos = torch.concat([x, y_pos], dim=1)
+         xy_dec, _ = self.h(
+             (xy_pos, None),
+             mask=xy_attn_mask,
+         )
+         logits = self.ar_predict_layer(xy_dec[:, x_len:]).permute(0, 2, 1)
+         # loss
+         # from feiteng: the longer the duration, the larger the gradient update should be, hence the sum reduction
+         loss = F.cross_entropy(logits, targets, reduction="sum")
+         acc = self.ar_accuracy_metric(logits.detach(), targets).item()
+         return loss, acc
+
+     # TODO: check how this differs from forward, and what to pass as prompts when there is no semantic prompt
+     def infer(
+         self,
+         x,
+         x_lens,
+         prompts,
+         bert_feature,
+         top_k: int = -100,
+         early_stop_num: int = -1,
+         temperature: float = 1.0,
+     ):
+         x = self.ar_text_embedding(x)
+         x = x + self.bert_proj(bert_feature.transpose(1, 2))
+         x = self.ar_text_position(x)
+
+         # AR Decoder
+         y = prompts
+         prefix_len = y.shape[1]
+         x_len = x.shape[1]
+         x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
+         stop = False
+         for _ in tqdm(range(1500)):
+             y_emb = self.ar_audio_embedding(y)
+             y_pos = self.ar_audio_position(y_emb)
+             # x and the gradually growing y are fed to the model together
+             xy_pos = torch.concat([x, y_pos], dim=1)
+             y_len = y.shape[1]
+             x_attn_mask_pad = F.pad(
+                 x_attn_mask,
+                 (0, y_len),
+                 value=True,
+             )
+             y_attn_mask = F.pad(
+                 torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
+                 (x_len, 0),
+                 value=False,
+             )
+             xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
+                 y.device
+             )
+
+             xy_dec, _ = self.h(
+                 (xy_pos, None),
+                 mask=xy_attn_mask,
+             )
+             logits = self.ar_predict_layer(xy_dec[:, -1])
+             samples = topk_sampling(
+                 logits, top_k=top_k, top_p=1.0, temperature=temperature
+             )
+
+             if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
+                 print("use early stop num:", early_stop_num)
+                 stop = True
+
+             if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
+                 # print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
+                 stop = True
+             if stop:
+                 if prompts.shape[1] == y.shape[1]:
+                     y = torch.concat([y, torch.zeros_like(samples)], dim=1)
+                     print("bad zero prediction")
+                 print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
+                 break
+             # the semantic_ids generated this step are appended to the previous y to form the new y
+             # print(samples.shape)  # [1,1]; the first 1 is the batch size
+             # import os
+             # os._exit(2333)
+             y = torch.concat([y, samples], dim=1)
+         return y
+
+     def pad_y_eos(self, y, y_mask_int, eos_id):
+         targets = F.pad(y, (0, 1), value=0) + eos_id * F.pad(
+             y_mask_int, (0, 1), value=1
+         )
+         # shifted by one position
+         return targets[:, :-1], targets[:, 1:]
+
+     def infer_panel(
+         self,
+         x,  # all of the text tokens
+         x_lens,
+         prompts,  # reference audio tokens
+         bert_feature,
+         top_k: int = -100,
+         top_p: int = 100,
+         early_stop_num: int = -1,
+         temperature: float = 1.0,
+     ):
+         x = self.ar_text_embedding(x)
+         x = x + self.bert_proj(bert_feature.transpose(1, 2))
+         x = self.ar_text_position(x)
+
+         # AR Decoder
+         y = prompts
+
+         x_len = x.shape[1]
+         x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)
+         stop = False
+         # print(1111111,self.num_layers)
+         cache = {
+             "all_stage": self.num_layers,
+             "k": [None] * self.num_layers,  # written by hand to match the config
+             "v": [None] * self.num_layers,
+             # "xy_pos": None,  # the y_pos positional encoding changes every step, so it cannot be cached and xy_pos has to be re-concatenated each time; mostly a matter of how this is written -- it could be kept consistent with the history, but the extra compute is negligible, so it is left as is
+             "y_emb": None,  # only the newest samples need a new embedding, which is then concatenated onto the history
+             # "logits": None,  # the original already computes only the tail and concatenates, nothing to change
+             # "xy_dec": None,  # not needed; only the last position is used for the logits
+             "first_infer": 1,
+             "stage": 0,
+         }
+         ################### first step ##########################
+         if y is not None:
+             y_emb = self.ar_audio_embedding(y)
+             y_len = y_emb.shape[1]
+             prefix_len = y.shape[1]
+             y_pos = self.ar_audio_position(y_emb)
+             xy_pos = torch.concat([x, y_pos], dim=1)
+             cache["y_emb"] = y_emb
+             ref_free = False
+         else:
+             y_emb = None
+             y_len = 0
+             prefix_len = 0
+             y_pos = None
+             xy_pos = x
+             y = torch.zeros(x.shape[0], 0, dtype=torch.int, device=x.device)
+             ref_free = True
+
+         x_attn_mask_pad = F.pad(
+             x_attn_mask,
+             (0, y_len),  # extend the all-zero xx block to all-zero xx plus all-one xy, shape (x, x+y)
+             value=True,
+         )
+         y_attn_mask = F.pad(  # extend the upper-right ones of yy with zeros on the left xy side, shape (y, x+y)
+             torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
+             (x_len, 0),
+             value=False,
+         )
+         xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(
+             x.device
+         )
+
+
+         for idx in tqdm(range(1500)):
+
+             xy_dec, _ = self.h((xy_pos, None), mask=xy_attn_mask, cache=cache)
+             logits = self.ar_predict_layer(
+                 xy_dec[:, -1]
+             )  # no change needed: with the cache there is only one frame anyway, same as taking the last one
+             # samples = topk_sampling(logits, top_k=top_k, top_p=1.0, temperature=temperature)
+             if (idx == 0):  # the first step must not emit EOS, otherwise nothing gets generated
+                 logits = logits[:, :-1]  # drop the probability of the 1024 end-of-sequence token
+             samples = sample(
+                 logits[0], y, top_k=top_k, top_p=top_p, repetition_penalty=1.35, temperature=temperature
+             )[0].unsqueeze(0)
+             # the semantic_ids generated this step are appended to the previous y to form the new y
+             # print(samples.shape)  # [1,1]; the first 1 is the batch size
+             y = torch.concat([y, samples], dim=1)
+
+             if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
+                 print("use early stop num:", early_stop_num)
+                 stop = True
+
+             if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
+                 # print(torch.argmax(logits, dim=-1)[0] == self.EOS, samples[0, 0] == self.EOS)
+                 stop = True
+             if stop:
+                 # if prompts.shape[1] == y.shape[1]:
+                 #     y = torch.concat([y, torch.zeros_like(samples)], dim=1)
+                 #     print("bad zero prediction")
+                 if y.shape[1] == 0:
+                     y = torch.concat([y, torch.zeros_like(samples)], dim=1)
+                     print("bad zero prediction")
+                 print(f"T2S Decoding EOS [{prefix_len} -> {y.shape[1]}]")
+                 break
+
+             ####################### update next step ###################################
+             cache["first_infer"] = 0
+             if cache["y_emb"] is not None:
+                 y_emb = torch.cat(
+                     [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], dim=1
+                 )
+                 cache["y_emb"] = y_emb
+                 y_pos = self.ar_audio_position(y_emb)
+                 xy_pos = y_pos[:, -1:]
+             else:
+                 y_emb = self.ar_audio_embedding(y[:, -1:])
+                 cache["y_emb"] = y_emb
+                 y_pos = self.ar_audio_position(y_emb)
+                 xy_pos = y_pos
+             y_len = y_pos.shape[1]
+
+             # right-most column (this was wrong)
+             # xy_attn_mask=torch.ones((1, x_len+y_len), dtype=torch.bool,device=xy_pos.device)
+             # xy_attn_mask[:,-1]=False
+             # bottom row (this is correct)
+             xy_attn_mask = torch.zeros(
+                 (1, x_len + y_len), dtype=torch.bool, device=xy_pos.device
+             )
+         if ref_free:
+             return y[:, :-1], 0
+         return y[:, :-1], idx - 1
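
A minimal construction sketch for the decoder above (not part of the commit): Text2SemanticDecoder only reads the config["model"] keys shown in __init__, so a small dict is enough to instantiate it. The values below are illustrative placeholders taken from this file's default_config; the real training values live in the GPT_SoVITS/configs/s1*.yaml files added by this commit. Assumes the GPT_SoVITS directory is on PYTHONPATH.

from AR.models.t2s_model import Text2SemanticDecoder

config = {
    "model": {
        "hidden_dim": 512,       # illustrative; see configs/s1*.yaml for the real values
        "embedding_dim": 512,
        "head": 8,
        "n_layer": 12,
        "vocab_size": 1024 + 1,  # 1024 semantic tokens + EOS
        "phoneme_vocab_size": 512,
        "dropout": 0.0,
        "EOS": 1024,             # must equal vocab_size - 1 (asserted in __init__)
    }
}
model = Text2SemanticDecoder(config, top_k=3)
print(sum(p.numel() for p in model.parameters()) / 1e6, "M parameters")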
GPT_SoVITS/AR/models/t2s_model_onnx.py ADDED
@@ -0,0 +1,338 @@
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/t2s_model.py
+ # reference: https://github.com/lifeiteng/vall-e
+ import torch
+ from tqdm import tqdm
+
+ from AR.modules.embedding_onnx import SinePositionalEmbedding
+ from AR.modules.embedding_onnx import TokenEmbedding
+ from AR.modules.transformer_onnx import LayerNorm
+ from AR.modules.transformer_onnx import TransformerEncoder
+ from AR.modules.transformer_onnx import TransformerEncoderLayer
+ from torch import nn
+ from torch.nn import functional as F
+ from torchmetrics.classification import MulticlassAccuracy
+
+ default_config = {
+     "embedding_dim": 512,
+     "hidden_dim": 512,
+     "num_head": 8,
+     "num_layers": 12,
+     "num_codebook": 8,
+     "p_dropout": 0.0,
+     "vocab_size": 1024 + 1,
+     "phoneme_vocab_size": 512,
+     "EOS": 1024,
+ }
+
+ inf_tensor_value = torch.FloatTensor([-float("Inf")]).float()
+
+ def logits_to_probs(
+     logits,
+     previous_tokens = None,
+     temperature: float = 1.0,
+     top_k = None,
+     top_p = None,
+     repetition_penalty: float = 1.0,
+ ):
+     previous_tokens = previous_tokens.squeeze()
+     if previous_tokens is not None and repetition_penalty != 1.0:
+         previous_tokens = previous_tokens.long()
+         score = torch.gather(logits, dim=0, index=previous_tokens)
+         score = torch.where(
+             score < 0, score * repetition_penalty, score / repetition_penalty
+         )
+         logits.scatter_(dim=0, index=previous_tokens, src=score)
+
+     if top_p is not None and top_p < 1.0:
+         sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+         cum_probs = torch.cumsum(
+             torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
+         )
+         sorted_indices_to_remove = cum_probs > top_p
+         sorted_indices_to_remove[0] = False  # keep at least one option
+         indices_to_remove = sorted_indices_to_remove.scatter(
+             dim=0, index=sorted_indices, src=sorted_indices_to_remove
+         )
+         logits = logits.masked_fill(indices_to_remove, -float("Inf"))
+
+     logits = logits / max(temperature, 1e-5)
+
+     if top_k is not None:
+         v, _ = torch.topk(logits, top_k)
+         pivot = v.select(-1, -1).unsqueeze(-1)
+         logits = torch.where(logits < pivot, inf_tensor_value, logits)
+
+     probs = torch.nn.functional.softmax(logits, dim=-1)
+     return probs
+
+
+ def multinomial_sample_one_no_sync(
+     probs_sort
+ ):  # Does multinomial sampling without a cuda synchronization
+     q = torch.randn_like(probs_sort)
+     return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
+
+
+ def sample(
+     logits,
+     previous_tokens,
+     **sampling_kwargs,
+ ):
+     probs = logits_to_probs(
+         logits=logits, previous_tokens=previous_tokens, **sampling_kwargs
+     )
+     idx_next = multinomial_sample_one_no_sync(probs)
+     return idx_next, probs
+
+
+ class OnnxEncoder(nn.Module):
+     def __init__(self, ar_text_embedding, bert_proj, ar_text_position):
+         super().__init__()
+         self.ar_text_embedding = ar_text_embedding
+         self.bert_proj = bert_proj
+         self.ar_text_position = ar_text_position
+
+     def forward(self, x, bert_feature):
+         x = self.ar_text_embedding(x)
+         x = x + self.bert_proj(bert_feature.transpose(1, 2))
+         return self.ar_text_position(x)
+
+
+ class T2SFirstStageDecoder(nn.Module):
+     def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric,
+                  top_k, early_stop_num, num_layers):
+         super().__init__()
+         self.ar_audio_embedding = ar_audio_embedding
+         self.ar_audio_position = ar_audio_position
+         self.h = h
+         self.ar_predict_layer = ar_predict_layer
+         self.loss_fct = loss_fct
+         self.ar_accuracy_metric = ar_accuracy_metric
+         self.top_k = top_k
+         self.early_stop_num = early_stop_num
+         self.num_layers = num_layers
+
+     def forward(self, x, prompt):
+         y = prompt
+         x_example = x[:,:,0] * 0.0
+         # N, 1, 512
+         cache = {
+             "all_stage": self.num_layers,
+             "k": None,
+             "v": None,
+             "y_emb": None,
+             "first_infer": 1,
+             "stage": 0,
+         }
+
+         y_emb = self.ar_audio_embedding(y)
+
+         cache["y_emb"] = y_emb
+         y_pos = self.ar_audio_position(y_emb)
+
+         xy_pos = torch.concat([x, y_pos], dim=1)
+
+         y_example = y_pos[:,:,0] * 0.0
+         x_attn_mask = torch.matmul(x_example.transpose(0, 1) , x_example).bool()
+         y_attn_mask = torch.ones_like(torch.matmul(y_example.transpose(0, 1), y_example), dtype=torch.int64)
138
+ y_attn_mask = torch.cumsum(y_attn_mask, dim=1) - torch.cumsum(
139
+ torch.ones_like(y_example.transpose(0, 1), dtype=torch.int64), dim=0
140
+ )
141
+ y_attn_mask = y_attn_mask > 0
142
+
143
+ x_y_pad = torch.matmul(x_example.transpose(0, 1), y_example).bool()
144
+ y_x_pad = torch.matmul(y_example.transpose(0, 1), x_example).bool()
145
+ x_attn_mask_pad = torch.cat([x_attn_mask, torch.ones_like(x_y_pad)], dim=1)
146
+ y_attn_mask = torch.cat([y_x_pad, y_attn_mask], dim=1)
147
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
148
+ cache["k"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\
149
+ .unsqueeze(1).repeat(self.num_layers, 1, 1, 1)
150
+ cache["v"] = torch.matmul(x_attn_mask_pad[0].float().unsqueeze(-1), torch.zeros((1, 512)))\
151
+ .unsqueeze(1).repeat(self.num_layers, 1, 1, 1)
152
+
153
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
154
+ logits = self.ar_predict_layer(xy_dec[:, -1])
155
+ samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
156
+
157
+ y = torch.concat([y, samples], dim=1)
158
+
159
+ return y, cache["k"], cache["v"], cache["y_emb"], x_example
160
+
161
+
162
+ class T2SStageDecoder(nn.Module):
163
+ def __init__(self, ar_audio_embedding, ar_audio_position, h, ar_predict_layer, loss_fct, ar_accuracy_metric,
164
+ top_k, early_stop_num, num_layers):
165
+ super().__init__()
166
+ self.ar_audio_embedding = ar_audio_embedding
167
+ self.ar_audio_position = ar_audio_position
168
+ self.h = h
169
+ self.ar_predict_layer = ar_predict_layer
170
+ self.loss_fct = loss_fct
171
+ self.ar_accuracy_metric = ar_accuracy_metric
172
+ self.top_k = top_k
173
+ self.early_stop_num = early_stop_num
174
+ self.num_layers = num_layers
175
+
176
+ def forward(self, y, k, v, y_emb, x_example):
177
+ cache = {
178
+ "all_stage": self.num_layers,
179
+ "k": torch.nn.functional.pad(k, (0, 0, 0, 0, 0, 1)),
180
+ "v": torch.nn.functional.pad(v, (0, 0, 0, 0, 0, 1)),
181
+ "y_emb": y_emb,
182
+ "first_infer": 0,
183
+ "stage": 0,
184
+ }
185
+
186
+ y_emb = torch.cat(
187
+ [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
188
+ )
189
+ cache["y_emb"] = y_emb
190
+ y_pos = self.ar_audio_position(y_emb)
191
+
192
+ xy_pos = y_pos[:, -1:]
193
+
194
+ y_example = y_pos[:,:,0] * 0.0
195
+
196
+ xy_attn_mask = torch.cat([x_example, y_example], dim=1)
197
+ xy_attn_mask = torch.zeros_like(xy_attn_mask, dtype=torch.bool)
198
+
199
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
200
+ logits = self.ar_predict_layer(xy_dec[:, -1])
201
+ samples = sample(logits[0], y, top_k=self.top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
202
+
203
+ y = torch.concat([y, samples], dim=1)
204
+
205
+ return y, cache["k"], cache["v"], cache["y_emb"], logits, samples
206
+
207
+
208
+ class Text2SemanticDecoder(nn.Module):
209
+ def __init__(self, config, norm_first=False, top_k=3):
210
+ super(Text2SemanticDecoder, self).__init__()
211
+ self.model_dim = config["model"]["hidden_dim"]
212
+ self.embedding_dim = config["model"]["embedding_dim"]
213
+ self.num_head = config["model"]["head"]
214
+ self.num_layers = config["model"]["n_layer"]
215
+ self.norm_first = norm_first
216
+ self.vocab_size = config["model"]["vocab_size"]
217
+ self.phoneme_vocab_size = config["model"]["phoneme_vocab_size"]
218
+ self.p_dropout = float(config["model"]["dropout"])
219
+ self.EOS = config["model"]["EOS"]
220
+ self.norm_first = norm_first
221
+ assert self.EOS == self.vocab_size - 1
222
+ self.bert_proj = nn.Linear(1024, self.embedding_dim)
223
+ self.ar_text_embedding = TokenEmbedding(self.embedding_dim, self.phoneme_vocab_size, self.p_dropout)
224
+ self.ar_text_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
225
+ self.ar_audio_embedding = TokenEmbedding(self.embedding_dim, self.vocab_size, self.p_dropout)
226
+ self.ar_audio_position = SinePositionalEmbedding(self.embedding_dim, dropout=0.1, scale=False, alpha=True)
227
+ self.h = TransformerEncoder(
228
+ TransformerEncoderLayer(
229
+ d_model=self.model_dim,
230
+ nhead=self.num_head,
231
+ dim_feedforward=self.model_dim * 4,
232
+ dropout=0.1,
233
+ batch_first=True,
234
+ norm_first=norm_first,
235
+ ),
236
+ num_layers=self.num_layers,
237
+ norm=LayerNorm(self.model_dim) if norm_first else None,
238
+ )
239
+ self.ar_predict_layer = nn.Linear(self.model_dim, self.vocab_size, bias=False)
240
+ self.loss_fct = nn.CrossEntropyLoss(reduction="sum")
241
+ self.ar_accuracy_metric = MulticlassAccuracy(
242
+ self.vocab_size,
243
+ top_k=top_k,
244
+ average="micro",
245
+ multidim_average="global",
246
+ ignore_index=self.EOS,
247
+ )
248
+ self.top_k = torch.LongTensor([1])
249
+ self.early_stop_num = torch.LongTensor([-1])
250
+
251
+ def init_onnx(self):
252
+ self.onnx_encoder = OnnxEncoder(self.ar_text_embedding, self.bert_proj, self.ar_text_position)
253
+ self.first_stage_decoder = T2SFirstStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h,
254
+ self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num,
255
+ self.num_layers)
256
+ self.stage_decoder = T2SStageDecoder(self.ar_audio_embedding, self.ar_audio_position, self.h,
257
+ self.ar_predict_layer, self.loss_fct, self.ar_accuracy_metric, self.top_k, self.early_stop_num,
258
+ self.num_layers)
259
+
260
+ def forward(self, x, prompts, bert_feature):
261
+ early_stop_num = self.early_stop_num
262
+ prefix_len = prompts.shape[1]
263
+
264
+ x = self.onnx_encoder(x, bert_feature)
265
+         y, k, v, y_emb, x_example = self.first_stage_decoder(x, prompts)
266
+
267
+ stop = False
268
+ for idx in range(1, 1500):
269
+             enco = self.stage_decoder(y, k, v, y_emb, x_example)
270
+             y, k, v, y_emb, logits, samples = enco
271
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
272
+ stop = True
273
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
274
+ stop = True
275
+ if stop:
276
+ break
277
+ y[0, -1] = 0
278
+ return y, idx
279
+
280
+ def infer(self, x, prompts, bert_feature):
281
+ top_k = self.top_k
282
+ early_stop_num = self.early_stop_num
283
+
284
+ x = self.onnx_encoder(x, bert_feature)
285
+
286
+ y = prompts
287
+ prefix_len = y.shape[1]
288
+ x_len = x.shape[1]
289
+ x_example = x[:,:,0] * 0.0
290
+ x_attn_mask = torch.matmul(x_example.transpose(0, 1), x_example)
291
+ x_attn_mask = torch.zeros_like(x_attn_mask, dtype=torch.bool)
292
+
293
+ stop = False
294
+ cache = {
295
+ "all_stage": self.num_layers,
296
+ "k": [None] * self.num_layers,
297
+ "v": [None] * self.num_layers,
298
+ "y_emb": None,
299
+ "first_infer": 1,
300
+ "stage": 0,
301
+ }
302
+ for idx in range(1500):
303
+ if cache["first_infer"] == 1:
304
+ y_emb = self.ar_audio_embedding(y)
305
+ else:
306
+ y_emb = torch.cat(
307
+ [cache["y_emb"], self.ar_audio_embedding(y[:, -1:])], 1
308
+ )
309
+ cache["y_emb"] = y_emb
310
+ y_pos = self.ar_audio_position(y_emb)
311
+ if cache["first_infer"] == 1:
312
+ xy_pos = torch.concat([x, y_pos], dim=1)
313
+ else:
314
+ xy_pos = y_pos[:, -1:]
315
+ y_len = y_pos.shape[1]
316
+ if cache["first_infer"] == 1:
317
+ x_attn_mask_pad = F.pad(x_attn_mask, (0, y_len), value=True)
318
+ y_attn_mask = F.pad(
319
+ torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),
320
+ (x_len, 0), value=False
321
+ )
322
+ xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0)
323
+ else:
324
+ xy_attn_mask = torch.zeros((1, x_len + y_len), dtype=torch.bool)
325
+ xy_dec = self.h(xy_pos, mask=xy_attn_mask, cache=cache)
326
+ logits = self.ar_predict_layer(xy_dec[:, -1])
327
+ samples = sample(logits[0], y, top_k=top_k, top_p=1.0, repetition_penalty=1.35)[0].unsqueeze(0)
328
+ if early_stop_num != -1 and (y.shape[1] - prefix_len) > early_stop_num:
329
+ stop = True
330
+ if torch.argmax(logits, dim=-1)[0] == self.EOS or samples[0, 0] == self.EOS:
331
+ stop = True
332
+ if stop:
333
+ if prompts.shape[1] == y.shape[1]:
334
+ y = torch.concat([y, torch.zeros_like(samples)], dim=1)
335
+ break
336
+ y = torch.concat([y, samples], dim=1)
337
+ cache["first_infer"] = 0
338
+ return y, idx
GPT_SoVITS/AR/models/utils.py ADDED
@@ -0,0 +1,229 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/models/utils.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from typing import Tuple
6
+
7
+ def sequence_mask(length, max_length=None):
8
+ if max_length is None:
9
+ max_length = length.max()
10
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
11
+ return x.unsqueeze(0) < length.unsqueeze(1)
12
+
13
+
14
+ def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
15
+ """
16
+ Args:
17
+ lengths:
18
+ A 1-D tensor containing sentence lengths.
19
+ max_len:
20
+ The length of masks.
21
+ Returns:
22
+ Return a 2-D bool tensor, where masked positions
23
+ are filled with `True` and non-masked positions are
24
+ filled with `False`.
25
+
26
+ #>>> lengths = torch.tensor([1, 3, 2, 5])
27
+ #>>> make_pad_mask(lengths)
28
+ tensor([[False, True, True, True, True],
29
+ [False, False, False, True, True],
30
+ [False, False, True, True, True],
31
+ [False, False, False, False, False]])
32
+ """
33
+ assert lengths.ndim == 1, lengths.ndim
34
+ max_len = max(max_len, lengths.max())
35
+ n = lengths.size(0)
36
+ seq_range = torch.arange(0, max_len, device=lengths.device)
37
+     expanded_lengths = seq_range.unsqueeze(0).expand(n, max_len)
38
+
39
+     return expanded_lengths >= lengths.unsqueeze(-1)
40
+
41
+
42
+ # https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py
43
+ def top_k_top_p_filtering(
44
+ logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1
45
+ ):
46
+ """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
47
+ Args:
48
+ logits: logits distribution shape (batch size, vocabulary size)
49
+ if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
50
+ if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
51
+ Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
52
+ Make sure we keep at least min_tokens_to_keep per batch example in the output
53
+ From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
54
+ """
55
+ if top_k > 0:
56
+ top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
57
+ # Remove all tokens with a probability less than the last token of the top-k
58
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
59
+ logits[indices_to_remove] = filter_value
60
+
61
+ if top_p < 1.0:
62
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
63
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
64
+
65
+ # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
66
+ sorted_indices_to_remove = cumulative_probs > top_p
67
+ if min_tokens_to_keep > 1:
68
+ # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
69
+ sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
70
+ # Shift the indices to the right to keep also the first token above the threshold
71
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
72
+ sorted_indices_to_remove[..., 0] = 0
73
+
74
+ # scatter sorted tensors to original indexing
75
+ indices_to_remove = sorted_indices_to_remove.scatter(
76
+ 1, sorted_indices, sorted_indices_to_remove
77
+ )
78
+ logits[indices_to_remove] = filter_value
79
+ return logits
80
+
81
+
82
+ def topk_sampling(logits, top_k=10, top_p=1.0, temperature=1.0):
83
+ # temperature: (`optional`) float
84
+ # The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
85
+ # top_k: (`optional`) int
86
+ # The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
87
+ # top_p: (`optional`) float
88
+ # The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
89
+
90
+ # Temperature (higher temperature => more likely to sample low probability tokens)
91
+ if temperature != 1.0:
92
+ logits = logits / temperature
93
+ # Top-p/top-k filtering
94
+ logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
95
+ # Sample
96
+ token = torch.multinomial(F.softmax(logits, dim=-1), num_samples=1)
97
+ return token
98
+
99
+
100
+ from typing import Optional, Tuple
101
+
102
+
103
+ def multinomial_sample_one_no_sync(
104
+ probs_sort,
105
+ ): # Does multinomial sampling without a cuda synchronization
106
+ q = torch.empty_like(probs_sort).exponential_(1)
107
+ return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
108
+
109
+
110
+ def logits_to_probs(
111
+ logits,
112
+ previous_tokens: Optional[torch.Tensor] = None,
113
+ temperature: float = 1.0,
114
+ top_k: Optional[int] = None,
115
+ top_p: Optional[int] = None,
116
+ repetition_penalty: float = 1.0,
117
+ ):
118
+ if previous_tokens is not None:
119
+ previous_tokens = previous_tokens.squeeze()
120
+ # print(logits.shape,previous_tokens.shape)
121
+ # pdb.set_trace()
122
+ if previous_tokens is not None and repetition_penalty != 1.0:
123
+ previous_tokens = previous_tokens.long()
124
+ score = torch.gather(logits, dim=0, index=previous_tokens)
125
+ score = torch.where(
126
+ score < 0, score * repetition_penalty, score / repetition_penalty
127
+ )
128
+ logits.scatter_(dim=0, index=previous_tokens, src=score)
129
+
130
+ if top_p is not None and top_p < 1.0:
131
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
132
+ cum_probs = torch.cumsum(
133
+ torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1
134
+ )
135
+ sorted_indices_to_remove = cum_probs > top_p
136
+ sorted_indices_to_remove[0] = False # keep at least one option
137
+ indices_to_remove = sorted_indices_to_remove.scatter(
138
+ dim=0, index=sorted_indices, src=sorted_indices_to_remove
139
+ )
140
+ logits = logits.masked_fill(indices_to_remove, -float("Inf"))
141
+
142
+ logits = logits / max(temperature, 1e-5)
143
+
144
+ if top_k is not None:
145
+ v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
146
+ pivot = v.select(-1, -1).unsqueeze(-1)
147
+ logits = torch.where(logits < pivot, -float("Inf"), logits)
148
+
149
+ probs = torch.nn.functional.softmax(logits, dim=-1)
150
+ return probs
151
+
152
+
153
+ def sample(
154
+ logits,
155
+ previous_tokens: Optional[torch.Tensor] = None,
156
+ **sampling_kwargs,
157
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
158
+ probs = logits_to_probs(
159
+ logits=logits, previous_tokens=previous_tokens, **sampling_kwargs
160
+ )
161
+ idx_next = multinomial_sample_one_no_sync(probs)
162
+ return idx_next, probs
163
+
164
+ def dpo_loss(policy_chosen_logps: torch.FloatTensor,
165
+ policy_rejected_logps: torch.FloatTensor,
166
+ reference_chosen_logps: torch.FloatTensor,
167
+ reference_rejected_logps: torch.FloatTensor,
168
+ beta: float,
169
+ reference_free: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
170
+ pi_logratios = policy_chosen_logps - policy_rejected_logps
171
+ ref_logratios = reference_chosen_logps - reference_rejected_logps
172
+
173
+ if reference_free:
174
+ ref_logratios = 0
175
+
176
+ logits = pi_logratios - ref_logratios
177
+
178
+ losses = -F.logsigmoid(beta * logits)
179
+ chosen_rewards = beta * (policy_chosen_logps - reference_chosen_logps).detach()
180
+ rejected_rewards = beta * (policy_rejected_logps - reference_rejected_logps).detach()
181
+
182
+ return losses.mean(), chosen_rewards, rejected_rewards
183
+
184
+ def get_batch_logps(logits_target: torch.FloatTensor, logits_reject: torch.FloatTensor, labels_target: torch.LongTensor, labels_reject: torch.LongTensor, average_log_prob: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
185
+
186
+ # dummy token; we'll ignore the losses on these tokens later
187
+
188
+ per_token_logps_target = torch.gather(logits_target.log_softmax(-1), dim=2, index=labels_target.unsqueeze(2)).squeeze(2)
189
+ per_token_logps_reject = torch.gather(logits_reject.log_softmax(-1), dim=2, index=labels_reject.unsqueeze(2)).squeeze(2)
190
+
191
+ return per_token_logps_target.sum(-1), per_token_logps_reject.sum(-1)
192
+
193
+ def make_reject_y(y_o, y_lens):
194
+ def repeat_P(y):
195
+ range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
196
+ pre = y[:range_idx[0]]
197
+ shf = y[range_idx[1]:]
198
+ range_text = y[range_idx[0]:range_idx[1]]
199
+ new_y = torch.cat([pre, range_text, range_text, shf])
200
+ return new_y
201
+ def lost_P(y):
202
+ range_idx, _ = torch.randint(0, len(y), size=(2,)).sort()
203
+ pre = y[:range_idx[0]]
204
+ shf = y[range_idx[1]:]
205
+ range_text = y[range_idx[0]:range_idx[1]]
206
+ new_y = torch.cat([pre, shf])
207
+ return new_y
208
+ bs = len(y_lens)
209
+ reject_y = []
210
+ reject_y_lens = []
211
+ for b in range(bs):
212
+ process_item_idx = torch.randint(0, 1, size=(1, ))[0]
213
+ if process_item_idx == 0:
214
+ new_y = repeat_P(y_o[b])
215
+ reject_y.append(new_y)
216
+ reject_y_lens.append(len(new_y))
217
+ elif process_item_idx==1:
218
+ new_y = lost_P(y_o[b])
219
+ reject_y.append(new_y)
220
+ reject_y_lens.append(len(new_y))
221
+ max_length = max(reject_y_lens)
222
+ for b in range(bs):
223
+ pad_length = max_length - reject_y_lens[b]
224
+ reject_y[b] = torch.cat([reject_y[b], torch.zeros(pad_length, dtype=y_o.dtype, device=y_o.device)], dim=0)
225
+
226
+ reject_y = torch.stack(reject_y, dim = 0)
227
+ reject_y_lens = torch.tensor(reject_y_lens, device=y_lens.device)
228
+
229
+ return reject_y, reject_y_lens
GPT_SoVITS/AR/modules/__init__.py ADDED
File without changes
GPT_SoVITS/AR/modules/activation.py ADDED
@@ -0,0 +1,428 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
2
+ from typing import Optional
3
+ from typing import Tuple
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn import Linear
7
+ from torch.nn import Module
8
+ from torch.nn.init import constant_
9
+ from torch.nn.init import xavier_normal_
10
+ from torch.nn.init import xavier_uniform_
11
+ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
12
+ from torch.nn.parameter import Parameter
13
+
14
+ from torch.nn import functional as F
15
+ from AR.modules.patched_mha_with_cache import multi_head_attention_forward_patched
16
+
17
+ F.multi_head_attention_forward = multi_head_attention_forward_patched
18
+
19
+
20
+ class MultiheadAttention(Module):
21
+ r"""Allows the model to jointly attend to information
22
+ from different representation subspaces as described in the paper:
23
+ `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
24
+
25
+ Multi-Head Attention is defined as:
26
+
27
+ .. math::
28
+ \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
29
+
30
+ where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
31
+
32
+ ``forward()`` will use a special optimized implementation if all of the following
33
+ conditions are met:
34
+
35
+ - self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
36
+ restriction will be loosened in the future.)
37
+ - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
38
+ - training is disabled (using ``.eval()``)
39
+ - dropout is 0
40
+ - ``add_bias_kv`` is ``False``
41
+ - ``add_zero_attn`` is ``False``
42
+ - ``batch_first`` is ``True`` and the input is batched
43
+ - ``kdim`` and ``vdim`` are equal to ``embed_dim``
44
+ - at most one of ``key_padding_mask`` or ``attn_mask`` is passed
45
+ - if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
46
+ nor ``attn_mask`` is passed
47
+
48
+ If the optimized implementation is in use, a
49
+ `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
50
+ ``query``/``key``/``value`` to represent padding more efficiently than using a
51
+ padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
52
+ will be returned, and an additional speedup proportional to the fraction of the input
53
+ that is padding can be expected.
54
+
55
+ Args:
56
+ embed_dim: Total dimension of the model.
57
+ num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
58
+ across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
59
+ dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
60
+ bias: If specified, adds bias to input / output projection layers. Default: ``True``.
61
+ add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
62
+ add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
63
+ Default: ``False``.
64
+ kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
65
+ vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
66
+ batch_first: If ``True``, then the input and output tensors are provided
67
+ as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
68
+
69
+ Examples::
70
+
71
+ >>> # xdoctest: +SKIP
72
+ >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
73
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
74
+
75
+ """
76
+ __constants__ = ["batch_first"]
77
+ bias_k: Optional[torch.Tensor]
78
+ bias_v: Optional[torch.Tensor]
79
+
80
+ def __init__(
81
+ self,
82
+ embed_dim,
83
+ num_heads,
84
+ dropout=0.0,
85
+ bias=True,
86
+ add_bias_kv=False,
87
+ add_zero_attn=False,
88
+ kdim=None,
89
+ vdim=None,
90
+ batch_first=False,
91
+ linear1_cls=Linear,
92
+ linear2_cls=Linear,
93
+ device=None,
94
+ dtype=None,
95
+ ) -> None:
96
+ factory_kwargs = {"device": device, "dtype": dtype}
97
+ super(MultiheadAttention, self).__init__()
98
+ self.embed_dim = embed_dim
99
+ self.kdim = kdim if kdim is not None else embed_dim
100
+ self.vdim = vdim if vdim is not None else embed_dim
101
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
102
+
103
+ self.num_heads = num_heads
104
+ self.dropout = dropout
105
+ self.batch_first = batch_first
106
+ self.head_dim = embed_dim // num_heads
107
+ assert (
108
+ self.head_dim * num_heads == self.embed_dim
109
+ ), "embed_dim must be divisible by num_heads"
110
+
111
+ if add_bias_kv:
112
+ self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
113
+ self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
114
+ else:
115
+ self.bias_k = self.bias_v = None
116
+
117
+ if linear1_cls == Linear:
118
+ if not self._qkv_same_embed_dim:
119
+ self.q_proj_weight = Parameter(
120
+ torch.empty((embed_dim, embed_dim), **factory_kwargs)
121
+ )
122
+ self.k_proj_weight = Parameter(
123
+ torch.empty((embed_dim, self.kdim), **factory_kwargs)
124
+ )
125
+ self.v_proj_weight = Parameter(
126
+ torch.empty((embed_dim, self.vdim), **factory_kwargs)
127
+ )
128
+ self.register_parameter("in_proj_weight", None)
129
+ else:
130
+ self.in_proj_weight = Parameter(
131
+ torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
132
+ )
133
+ self.register_parameter("q_proj_weight", None)
134
+ self.register_parameter("k_proj_weight", None)
135
+ self.register_parameter("v_proj_weight", None)
136
+
137
+ if bias:
138
+ self.in_proj_bias = Parameter(
139
+ torch.empty(3 * embed_dim, **factory_kwargs)
140
+ )
141
+ else:
142
+ self.register_parameter("in_proj_bias", None)
143
+ self.out_proj = NonDynamicallyQuantizableLinear(
144
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
145
+ )
146
+
147
+ self._reset_parameters()
148
+ else:
149
+ if not self._qkv_same_embed_dim:
150
+ raise NotImplementedError
151
+ else:
152
+ self.in_proj_linear = linear1_cls(
153
+ embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs
154
+ )
155
+ self.in_proj_weight = self.in_proj_linear.weight
156
+
157
+ self.register_parameter("q_proj_weight", None)
158
+ self.register_parameter("k_proj_weight", None)
159
+ self.register_parameter("v_proj_weight", None)
160
+
161
+ if bias:
162
+ self.in_proj_bias = self.in_proj_linear.bias
163
+ else:
164
+ self.register_parameter("in_proj_bias", None)
165
+
166
+ self.out_proj = linear2_cls(
167
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
168
+ )
169
+
170
+ if self.bias_k is not None:
171
+ xavier_normal_(self.bias_k)
172
+ if self.bias_v is not None:
173
+ xavier_normal_(self.bias_v)
174
+
175
+ self.add_zero_attn = add_zero_attn
176
+
177
+ def _reset_parameters(self):
178
+ if self._qkv_same_embed_dim:
179
+ xavier_uniform_(self.in_proj_weight)
180
+ else:
181
+ xavier_uniform_(self.q_proj_weight)
182
+ xavier_uniform_(self.k_proj_weight)
183
+ xavier_uniform_(self.v_proj_weight)
184
+
185
+ if self.in_proj_bias is not None:
186
+ constant_(self.in_proj_bias, 0.0)
187
+ constant_(self.out_proj.bias, 0.0)
188
+
189
+ if self.bias_k is not None:
190
+ xavier_normal_(self.bias_k)
191
+ if self.bias_v is not None:
192
+ xavier_normal_(self.bias_v)
193
+
194
+ def __setstate__(self, state):
195
+ # Support loading old MultiheadAttention checkpoints generated by v1.1.0
196
+ if "_qkv_same_embed_dim" not in state:
197
+ state["_qkv_same_embed_dim"] = True
198
+
199
+ super(MultiheadAttention, self).__setstate__(state)
200
+
201
+ def forward(
202
+ self,
203
+ query: Tensor,
204
+ key: Tensor,
205
+ value: Tensor,
206
+ key_padding_mask: Optional[Tensor] = None,
207
+ need_weights: bool = True,
208
+ attn_mask: Optional[Tensor] = None,
209
+ average_attn_weights: bool = True,
210
+ cache=None,
211
+ ) -> Tuple[Tensor, Optional[Tensor]]:
212
+ r"""
213
+ Args:
214
+ query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
215
+ or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
216
+ :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
217
+ Queries are compared against key-value pairs to produce the output.
218
+ See "Attention Is All You Need" for more details.
219
+ key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
220
+ or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
221
+ :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
222
+ See "Attention Is All You Need" for more details.
223
+ value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
224
+ ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
225
+ sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
226
+ See "Attention Is All You Need" for more details.
227
+ key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
228
+ to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
229
+ Binary and byte masks are supported.
230
+ For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
231
+ the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
232
+ need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
233
+ Default: ``True``.
234
+ attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
235
+ :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
236
+ :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
237
+ broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
238
+ Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
239
+ corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
240
+ corresponding position is not allowed to attend. For a float mask, the mask values will be added to
241
+ the attention weight.
242
+ average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
243
+ heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
244
+ effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
245
+
246
+ Outputs:
247
+ - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
248
+ :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
249
+ where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
250
+ embedding dimension ``embed_dim``.
251
+ - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
252
+ returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
253
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
254
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
255
+ head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
256
+
257
+ .. note::
258
+ `batch_first` argument is ignored for unbatched inputs.
259
+ """
260
+ is_batched = query.dim() == 3
261
+ if key_padding_mask is not None:
262
+ _kpm_dtype = key_padding_mask.dtype
263
+ if _kpm_dtype != torch.bool and not torch.is_floating_point(
264
+ key_padding_mask
265
+ ):
266
+ raise AssertionError(
267
+ "only bool and floating types of key_padding_mask are supported"
268
+ )
269
+ why_not_fast_path = ""
270
+ if not is_batched:
271
+ why_not_fast_path = (
272
+ f"input not batched; expected query.dim() of 3 but got {query.dim()}"
273
+ )
274
+ elif query is not key or key is not value:
275
+ # When lifting this restriction, don't forget to either
276
+ # enforce that the dtypes all match or test cases where
277
+ # they don't!
278
+ why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
279
+ elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
280
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
281
+ elif (
282
+ self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype
283
+ ):
284
+ # this case will fail anyway, but at least they'll get a useful error message.
285
+ why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
286
+ elif self.training:
287
+ why_not_fast_path = "training is enabled"
288
+ elif not self.batch_first:
289
+ why_not_fast_path = "batch_first was not True"
290
+ elif self.bias_k is not None:
291
+ why_not_fast_path = "self.bias_k was not None"
292
+ elif self.bias_v is not None:
293
+ why_not_fast_path = "self.bias_v was not None"
294
+ elif self.dropout:
295
+ why_not_fast_path = f"dropout was {self.dropout}, required zero"
296
+ elif self.add_zero_attn:
297
+ why_not_fast_path = "add_zero_attn was enabled"
298
+ elif not self._qkv_same_embed_dim:
299
+ why_not_fast_path = "_qkv_same_embed_dim was not True"
300
+ elif attn_mask is not None:
301
+ why_not_fast_path = "attn_mask was not None"
302
+ elif query.is_nested and key_padding_mask is not None:
303
+ why_not_fast_path = (
304
+ "key_padding_mask is not supported with NestedTensor input"
305
+ )
306
+ elif self.num_heads % 2 == 1:
307
+ why_not_fast_path = "num_heads is odd"
308
+ elif torch.is_autocast_enabled():
309
+ why_not_fast_path = "autocast is enabled"
310
+
311
+ if not why_not_fast_path:
312
+ tensor_args = (
313
+ query,
314
+ key,
315
+ value,
316
+ self.in_proj_weight,
317
+ self.in_proj_bias,
318
+ self.out_proj.weight,
319
+ self.out_proj.bias,
320
+ )
321
+ # We have to use list comprehensions below because TorchScript does not support
322
+ # generator expressions.
323
+ if torch.overrides.has_torch_function(tensor_args):
324
+ why_not_fast_path = "some Tensor argument has_torch_function"
325
+ elif not all(
326
+ [
327
+ (x is None or x.is_cuda or "cpu" in str(x.device))
328
+ for x in tensor_args
329
+ ]
330
+ ):
331
+ why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
332
+ elif torch.is_grad_enabled() and any(
333
+ [x is not None and x.requires_grad for x in tensor_args]
334
+ ):
335
+ why_not_fast_path = (
336
+ "grad is enabled and at least one of query or the "
337
+ "input/output projection weights or biases requires_grad"
338
+ )
339
+ if not why_not_fast_path:
340
+ return torch._native_multi_head_attention(
341
+ query,
342
+ key,
343
+ value,
344
+ self.embed_dim,
345
+ self.num_heads,
346
+ self.in_proj_weight,
347
+ self.in_proj_bias,
348
+ self.out_proj.weight,
349
+ self.out_proj.bias,
350
+ key_padding_mask if key_padding_mask is not None else attn_mask,
351
+ need_weights,
352
+ average_attn_weights,
353
+ 1
354
+ if key_padding_mask is not None
355
+ else 0
356
+ if attn_mask is not None
357
+ else None,
358
+ )
359
+
360
+ any_nested = query.is_nested or key.is_nested or value.is_nested
361
+ assert not any_nested, (
362
+ "MultiheadAttention does not support NestedTensor outside of its fast path. "
363
+ + f"The fast path was not hit because {why_not_fast_path}"
364
+ )
365
+
366
+ if self.batch_first and is_batched:
367
+ # make sure that the transpose op does not affect the "is" property
368
+ if key is value:
369
+ if query is key:
370
+ query = key = value = query.transpose(1, 0)
371
+ else:
372
+ query, key = [x.transpose(1, 0) for x in (query, key)]
373
+ value = key
374
+ else:
375
+ query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
376
+
377
+ if not self._qkv_same_embed_dim:
378
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
379
+ query,
380
+ key,
381
+ value,
382
+ self.embed_dim,
383
+ self.num_heads,
384
+ self.in_proj_weight,
385
+ self.in_proj_bias,
386
+ self.bias_k,
387
+ self.bias_v,
388
+ self.add_zero_attn,
389
+ self.dropout,
390
+ self.out_proj.weight,
391
+ self.out_proj.bias,
392
+ training=self.training,
393
+ key_padding_mask=key_padding_mask,
394
+ need_weights=need_weights,
395
+ attn_mask=attn_mask,
396
+ use_separate_proj_weight=True,
397
+ q_proj_weight=self.q_proj_weight,
398
+ k_proj_weight=self.k_proj_weight,
399
+ v_proj_weight=self.v_proj_weight,
400
+ average_attn_weights=average_attn_weights,
401
+ cache=cache,
402
+ )
403
+ else:
404
+ attn_output, attn_output_weights = F.multi_head_attention_forward(
405
+ query,
406
+ key,
407
+ value,
408
+ self.embed_dim,
409
+ self.num_heads,
410
+ self.in_proj_weight,
411
+ self.in_proj_bias,
412
+ self.bias_k,
413
+ self.bias_v,
414
+ self.add_zero_attn,
415
+ self.dropout,
416
+ self.out_proj.weight,
417
+ self.out_proj.bias,
418
+ training=self.training,
419
+ key_padding_mask=key_padding_mask,
420
+ need_weights=need_weights,
421
+ attn_mask=attn_mask,
422
+ average_attn_weights=average_attn_weights,
423
+ cache=cache,
424
+ )
425
+ if self.batch_first and is_batched:
426
+ return attn_output.transpose(1, 0), attn_output_weights
427
+ else:
428
+ return attn_output, attn_output_weights
GPT_SoVITS/AR/modules/activation_onnx.py ADDED
@@ -0,0 +1,178 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/activation.py
2
+ from typing import Optional
3
+ from typing import Tuple
4
+ import torch
5
+ from torch import Tensor
6
+ from torch.nn import Linear
7
+ from torch.nn import Module
8
+ from torch.nn.init import constant_
9
+ from torch.nn.init import xavier_normal_
10
+ from torch.nn.init import xavier_uniform_
11
+ from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
12
+ from torch.nn.parameter import Parameter
13
+
14
+ from torch.nn import functional as F
15
+ from AR.modules.patched_mha_with_cache_onnx import multi_head_attention_forward_patched
16
+
17
+
18
+ class MultiheadAttention(Module):
19
+ __constants__ = ["batch_first"]
20
+ bias_k: Optional[torch.Tensor]
21
+ bias_v: Optional[torch.Tensor]
22
+
23
+ def __init__(
24
+ self,
25
+ embed_dim,
26
+ num_heads,
27
+ dropout=0.0,
28
+ bias=True,
29
+ add_bias_kv=False,
30
+ add_zero_attn=False,
31
+ kdim=None,
32
+ vdim=None,
33
+ batch_first=False,
34
+ linear1_cls=Linear,
35
+ linear2_cls=Linear,
36
+ device=None,
37
+ dtype=None,
38
+ ) -> None:
39
+ factory_kwargs = {"device": device, "dtype": dtype}
40
+ super(MultiheadAttention, self).__init__()
41
+ self.embed_dim = embed_dim
42
+ self.kdim = kdim if kdim is not None else embed_dim
43
+ self.vdim = vdim if vdim is not None else embed_dim
44
+ self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
45
+
46
+ self.num_heads = num_heads
47
+ self.dropout = dropout
48
+ self.batch_first = batch_first
49
+ self.head_dim = embed_dim // num_heads
50
+ assert (
51
+ self.head_dim * num_heads == self.embed_dim
52
+ ), "embed_dim must be divisible by num_heads"
53
+
54
+ if add_bias_kv:
55
+ self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
56
+ self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
57
+ else:
58
+ self.bias_k = self.bias_v = None
59
+
60
+ if linear1_cls == Linear:
61
+ if not self._qkv_same_embed_dim:
62
+ self.q_proj_weight = Parameter(
63
+ torch.empty((embed_dim, embed_dim), **factory_kwargs)
64
+ )
65
+ self.k_proj_weight = Parameter(
66
+ torch.empty((embed_dim, self.kdim), **factory_kwargs)
67
+ )
68
+ self.v_proj_weight = Parameter(
69
+ torch.empty((embed_dim, self.vdim), **factory_kwargs)
70
+ )
71
+ self.register_parameter("in_proj_weight", None)
72
+ else:
73
+ self.in_proj_weight = Parameter(
74
+ torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
75
+ )
76
+ self.register_parameter("q_proj_weight", None)
77
+ self.register_parameter("k_proj_weight", None)
78
+ self.register_parameter("v_proj_weight", None)
79
+
80
+ if bias:
81
+ self.in_proj_bias = Parameter(
82
+ torch.empty(3 * embed_dim, **factory_kwargs)
83
+ )
84
+ else:
85
+ self.register_parameter("in_proj_bias", None)
86
+ self.out_proj = NonDynamicallyQuantizableLinear(
87
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
88
+ )
89
+
90
+ self._reset_parameters()
91
+ else:
92
+ if not self._qkv_same_embed_dim:
93
+ raise NotImplementedError
94
+ else:
95
+ self.in_proj_linear = linear1_cls(
96
+ embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs
97
+ )
98
+ self.in_proj_weight = self.in_proj_linear.weight
99
+
100
+ self.register_parameter("q_proj_weight", None)
101
+ self.register_parameter("k_proj_weight", None)
102
+ self.register_parameter("v_proj_weight", None)
103
+
104
+ if bias:
105
+ self.in_proj_bias = self.in_proj_linear.bias
106
+ else:
107
+ self.register_parameter("in_proj_bias", None)
108
+
109
+ self.out_proj = linear2_cls(
110
+ embed_dim, embed_dim, bias=bias, **factory_kwargs
111
+ )
112
+
113
+ if self.bias_k is not None:
114
+ xavier_normal_(self.bias_k)
115
+ if self.bias_v is not None:
116
+ xavier_normal_(self.bias_v)
117
+
118
+ self.add_zero_attn = add_zero_attn
119
+
120
+ def _reset_parameters(self):
121
+ if self._qkv_same_embed_dim:
122
+ xavier_uniform_(self.in_proj_weight)
123
+ else:
124
+ xavier_uniform_(self.q_proj_weight)
125
+ xavier_uniform_(self.k_proj_weight)
126
+ xavier_uniform_(self.v_proj_weight)
127
+
128
+ if self.in_proj_bias is not None:
129
+ constant_(self.in_proj_bias, 0.0)
130
+ constant_(self.out_proj.bias, 0.0)
131
+
132
+ if self.bias_k is not None:
133
+ xavier_normal_(self.bias_k)
134
+ if self.bias_v is not None:
135
+ xavier_normal_(self.bias_v)
136
+
137
+ def __setstate__(self, state):
138
+ # Support loading old MultiheadAttention checkpoints generated by v1.1.0
139
+ if "_qkv_same_embed_dim" not in state:
140
+ state["_qkv_same_embed_dim"] = True
141
+
142
+ super(MultiheadAttention, self).__setstate__(state)
143
+
144
+ def forward(
145
+ self,
146
+ query: Tensor,
147
+ key: Tensor,
148
+ value: Tensor,
149
+ key_padding_mask: Optional[Tensor] = None,
150
+ need_weights: bool = True,
151
+ attn_mask: Optional[Tensor] = None,
152
+ average_attn_weights: bool = True,
153
+ cache=None,
154
+ ) -> Tuple[Tensor, Optional[Tensor]]:
155
+ any_nested = query.is_nested or key.is_nested or value.is_nested
156
+ query = key = value = query.transpose(1, 0)
157
+ attn_output = multi_head_attention_forward_patched(
158
+ query,
159
+ key,
160
+ value,
161
+ self.embed_dim,
162
+ self.num_heads,
163
+ self.in_proj_weight,
164
+ self.in_proj_bias,
165
+ self.bias_k,
166
+ self.bias_v,
167
+ self.add_zero_attn,
168
+ self.dropout,
169
+ self.out_proj.weight,
170
+ self.out_proj.bias,
171
+ training=self.training,
172
+ key_padding_mask=key_padding_mask,
173
+ need_weights=need_weights,
174
+ attn_mask=attn_mask,
175
+ average_attn_weights=average_attn_weights,
176
+ cache=cache,
177
+ )
178
+ return attn_output.transpose(1, 0)
GPT_SoVITS/AR/modules/embedding.py ADDED
@@ -0,0 +1,81 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
2
+ import math
3
+
4
+ import torch
5
+ from torch import nn
6
+
7
+
8
+ class TokenEmbedding(nn.Module):
9
+ def __init__(
10
+ self,
11
+ embedding_dim: int,
12
+ vocab_size: int,
13
+ dropout: float = 0.0,
14
+ ):
15
+ super().__init__()
16
+
17
+ self.vocab_size = vocab_size
18
+ self.embedding_dim = embedding_dim
19
+
20
+ self.dropout = torch.nn.Dropout(p=dropout)
21
+ self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)
22
+
23
+ @property
24
+ def weight(self) -> torch.Tensor:
25
+ return self.word_embeddings.weight
26
+
27
+ def embedding(self, index: int) -> torch.Tensor:
28
+ return self.word_embeddings.weight[index : index + 1]
29
+
30
+ def forward(self, x: torch.Tensor):
31
+ x = self.word_embeddings(x)
32
+ x = self.dropout(x)
33
+ return x
34
+
35
+
36
+ class SinePositionalEmbedding(nn.Module):
37
+ def __init__(
38
+ self,
39
+ embedding_dim: int,
40
+ dropout: float = 0.0,
41
+ scale: bool = False,
42
+ alpha: bool = False,
43
+ ):
44
+ super().__init__()
45
+ self.embedding_dim = embedding_dim
46
+ self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
47
+ self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
48
+ self.dropout = torch.nn.Dropout(p=dropout)
49
+
50
+ self.reverse = False
51
+ self.pe = None
52
+ self.extend_pe(torch.tensor(0.0).expand(1, 4000))
53
+
54
+ def extend_pe(self, x):
55
+ """Reset the positional encodings."""
56
+ if self.pe is not None:
57
+ if self.pe.size(1) >= x.size(1):
58
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
59
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
60
+ return
61
+ pe = torch.zeros(x.size(1), self.embedding_dim)
62
+ if self.reverse:
63
+ position = torch.arange(
64
+ x.size(1) - 1, -1, -1.0, dtype=torch.float32
65
+ ).unsqueeze(1)
66
+ else:
67
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
68
+ div_term = torch.exp(
69
+ torch.arange(0, self.embedding_dim, 2, dtype=torch.float32)
70
+ * -(math.log(10000.0) / self.embedding_dim)
71
+ )
72
+ pe[:, 0::2] = torch.sin(position * div_term)
73
+ pe[:, 1::2] = torch.cos(position * div_term)
74
+ pe = pe.unsqueeze(0)
75
+ self.pe = pe.to(device=x.device, dtype=x.dtype).detach()
76
+
77
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
78
+ self.extend_pe(x)
79
+ output = x.unsqueeze(-1) if x.ndim == 2 else x
80
+ output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)]
81
+ return self.dropout(output)
GPT_SoVITS/AR/modules/embedding_onnx.py ADDED
@@ -0,0 +1,63 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/embedding.py
2
+ import math
3
+
4
+ import torch
5
+ from torch import nn
6
+
7
+
8
+ class TokenEmbedding(nn.Module):
9
+ def __init__(
10
+ self,
11
+ embedding_dim: int,
12
+ vocab_size: int,
13
+ dropout: float = 0.0,
14
+ ):
15
+ super().__init__()
16
+
17
+ self.vocab_size = vocab_size
18
+ self.embedding_dim = embedding_dim
19
+
20
+ self.dropout = torch.nn.Dropout(p=dropout)
21
+ self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim)
22
+
23
+ @property
24
+ def weight(self) -> torch.Tensor:
25
+ return self.word_embeddings.weight
26
+
27
+ def embedding(self, index: int) -> torch.Tensor:
28
+ return self.word_embeddings.weight[index : index + 1]
29
+
30
+ def forward(self, x: torch.Tensor):
31
+ x = self.word_embeddings(x)
32
+ x = self.dropout(x)
33
+ return x
34
+
35
+
36
+ class SinePositionalEmbedding(nn.Module):
37
+ def __init__(
38
+ self,
39
+ embedding_dim: int,
40
+ dropout: float = 0.0,
41
+ scale: bool = False,
42
+ alpha: bool = False,
43
+ ):
44
+ super().__init__()
45
+ self.embedding_dim = embedding_dim
46
+ self.x_scale = math.sqrt(embedding_dim) if scale else 1.0
47
+ self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
48
+ self.dropout = torch.nn.Dropout(p=dropout)
49
+ self.reverse = False
50
+ self.div_term = torch.exp(torch.arange(0, self.embedding_dim, 2) * -(math.log(10000.0) / self.embedding_dim))
51
+
52
+ def extend_pe(self, x):
53
+ position = torch.cumsum(torch.ones_like(x[:,:,0]), dim=1).transpose(0, 1)
54
+ scpe = (position * self.div_term).unsqueeze(0)
55
+ pe = torch.cat([torch.sin(scpe), torch.cos(scpe)]).permute(1, 2, 0)
56
+ pe = pe.contiguous().view(1, -1, self.embedding_dim)
57
+ return pe
58
+
59
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
60
+ pe = self.extend_pe(x)
61
+ output = x.unsqueeze(-1) if x.ndim == 2 else x
62
+ output = output * self.x_scale + self.alpha * pe
63
+ return self.dropout(output)
GPT_SoVITS/AR/modules/lr_schedulers.py ADDED
@@ -0,0 +1,83 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/modules/lr_schedulers.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import math
4
+
5
+ import torch
6
+ from matplotlib import pyplot as plt
7
+ from torch import nn
8
+ from torch.optim import Adam
9
+
10
+
11
+ class WarmupCosineLRSchedule(torch.optim.lr_scheduler._LRScheduler):
12
+ """
13
+ Implements Warmup learning rate schedule until 'warmup_steps', going from 'init_lr' to 'peak_lr' for multiple optimizers.
14
+ """
15
+
16
+ def __init__(
17
+ self,
18
+ optimizer,
19
+ init_lr,
20
+ peak_lr,
21
+ end_lr,
22
+ warmup_steps=10000,
23
+ total_steps=400000,
24
+ current_step=0,
25
+ ):
26
+ self.init_lr = init_lr
27
+ self.peak_lr = peak_lr
28
+ self.end_lr = end_lr
29
+ self.optimizer = optimizer
30
+ self._warmup_rate = (peak_lr - init_lr) / warmup_steps
31
+ self._decay_rate = (end_lr - peak_lr) / (total_steps - warmup_steps)
32
+ self._current_step = current_step
33
+ self.lr = init_lr
34
+ self.warmup_steps = warmup_steps
35
+ self.total_steps = total_steps
36
+ self._last_lr = [self.lr]
37
+
38
+ def set_lr(self, lr):
39
+ self._last_lr = [g["lr"] for g in self.optimizer.param_groups]
40
+ for g in self.optimizer.param_groups:
41
+ # g['lr'] = lr
42
+             g["lr"] = self.end_lr  ### locked: always use the linear (end) LR
43
+
44
+ def step(self):
45
+ if self._current_step < self.warmup_steps:
46
+ lr = self.init_lr + self._warmup_rate * self._current_step
47
+
48
+ elif self._current_step > self.total_steps:
49
+ lr = self.end_lr
50
+
51
+ else:
52
+ decay_ratio = (self._current_step - self.warmup_steps) / (
53
+ self.total_steps - self.warmup_steps
54
+ )
55
+ if decay_ratio < 0.0 or decay_ratio > 1.0:
56
+ raise RuntimeError(
57
+ "Decay ratio must be in [0.0, 1.0]. Fix LR scheduler settings."
58
+ )
59
+ coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
60
+ lr = self.end_lr + coeff * (self.peak_lr - self.end_lr)
61
+
62
+         self.lr = lr = self.end_lr = 0.002  ### locked to the linear (end) LR ### the schedule misbehaves, so hard-lock it!
63
+ self.set_lr(lr)
64
+ self.lr = lr
65
+ self._current_step += 1
66
+ return self.lr
67
+
68
+
69
+ if __name__ == "__main__":
70
+ m = nn.Linear(10, 10)
71
+ opt = Adam(m.parameters(), lr=1e-4)
72
+ s = WarmupCosineLRSchedule(
73
+ opt, 1e-6, 2e-4, 1e-6, warmup_steps=2000, total_steps=20000, current_step=0
74
+ )
75
+ lrs = []
76
+ for i in range(25000):
77
+ s.step()
78
+ lrs.append(s.lr)
79
+ print(s.lr)
80
+
81
+ plt.plot(lrs)
82
+ plt.plot(range(0, 25000), lrs)
83
+ plt.show()
GPT_SoVITS/AR/modules/optim.py ADDED
@@ -0,0 +1,622 @@
1
+ # Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
2
+ #
3
+ # See ../LICENSE for clarification regarding multiple authors
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import contextlib
17
+ import logging
18
+ from collections import defaultdict
19
+ from typing import List
20
+ from typing import Tuple
21
+
22
+ import torch
23
+ from torch import Tensor
24
+ from torch.optim import Optimizer
25
+
26
+
27
+ class BatchedOptimizer(Optimizer):
28
+ """
29
+ This class adds to class Optimizer the capability to optimize parameters in batches:
30
+ it will stack the parameters and their grads for you so the optimizer can work
31
+ on tensors with an extra leading dimension. This is intended for speed with GPUs,
32
+ as it reduces the number of kernels launched in the optimizer.
33
+
34
+ Args:
35
+ params: iterable of parameters, or dicts defining parameter groups, as for torch.optim.Optimizer.
36
+ """
37
+
38
+ def __init__(self, params, defaults):
39
+ super(BatchedOptimizer, self).__init__(params, defaults)
40
+
41
+ @contextlib.contextmanager
42
+ def batched_params(self, param_group, group_params_names):
43
+ """
44
+ This function returns (technically, yields) a list of
45
+ of tuples (p, state), where
46
+ p is a `fake` parameter that is stacked (over axis 0) from real parameters
47
+ that share the same shape, and its gradient is also stacked;
48
+ `state` is the state corresponding to this batch of parameters
49
+ (it will be physically located in the "state" for one of the real
50
+ parameters, the last one that has any particular shape and dtype).
51
+
52
+ This function is decorated as a context manager so that it can
53
+ write parameters back to their "real" locations.
54
+
55
+ The idea is, instead of doing:
56
+ <code>
57
+ for p in group["params"]:
58
+ state = self.state[p]
59
+ ...
60
+ </code>
61
+ you can do:
62
+ <code>
63
+ with self.batched_params(group["params"]) as batches:
64
+ for p, state, p_names in batches:
65
+ ...
66
+ </code>
67
+
68
+ Args:
69
+ group: a parameter group, which is a list of parameters; should be
70
+ one of self.param_groups.
71
+ group_params_names: name for each parameter in group,
72
+ which is List[str].
73
+ """
74
+ batches = defaultdict(
75
+ list
76
+ ) # `batches` maps from tuple (dtype_as_str,*shape) to list of nn.Parameter
77
+ batches_names = defaultdict(
78
+ list
79
+ )  # `batches_names` maps from tuple (dtype_as_str,*shape) to list of str
80
+
81
+ assert len(param_group) == len(group_params_names)
82
+ for p, named_p in zip(param_group, group_params_names):
83
+ key = (str(p.dtype), *p.shape)
84
+ batches[key].append(p)
85
+ batches_names[key].append(named_p)
86
+
87
+ batches_names_keys = list(batches_names.keys())
88
+ sorted_idx = sorted(
89
+ range(len(batches_names)), key=lambda i: batches_names_keys[i])
90
+ batches_names = [
91
+ batches_names[batches_names_keys[idx]] for idx in sorted_idx
92
+ ]
93
+ batches = [batches[batches_names_keys[idx]] for idx in sorted_idx]
94
+
95
+ stacked_params_dict = dict()
96
+
97
+ # turn batches into a list, in deterministic order.
98
+ # tuples will contain tuples of (stacked_param, state, stacked_params_names),
99
+ # one for each batch in `batches`.
100
+ tuples = []
101
+
102
+ for batch, batch_names in zip(batches, batches_names):
103
+ p = batch[0]
104
+ # we arbitrarily store the state in the
105
+ # state corresponding to the 1st parameter in the
106
+ # group. class Optimizer will take care of saving/loading state.
107
+ state = self.state[p]
108
+ p_stacked = torch.stack(batch)
109
+ grad = torch.stack([
110
+ torch.zeros_like(p) if p.grad is None else p.grad for p in batch
111
+ ])
112
+ p_stacked.grad = grad
113
+ stacked_params_dict[(str(p.dtype), *p.shape)] = p_stacked  # key rebuilt per batch; the original indexed with a stale `key` left over from an earlier loop
114
+ tuples.append((p_stacked, state, batch_names))
115
+
116
+ yield tuples # <-- calling code will do the actual optimization here!
117
+
118
+ for ((stacked_params, _state, _names), batch) in zip(tuples, batches):
119
+ for i, p in enumerate(batch): # batch is list of Parameter
120
+ p.copy_(stacked_params[i])
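For intuition, `batched_params` groups parameters that share a dtype and shape, stacks each group along a new leading dimension, and hands the stacked tensors to the optimizer so one kernel launch can update the whole group. A small sketch of the grouping key it uses (illustrative only):

import torch
from collections import defaultdict

params = [torch.zeros(3, 4), torch.zeros(3, 4), torch.zeros(5)]
groups = defaultdict(list)
for p in params:
    groups[(str(p.dtype), *p.shape)].append(p)   # same key as in batched_params
# two groups: ('torch.float32', 3, 4) holds two tensors, ('torch.float32', 5) holds one;
# each group is then torch.stack()-ed into a single "fake" parameter with batch dim 0.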
121
+
122
+
123
+ class ScaledAdam(BatchedOptimizer):
124
+ """
125
+ Implements 'Scaled Adam', a variant of Adam where we scale each parameter's update
126
+ proportional to the norm of that parameter; and also learn the scale of the parameter,
127
+ in log space, subject to upper and lower limits (as if we had factored each parameter as
128
+ param = underlying_param * log_scale.exp())
129
+
130
+
131
+ Args:
132
+ params: The parameters or param_groups to optimize (like other Optimizer subclasses)
133
+ lr: The learning rate. We will typically use a learning rate schedule that starts
134
+ at 0.03 and decreases over time, i.e. much higher than other common
135
+ optimizers.
136
+ clipping_scale: (e.g. 2.0)
137
+ A scale for gradient-clipping: if specified, the normalized gradients
138
+ over the whole model will be clipped to have 2-norm equal to
139
+ `clipping_scale` times the median 2-norm over the most recent period
140
+ of `clipping_update_period` minibatches. By "normalized gradients",
141
+ we mean after multiplying by the rms parameter value for this tensor
142
+ [for non-scalars]; this is appropriate because our update is scaled
143
+ by this quantity.
144
+ betas: beta1,beta2 are momentum constants for regular momentum, and moving sum-sq grad.
145
+ Must satisfy 0 < beta1 <= beta2 < 1.
146
+ scalar_lr_scale: A scaling factor on the learning rate, that we use to update the
147
+ scale of each parameter tensor and scalar parameters of the model.
148
+ If each parameter were decomposed
149
+ as p * p_scale.exp(), where (p**2).mean().sqrt() == 1.0, scalar_lr_scale
150
+ would be the scaling factor on the learning rate of p_scale.
151
+ eps: A general-purpose epsilon to prevent division by zero
152
+ param_min_rms: Minimum root-mean-square value of parameter tensor, for purposes of
153
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
154
+ parameter tensor to be >= this value)
155
+ param_max_rms: Maximum root-mean-square value of parameter tensor, for purposes of
156
+ learning the scale on the parameters (we'll constrain the rms of each non-scalar
157
+ parameter tensor to be <= this value)
158
+ scalar_max: Maximum absolute value for scalar parameters (applicable if your
159
+ model has any parameters with numel() == 1).
160
+ size_update_period: The periodicity, in steps, with which we update the size (scale)
161
+ of the parameter tensor. This is provided to save a little time
162
+ in the update.
163
+ clipping_update_period: if clipping_scale is specified, this is the period
164
+ """
165
+
166
+ def __init__(
167
+ self,
168
+ params,
169
+ lr=3e-02,
170
+ clipping_scale=None,
171
+ betas=(0.9, 0.98),
172
+ scalar_lr_scale=0.1,
173
+ eps=1.0e-08,
174
+ param_min_rms=1.0e-05,
175
+ param_max_rms=3.0,
176
+ scalar_max=10.0,
177
+ size_update_period=4,
178
+ clipping_update_period=100,
179
+ parameters_names=None,
180
+ show_dominant_parameters=True, ):
181
+
182
+ assert parameters_names is not None, (
183
+ "Please prepare parameters_names,"
184
+ "which is a List[List[str]]. Each List[str] is for a group"
185
+ "and each str is for a parameter")
186
+ defaults = dict(
187
+ lr=lr,
188
+ clipping_scale=clipping_scale,
189
+ betas=betas,
190
+ scalar_lr_scale=scalar_lr_scale,
191
+ eps=eps,
192
+ param_min_rms=param_min_rms,
193
+ param_max_rms=param_max_rms,
194
+ scalar_max=scalar_max,
195
+ size_update_period=size_update_period,
196
+ clipping_update_period=clipping_update_period, )
197
+
198
+ super(ScaledAdam, self).__init__(params, defaults)
199
+ assert len(self.param_groups) == len(parameters_names)
200
+ self.parameters_names = parameters_names
201
+ self.show_dominant_parameters = show_dominant_parameters
202
+
203
+ def __setstate__(self, state):
204
+ super(ScaledAdam, self).__setstate__(state)
205
+
206
+ @torch.no_grad()
207
+ def step(self, closure=None):
208
+ """Performs a single optimization step.
209
+
210
+ Arguments:
211
+ closure (callable, optional): A closure that reevaluates the model
212
+ and returns the loss.
213
+ """
214
+ loss = None
215
+ if closure is not None:
216
+ with torch.enable_grad():
217
+ loss = closure()
218
+
219
+ batch = True
220
+
221
+ for group, group_params_names in zip(self.param_groups,
222
+ self.parameters_names):
223
+
224
+ with self.batched_params(group["params"],
225
+ group_params_names) as batches:
226
+
227
+ # batches is list of pairs (stacked_param, state). stacked_param is like
228
+ # a regular parameter, and will have a .grad, but the 1st dim corresponds to
229
+ # a stacking dim, it is not a real dim.
230
+
231
+ if (len(batches[0][1]) ==
232
+ 0): # if len(first state) == 0: not yet initialized
233
+ clipping_scale = 1
234
+ else:
235
+ clipping_scale = self._get_clipping_scale(group, batches)
236
+
237
+ for p, state, _ in batches:
238
+ # Perform optimization step.
239
+ # grad is not going to be None, we handled that when creating the batches.
240
+ grad = p.grad
241
+ if grad.is_sparse:
242
+ raise RuntimeError(
243
+ "ScaledAdam optimizer does not support sparse gradients"
244
+ )
245
+ # State initialization
246
+ if len(state) == 0:
247
+ self._init_state(group, p, state)
248
+
249
+ self._step_one_batch(group, p, state, clipping_scale)
250
+
251
+ return loss
252
+
253
+ def _init_state(self, group: dict, p: Tensor, state: dict):
254
+ """
255
+ Initializes state dict for parameter 'p'. Assumes that dim 0 of tensor p
256
+ is actually the batch dimension, corresponding to batched-together
257
+ parameters of a given shape.
258
+
259
+
260
+ Args:
261
+ group: Dict to look up configuration values.
262
+ p: The parameter that we are initializing the state for
263
+ state: Dict from string to whatever state we are initializing
264
+ """
265
+ size_update_period = group["size_update_period"]
266
+
267
+ state["step"] = 0
268
+
269
+ kwargs = {"device": p.device, "dtype": p.dtype}
270
+
271
+ # 'delta' implements conventional momentum. There are
272
+ # several different kinds of update going on, so rather than
273
+ # compute "exp_avg" like in Adam, we store and decay a
274
+ # parameter-change "delta", which combines all forms of
275
+ # update. this is equivalent to how it's done in Adam,
276
+ # except for the first few steps.
277
+ state["delta"] = torch.zeros_like(
278
+ p, memory_format=torch.preserve_format)
279
+
280
+ batch_size = p.shape[0]
281
+ numel = p.numel() // batch_size
282
+ numel = p.numel()
283
+
284
+ if numel > 1:
285
+ # "param_rms" just periodically records the scalar root-mean-square value of
286
+ # the parameter tensor.
287
+ # it has a shape like (batch_size, 1, 1, 1, 1)
288
+ param_rms = (
289
+ (p**2).mean(dim=list(range(1, p.ndim)), keepdim=True).sqrt())
290
+ state["param_rms"] = param_rms
291
+
292
+ state["scale_exp_avg_sq"] = torch.zeros_like(param_rms)
293
+ state["scale_grads"] = torch.zeros(size_update_period,
294
+ *param_rms.shape, **kwargs)
295
+
296
+ # exp_avg_sq is the weighted sum of scaled gradients. as in Adam.
297
+ state["exp_avg_sq"] = torch.zeros_like(
298
+ p, memory_format=torch.preserve_format)
299
+
300
+ def _get_clipping_scale(self,
301
+ group: dict,
302
+ tuples: List[Tuple[Tensor, dict, List[str]]]
303
+ ) -> float:
304
+ """
305
+ Returns a scalar factor <= 1.0 that dictates gradient clipping, i.e. we will scale the gradients
306
+ by this amount before applying the rest of the update.
307
+
308
+ Args:
309
+ group: the parameter group, an item in self.param_groups
310
+ tuples: a list of tuples of (param, state, param_names)
311
+ where param is a batched set of parameters,
312
+ with a .grad (1st dim is batch dim)
313
+ and state is the state-dict where optimization parameters are kept.
314
+ param_names is a List[str] while each str is name for a parameter
315
+ in batched set of parameters "param".
316
+ """
317
+ assert len(tuples) >= 1
318
+ clipping_scale = group["clipping_scale"]
319
+ (first_p, first_state, _) = tuples[0]
320
+ step = first_state["step"]
321
+ if clipping_scale is None or step == 0:
322
+ # no clipping. return early on step == 0 because the other
323
+ # parameters' state won't have been initialized yet.
324
+ return 1.0
325
+ clipping_update_period = group["clipping_update_period"]
326
+
327
+ tot_sumsq = torch.tensor(0.0, device=first_p.device)
328
+ for (p, state, param_names) in tuples:
329
+ grad = p.grad
330
+ if grad.is_sparse:
331
+ raise RuntimeError(
332
+ "ScaledAdam optimizer does not support sparse gradients")
333
+ if p.numel() == p.shape[0]: # a batch of scalars
334
+ tot_sumsq += (grad**2).sum() # sum() to change shape [1] to []
335
+ else:
336
+ tot_sumsq += ((grad * state["param_rms"])**2).sum()
337
+
338
+ tot_norm = tot_sumsq.sqrt()
339
+ if "model_norms" not in first_state:
340
+ first_state["model_norms"] = torch.zeros(
341
+ clipping_update_period, device=p.device)
342
+ first_state["model_norms"][step % clipping_update_period] = tot_norm
343
+
344
+ if step % clipping_update_period == 0:
345
+ # Print some stats.
346
+ # We don't reach here if step == 0 because we would have returned
347
+ # above.
348
+ sorted_norms = first_state["model_norms"].sort()[0].to("cpu")
349
+ quartiles = []
350
+ for n in range(0, 5):
351
+ index = min(
352
+ clipping_update_period - 1,
353
+ (clipping_update_period // 4) * n, )
354
+ quartiles.append(sorted_norms[index].item())
355
+
356
+ median = quartiles[2]
357
+ threshold = clipping_scale * median
358
+ first_state["model_norm_threshold"] = threshold
359
+ percent_clipped = (first_state["num_clipped"] * 100.0 /
360
+ clipping_update_period
361
+ if "num_clipped" in first_state else 0.0)
362
+ first_state["num_clipped"] = 0
363
+ quartiles = " ".join(["%.3e" % x for x in quartiles])
364
+ logging.info(
365
+ f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, "
366
+ f"threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}"
367
+ )
368
+
369
+ if step < clipping_update_period:
370
+ return 1.0 # We have not yet estimated a norm to clip to.
371
+ else:
372
+ try:
373
+ model_norm_threshold = first_state["model_norm_threshold"]
374
+ except KeyError:
375
+ logging.info(
376
+ "Warning: model_norm_threshold not in state: possibly "
377
+ "you changed config when restarting, adding clipping_scale option?"
378
+ )
379
+ return 1.0
380
+ ans = min(1.0, (model_norm_threshold / (tot_norm + 1.0e-20)).item())
381
+ if ans < 1.0:
382
+ first_state["num_clipped"] += 1
383
+ if ans < 0.1:
384
+ logging.warn(
385
+ f"Scaling gradients by {ans}, model_norm_threshold={model_norm_threshold}"
386
+ )
387
+ if self.show_dominant_parameters:
388
+ assert p.shape[0] == len(param_names)
389
+ self._show_gradient_dominating_parameter(tuples, tot_sumsq)
390
+ return ans
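In short, the clipping factor is derived from the median of the gradient norms seen over the last `clipping_update_period` steps. A condensed sketch of that rule (illustrative, not the repository code):

def clip_factor(recent_norms, current_norm, clipping_scale):
    # threshold = clipping_scale * median of recently observed norms
    sorted_norms = sorted(recent_norms)
    median = sorted_norms[len(sorted_norms) // 2]
    threshold = clipping_scale * median
    # gradients are scaled down only when the current norm exceeds the threshold
    return min(1.0, threshold / (current_norm + 1.0e-20))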
391
+
392
+ def _show_gradient_dominating_parameter(
393
+ self, tuples: List[Tuple[Tensor, dict, List[str]]],
394
+ tot_sumsq: Tensor):
395
+ """
396
+ Show information about the parameter which dominates tot_sumsq.
397
+
398
+ Args:
399
+ tuples: a list of tuples of (param, state, param_names)
400
+ where param is a batched set of parameters,
401
+ with a .grad (1st dim is batch dim)
402
+ and state is the state-dict where optimization parameters are kept.
403
+ param_names is a List[str] while each str is name for a parameter
404
+ in batched set of parameters "param".
405
+ tot_sumsq: sumsq of all parameters. Though it could be calculated
406
+ from tuples, we still pass it to save some time.
407
+ """
408
+ all_sumsq_orig = {}
409
+ for (p, state, batch_param_names) in tuples:
410
+ # p is a stacked batch parameters.
411
+ batch_grad = p.grad
412
+ if p.numel() == p.shape[0]: # a batch of scalars
413
+ batch_sumsq_orig = batch_grad**2
414
+ # Dummy values used by the following `zip` statement.
415
+ batch_rms_orig = torch.ones(p.shape[0])
416
+ else:
417
+ batch_rms_orig = state["param_rms"]
418
+ batch_sumsq_orig = ((batch_grad * batch_rms_orig)**2).sum(
419
+ dim=list(range(1, batch_grad.ndim)))
420
+
421
+ for name, sumsq_orig, rms, grad in zip(batch_param_names,
422
+ batch_sumsq_orig,
423
+ batch_rms_orig, batch_grad):
424
+
425
+ proportion_orig = sumsq_orig / tot_sumsq
426
+ all_sumsq_orig[name] = (proportion_orig, sumsq_orig, rms, grad)
427
+
428
+ assert torch.isclose(
429
+ sum([value[0] for value in all_sumsq_orig.values()]).cpu(),
430
+ torch.tensor(1.0), )
431
+ sorted_by_proportion = {
432
+ k: v
433
+ for k, v in sorted(
434
+ all_sumsq_orig.items(),
435
+ key=lambda item: item[1][0],
436
+ reverse=True, )
437
+ }
438
+ dominant_param_name = next(iter(sorted_by_proportion))
439
+ (dominant_proportion, dominant_sumsq, dominant_rms,
440
+ dominant_grad, ) = sorted_by_proportion[dominant_param_name]
441
+ logging.info(f"Parameter Dominanting tot_sumsq {dominant_param_name}"
442
+ f" with proportion {dominant_proportion:.2f},"
443
+ f" where dominant_sumsq=(grad_sumsq*orig_rms_sq)"
444
+ f"={dominant_sumsq:.3e},"
445
+ f" grad_sumsq = {(dominant_grad**2).sum():.3e},"
446
+ f" orig_rms_sq={(dominant_rms**2).item():.3e}")
447
+
448
+ def _step_one_batch(self,
449
+ group: dict,
450
+ p: Tensor,
451
+ state: dict,
452
+ clipping_scale: float):
453
+ """
454
+ Do the step for one parameter, which is actually going to be a batch of
455
+ `real` parameters, with dim 0 as the batch dim.
456
+ Args:
457
+ group: dict to look up configuration values
458
+ p: parameter to update (actually multiple parameters stacked together
459
+ as a batch)
460
+ state: state-dict for p, to look up the optimizer state
461
+ """
462
+ lr = group["lr"]
463
+ size_update_period = group["size_update_period"]
464
+ beta1 = group["betas"][0]
465
+
466
+ grad = p.grad
467
+ if clipping_scale != 1.0:
468
+ grad = grad * clipping_scale
469
+ step = state["step"]
470
+ delta = state["delta"]
471
+
472
+ delta.mul_(beta1)
473
+ batch_size = p.shape[0]
474
+ numel = p.numel() // batch_size
475
+ if numel > 1:
476
+ # Update the size/scale of p, and set param_rms
477
+ scale_grads = state["scale_grads"]
478
+ scale_grads[step % size_update_period] = (p * grad).sum(
479
+ dim=list(range(1, p.ndim)), keepdim=True)
480
+ if step % size_update_period == size_update_period - 1:
481
+ param_rms = state["param_rms"] # shape: (batch_size, 1, 1, ..)
482
+ param_rms.copy_((p**2)
483
+ .mean(dim=list(range(1, p.ndim)), keepdim=True)
484
+ .sqrt())
485
+ if step > 0:
486
+ # self._size_update() learns the overall scale on the
487
+ # parameter, by shrinking or expanding it.
488
+ self._size_update(group, scale_grads, p, state)
489
+
490
+ if numel == 1:
491
+ # For parameters with 1 element we just use regular Adam.
492
+ # Updates delta.
493
+ self._step_scalar(group, p, state)
494
+ else:
495
+ self._step(group, p, state)
496
+
497
+ state["step"] = step + 1
498
+
499
+ def _size_update(self,
500
+ group: dict,
501
+ scale_grads: Tensor,
502
+ p: Tensor,
503
+ state: dict) -> None:
504
+ """
505
+ Called only where p.numel() > 1, this updates the scale of the parameter.
506
+ If we imagine: p = underlying_param * scale.exp(), and we are doing
507
+ gradient descent on underlying param and on scale, this function does the update
508
+ on `scale`.
509
+
510
+ Args:
511
+ group: dict to look up configuration values
512
+ scale_grads: a tensor of shape (size_update_period, batch_size, 1, 1,...) containing
513
+ grads w.r.t. the scales.
514
+ p: The parameter to update
515
+ state: The state-dict of p
516
+ """
517
+
518
+ param_rms = state["param_rms"]
519
+ beta1, beta2 = group["betas"]
520
+ size_lr = group["lr"] * group["scalar_lr_scale"]
521
+ param_min_rms = group["param_min_rms"]
522
+ param_max_rms = group["param_max_rms"]
523
+ eps = group["eps"]
524
+ step = state["step"]
525
+ batch_size = p.shape[0]
526
+
527
+ size_update_period = scale_grads.shape[0]
528
+ # correct beta2 for the size update period: we will have
529
+ # faster decay at this level.
530
+ beta2_corr = beta2**size_update_period
531
+
532
+ scale_exp_avg_sq = state[
533
+ "scale_exp_avg_sq"] # shape: (batch_size, 1, 1, ..)
534
+ scale_exp_avg_sq.mul_(beta2_corr).add_(
535
+ (scale_grads**2).mean(dim=0), # mean over dim `size_update_period`
536
+ alpha=1 - beta2_corr, ) # shape is (batch_size, 1, 1, ...)
537
+
538
+ # The 1st time we reach here is when size_step == 1.
539
+ size_step = (step + 1) // size_update_period
540
+ bias_correction2 = 1 - beta2_corr**size_step
541
+ # we don't bother with bias_correction1; this will help prevent divergence
542
+ # at the start of training.
543
+
544
+ denom = scale_exp_avg_sq.sqrt() + eps
545
+
546
+ scale_step = (-size_lr * (bias_correction2**0.5) *
547
+ scale_grads.sum(dim=0) / denom)
548
+
549
+ is_too_small = param_rms < param_min_rms
550
+ is_too_large = param_rms > param_max_rms
551
+
552
+ # when the param gets too small, just don't shrink it any further.
553
+ scale_step.masked_fill_(is_too_small, 0.0)
554
+ # when it gets too large, stop it from getting any larger.
555
+ scale_step.masked_fill_(is_too_large, -size_lr * size_update_period)
556
+ delta = state["delta"]
557
+ # the factor of (1-beta1) relates to momentum.
558
+ delta.add_(p * scale_step, alpha=(1 - beta1))
559
+
560
+ def _step(self, group: dict, p: Tensor, state: dict):
561
+ """
562
+ This function does the core update of self.step(), in the case where the members of
563
+ the batch have more than 1 element.
564
+
565
+ Args:
566
+ group: A dict which will be used to look up configuration values
567
+ p: The parameter to be updated
568
+ grad: The grad of p
569
+ state: The state-dict corresponding to parameter p
570
+
571
+ This function modifies p.
572
+ """
573
+ grad = p.grad
574
+ lr = group["lr"]
575
+ beta1, beta2 = group["betas"]
576
+ eps = group["eps"]
577
+ param_min_rms = group["param_min_rms"]
578
+ step = state["step"]
579
+
580
+ exp_avg_sq = state["exp_avg_sq"]
581
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
582
+
583
+ this_step = state["step"] - (state["zero_step"]
584
+ if "zero_step" in state else 0)
585
+ bias_correction2 = 1 - beta2**(this_step + 1)
586
+ if bias_correction2 < 0.99:
587
+ # note: not in-place.
588
+ exp_avg_sq = exp_avg_sq * (1.0 / bias_correction2)
589
+
590
+ denom = exp_avg_sq.sqrt()
591
+ denom += eps
592
+ grad = grad / denom
593
+
594
+ alpha = -lr * (1 - beta1) * state["param_rms"].clamp(min=param_min_rms)
595
+
596
+ delta = state["delta"]
597
+ delta.add_(grad * alpha)
598
+ p.add_(delta)
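Putting `_step_one_batch` and `_step` together, the per-tensor update is Adam-like but scaled by the parameter's rms. A schematic single-tensor version (illustrative only; the real code splits this across the two methods and skips bias correction once it is close to 1):

def scaled_adam_update(p, grad, exp_avg_sq, delta, rms, lr, beta1, beta2, eps, step):
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)       # second-moment estimate
    denom = (exp_avg_sq / (1 - beta2 ** (step + 1))).sqrt() + eps
    delta.mul_(beta1).add_(grad / denom * (-lr * (1 - beta1) * rms))   # momentum on the rms-scaled update
    p.add_(delta)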
599
+
600
+ def _step_scalar(self, group: dict, p: Tensor, state: dict):
601
+ """
602
+ A simplified form of the core update for scalar tensors, where we cannot get a good
603
+ estimate of the parameter rms.
604
+ """
605
+ beta1, beta2 = group["betas"]
606
+ scalar_max = group["scalar_max"]
607
+ eps = group["eps"]
608
+ lr = group["lr"] * group["scalar_lr_scale"]
609
+ grad = p.grad
610
+
611
+ exp_avg_sq = state["exp_avg_sq"] # shape: (batch_size,)
612
+ exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
613
+
614
+ # bias_correction2 is like in Adam. Don't bother with bias_correction1;
615
+ # slower update at the start will help stability anyway.
616
+ bias_correction2 = 1 - beta2**(state["step"] + 1)
617
+ denom = (exp_avg_sq / bias_correction2).sqrt() + eps
618
+
619
+ delta = state["delta"]
620
+ delta.add_(grad / denom, alpha=-lr * (1 - beta1))
621
+ p.clamp_(min=-scalar_max, max=scalar_max)
622
+ p.add_(delta)
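Because the constructor asserts that `parameters_names` is provided, callers typically build it from `named_parameters()`, one `List[str]` per parameter group. A hedged usage sketch (the model and hyper-parameters below are placeholders):

import torch.nn as nn

model = nn.Linear(10, 10)                                   # placeholder model
names = [[name for name, _ in model.named_parameters()]]   # one List[str] per param group
optimizer = ScaledAdam(
    model.parameters(),
    lr=0.01,
    betas=(0.9, 0.95),
    clipping_scale=2.0,
    parameters_names=names,
)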
GPT_SoVITS/AR/modules/patched_mha_with_cache.py ADDED
@@ -0,0 +1,465 @@
1
+ from torch.nn.functional import *
2
+ from torch.nn.functional import (
3
+ _mha_shape_check,
4
+ _canonical_mask,
5
+ _none_or_dtype,
6
+ _in_projection_packed,
+ _in_projection,  # used when use_separate_proj_weight=True
7
+ )
8
+ from torch.nn import functional as F
9
+ import torch
10
+ # Tensor = torch.Tensor
11
+ # from typing import Callable, List, Optional, Tuple, Union
12
+
13
+
14
+ def multi_head_attention_forward_patched(
15
+ query: Tensor,
16
+ key: Tensor,
17
+ value: Tensor,
18
+ embed_dim_to_check: int,
19
+ num_heads: int,
20
+ in_proj_weight: Optional[Tensor],
21
+ in_proj_bias: Optional[Tensor],
22
+ bias_k: Optional[Tensor],
23
+ bias_v: Optional[Tensor],
24
+ add_zero_attn: bool,
25
+ dropout_p: float,
26
+ out_proj_weight: Tensor,
27
+ out_proj_bias: Optional[Tensor],
28
+ training: bool = True,
29
+ key_padding_mask: Optional[Tensor] = None,
30
+ need_weights: bool = True,
31
+ attn_mask: Optional[Tensor] = None,
32
+ use_separate_proj_weight: bool = False,
33
+ q_proj_weight: Optional[Tensor] = None,
34
+ k_proj_weight: Optional[Tensor] = None,
35
+ v_proj_weight: Optional[Tensor] = None,
36
+ static_k: Optional[Tensor] = None,
37
+ static_v: Optional[Tensor] = None,
38
+ average_attn_weights: bool = True,
39
+ is_causal: bool = False,
40
+ cache=None,
41
+ ) -> Tuple[Tensor, Optional[Tensor]]:
42
+ r"""
43
+ Args:
44
+ query, key, value: map a query and a set of key-value pairs to an output.
45
+ See "Attention Is All You Need" for more details.
46
+ embed_dim_to_check: total dimension of the model.
47
+ num_heads: parallel attention heads.
48
+ in_proj_weight, in_proj_bias: input projection weight and bias.
49
+ bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
50
+ add_zero_attn: add a new batch of zeros to the key and
51
+ value sequences at dim=1.
52
+ dropout_p: probability of an element to be zeroed.
53
+ out_proj_weight, out_proj_bias: the output projection weight and bias.
54
+ training: apply dropout if is ``True``.
55
+ key_padding_mask: if provided, specified padding elements in the key will
56
+ be ignored by the attention. This is a binary mask. When the value is True,
57
+ the corresponding value on the attention layer will be filled with -inf.
58
+ need_weights: output attn_output_weights.
59
+ Default: `True`
60
+ Note: `need_weights` defaults to `True`, but it should be set to `False`
61
+ for best performance when attention weights are not needed.
62
+ *Setting need_weights to `True`
63
+ leads to a significant performance degradation.*
64
+ attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
65
+ the batches while a 3D mask allows to specify a different mask for the entries of each batch.
66
+ is_causal: If specified, applies a causal mask as attention mask, and ignores
67
+ attn_mask for computing scaled dot product attention.
68
+ Default: ``False``.
69
+ .. warning::
70
+ is_causal provides a hint that the attn_mask is the
71
+ causal mask. Providing incorrect hints can result in
72
+ incorrect execution, including forward and backward
73
+ compatibility.
74
+ use_separate_proj_weight: the function accepts the projection weights for query, key,
75
+ and value in different forms. If false, in_proj_weight will be used, which is
76
+ a combination of q_proj_weight, k_proj_weight, v_proj_weight.
77
+ q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
78
+ static_k, static_v: static key and value used for attention operators.
79
+ average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
80
+ Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
81
+ when ``need_weights=True.``. Default: True
82
+
83
+
84
+ Shape:
85
+ Inputs:
86
+ - query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
87
+ the embedding dimension.
88
+ - key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
89
+ the embedding dimension.
90
+ - value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
91
+ the embedding dimension.
92
+ - key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
93
+ If a FloatTensor is provided, it will be directly added to the value.
94
+ If a BoolTensor is provided, the positions with the
95
+ value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
96
+ - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
97
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
98
+ S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
99
+ positions. If a BoolTensor is provided, positions with ``True``
100
+ are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
101
+ is provided, it will be added to the attention weight.
102
+ - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
103
+ N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
104
+ - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
105
+ N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
106
+
107
+ Outputs:
108
+ - attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
109
+ E is the embedding dimension.
110
+ - attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
111
+ attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
112
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
113
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
114
+ head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
115
+ """
116
+ tens_ops = (
117
+ query,
118
+ key,
119
+ value,
120
+ in_proj_weight,
121
+ in_proj_bias,
122
+ bias_k,
123
+ bias_v,
124
+ out_proj_weight,
125
+ out_proj_bias,
126
+ )
127
+ if has_torch_function(tens_ops):
128
+ return handle_torch_function(
129
+ multi_head_attention_forward,
130
+ tens_ops,
131
+ query,
132
+ key,
133
+ value,
134
+ embed_dim_to_check,
135
+ num_heads,
136
+ in_proj_weight,
137
+ in_proj_bias,
138
+ bias_k,
139
+ bias_v,
140
+ add_zero_attn,
141
+ dropout_p,
142
+ out_proj_weight,
143
+ out_proj_bias,
144
+ training=training,
145
+ key_padding_mask=key_padding_mask,
146
+ need_weights=need_weights,
147
+ attn_mask=attn_mask,
148
+ is_causal=is_causal,
149
+ use_separate_proj_weight=use_separate_proj_weight,
150
+ q_proj_weight=q_proj_weight,
151
+ k_proj_weight=k_proj_weight,
152
+ v_proj_weight=v_proj_weight,
153
+ static_k=static_k,
154
+ static_v=static_v,
155
+ average_attn_weights=average_attn_weights,
156
+ cache=cache,
157
+ )
158
+
159
+ is_batched = _mha_shape_check(
160
+ query, key, value, key_padding_mask, attn_mask, num_heads
161
+ )
162
+
163
+ # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
164
+ # is batched, run the computation and before returning squeeze the
165
+ # batch dimension so that the output doesn't carry this temporary batch dimension.
166
+ if not is_batched:
167
+ # unsqueeze if the input is unbatched
168
+ query = query.unsqueeze(1)
169
+ key = key.unsqueeze(1)
170
+ value = value.unsqueeze(1)
171
+ if key_padding_mask is not None:
172
+ key_padding_mask = key_padding_mask.unsqueeze(0)
173
+
174
+ # set up shape vars
175
+ tgt_len, bsz, embed_dim = query.shape
176
+ src_len, _, _ = key.shape
177
+
178
+ key_padding_mask = _canonical_mask(
179
+ mask=key_padding_mask,
180
+ mask_name="key_padding_mask",
181
+ other_type=_none_or_dtype(attn_mask),
182
+ other_name="attn_mask",
183
+ target_type=query.dtype,
184
+ )
185
+
186
+ if is_causal and attn_mask is None:
187
+ raise RuntimeError(
188
+ "Need attn_mask if specifying the is_causal hint. "
189
+ "You may use the Transformer module method "
190
+ "`generate_square_subsequent_mask` to create this mask."
191
+ )
192
+
193
+ if is_causal and key_padding_mask is None and not need_weights:
194
+ # when we have a kpm or need weights, we need attn_mask
195
+ # Otherwise, we use the is_causal hint go as is_causal
196
+ # indicator to SDPA.
197
+ attn_mask = None
198
+ else:
199
+ attn_mask = _canonical_mask(
200
+ mask=attn_mask,
201
+ mask_name="attn_mask",
202
+ other_type=None,
203
+ other_name="",
204
+ target_type=query.dtype,
205
+ check_other=False,
206
+ )
207
+
208
+ if key_padding_mask is not None:
209
+ # We have the attn_mask, and use that to merge kpm into it.
210
+ # Turn off use of is_causal hint, as the merged mask is no
211
+ # longer causal.
212
+ is_causal = False
213
+
214
+ assert (
215
+ embed_dim == embed_dim_to_check
216
+ ), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
217
+ if isinstance(embed_dim, torch.Tensor):
218
+ # embed_dim can be a tensor when JIT tracing
219
+ head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
220
+ else:
221
+ head_dim = embed_dim // num_heads
222
+ assert (
223
+ head_dim * num_heads == embed_dim
224
+ ), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
225
+ if use_separate_proj_weight:
226
+ # allow MHA to have different embedding dimensions when separate projection weights are used
227
+ assert (
228
+ key.shape[:2] == value.shape[:2]
229
+ ), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
230
+ else:
231
+ assert (
232
+ key.shape == value.shape
233
+ ), f"key shape {key.shape} does not match value shape {value.shape}"
234
+
235
+ #
236
+ # compute in-projection
237
+ #
238
+ if not use_separate_proj_weight:
239
+ assert (
240
+ in_proj_weight is not None
241
+ ), "use_separate_proj_weight is False but in_proj_weight is None"
242
+ q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
243
+ else:
244
+ assert (
245
+ q_proj_weight is not None
246
+ ), "use_separate_proj_weight is True but q_proj_weight is None"
247
+ assert (
248
+ k_proj_weight is not None
249
+ ), "use_separate_proj_weight is True but k_proj_weight is None"
250
+ assert (
251
+ v_proj_weight is not None
252
+ ), "use_separate_proj_weight is True but v_proj_weight is None"
253
+ if in_proj_bias is None:
254
+ b_q = b_k = b_v = None
255
+ else:
256
+ b_q, b_k, b_v = in_proj_bias.chunk(3)
257
+ q, k, v = _in_projection(
258
+ query,
259
+ key,
260
+ value,
261
+ q_proj_weight,
262
+ k_proj_weight,
263
+ v_proj_weight,
264
+ b_q,
265
+ b_k,
266
+ b_v,
267
+ )
268
+ if cache is not None:
269
+ if cache["first_infer"] == 1:
270
+ cache["k"][cache["stage"]] = k
271
+ # print(0,cache["k"].shape)
272
+ cache["v"][cache["stage"]] = v
273
+ else:  ### each of the 12 layers keeps its own KV cache
274
+ # print(1,cache["k"].shape)
275
+ cache["k"][cache["stage"]] = torch.cat(
276
+ [cache["k"][cache["stage"]], k], 0
277
+ )  ## the time axis was originally dim 1, but the projection may transpose it, so time ends up on dim 0
278
+ cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]], v], 0)
279
+ # print(2, cache["k"].shape)
280
+ src_len = cache["k"][cache["stage"]].shape[0]
281
+ k = cache["k"][cache["stage"]]
282
+ v = cache["v"][cache["stage"]]
283
+ # if attn_mask is not None:
284
+ # attn_mask=attn_mask[-1:,]
285
+ # print(attn_mask.shape,attn_mask)
286
+ cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]
287
+ # print(2333,cache)
288
+ # prep attention mask
289
+
290
+ attn_mask = _canonical_mask(
291
+ mask=attn_mask,
292
+ mask_name="attn_mask",
293
+ other_type=None,
294
+ other_name="",
295
+ target_type=q.dtype,
296
+ check_other=False,
297
+ )
298
+
299
+ if attn_mask is not None:
300
+ # ensure attn_mask's dim is 3
301
+ if attn_mask.dim() == 2:
302
+ correct_2d_size = (tgt_len, src_len)
303
+ if attn_mask.shape != correct_2d_size:
304
+ raise RuntimeError(
305
+ f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
306
+ )
307
+ attn_mask = attn_mask.unsqueeze(0)
308
+ elif attn_mask.dim() == 3:
309
+ correct_3d_size = (bsz * num_heads, tgt_len, src_len)
310
+ if attn_mask.shape != correct_3d_size:
311
+ raise RuntimeError(
312
+ f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
313
+ )
314
+ else:
315
+ raise RuntimeError(
316
+ f"attn_mask's dimension {attn_mask.dim()} is not supported"
317
+ )
318
+
319
+ # add bias along batch dimension (currently second)
320
+ if bias_k is not None and bias_v is not None:
321
+ assert static_k is None, "bias cannot be added to static key."
322
+ assert static_v is None, "bias cannot be added to static value."
323
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
324
+ v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
325
+ if attn_mask is not None:
326
+ attn_mask = pad(attn_mask, (0, 1))
327
+ if key_padding_mask is not None:
328
+ key_padding_mask = pad(key_padding_mask, (0, 1))
329
+ else:
330
+ assert bias_k is None
331
+ assert bias_v is None
332
+
333
+ #
334
+ # reshape q, k, v for multihead attention and make em batch first
335
+ #
336
+ q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
337
+ if static_k is None:
338
+ k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
339
+ else:
340
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
341
+ assert (
342
+ static_k.size(0) == bsz * num_heads
343
+ ), f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
344
+ assert (
345
+ static_k.size(2) == head_dim
346
+ ), f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
347
+ k = static_k
348
+ if static_v is None:
349
+ v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
350
+ else:
351
+ # TODO finish disentangling control flow so we don't do in-projections when statics are passed
352
+ assert (
353
+ static_v.size(0) == bsz * num_heads
354
+ ), f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
355
+ assert (
356
+ static_v.size(2) == head_dim
357
+ ), f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
358
+ v = static_v
359
+
360
+ # add zero attention along batch dimension (now first)
361
+ if add_zero_attn:
362
+ zero_attn_shape = (bsz * num_heads, 1, head_dim)
363
+ k = torch.cat(
364
+ [k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1
365
+ )
366
+ v = torch.cat(
367
+ [v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1
368
+ )
369
+ if attn_mask is not None:
370
+ attn_mask = pad(attn_mask, (0, 1))
371
+ if key_padding_mask is not None:
372
+ key_padding_mask = pad(key_padding_mask, (0, 1))
373
+
374
+ # update source sequence length after adjustments
375
+ src_len = k.size(1)
376
+
377
+ # merge key padding and attention masks
378
+ if key_padding_mask is not None:
379
+ assert key_padding_mask.shape == (
380
+ bsz,
381
+ src_len,
382
+ ), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
383
+ key_padding_mask = (
384
+ key_padding_mask.view(bsz, 1, 1, src_len)
385
+ .expand(-1, num_heads, -1, -1)
386
+ .reshape(bsz * num_heads, 1, src_len)
387
+ )
388
+ if attn_mask is None:
389
+ attn_mask = key_padding_mask
390
+ else:
391
+ attn_mask = attn_mask + key_padding_mask
392
+
393
+ # adjust dropout probability
394
+ if not training:
395
+ dropout_p = 0.0
396
+
397
+ #
398
+ # (deep breath) calculate attention and out projection
399
+ #
400
+
401
+ if need_weights:
402
+ B, Nt, E = q.shape
403
+ q_scaled = q / math.sqrt(E)
404
+
405
+ assert not (
406
+ is_causal and attn_mask is None
407
+ ), "FIXME: is_causal not implemented for need_weights"
408
+
409
+ if attn_mask is not None:
410
+ attn_output_weights = torch.baddbmm(
411
+ attn_mask, q_scaled, k.transpose(-2, -1)
412
+ )
413
+ else:
414
+ attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
415
+ attn_output_weights = softmax(attn_output_weights, dim=-1)
416
+ if dropout_p > 0.0:
417
+ attn_output_weights = dropout(attn_output_weights, p=dropout_p)
418
+
419
+ attn_output = torch.bmm(attn_output_weights, v)
420
+
421
+ attn_output = (
422
+ attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
423
+ )
424
+ attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
425
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
426
+
427
+ # optionally average attention weights over heads
428
+ attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
429
+ if average_attn_weights:
430
+ attn_output_weights = attn_output_weights.mean(dim=1)
431
+
432
+ if not is_batched:
433
+ # squeeze the output if input was unbatched
434
+ attn_output = attn_output.squeeze(1)
435
+ attn_output_weights = attn_output_weights.squeeze(0)
436
+ return attn_output, attn_output_weights
437
+ else:
438
+ # attn_mask can be either (L,S) or (N*num_heads, L, S)
439
+ # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
440
+ # in order to match the input for SDPA of (N, num_heads, L, S)
441
+ if attn_mask is not None:
442
+ if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
443
+ attn_mask = attn_mask.unsqueeze(0)
444
+ else:
445
+ attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
446
+
447
+ q = q.view(bsz, num_heads, tgt_len, head_dim)
448
+ k = k.view(bsz, num_heads, src_len, head_dim)
449
+ v = v.view(bsz, num_heads, src_len, head_dim)
450
+
451
+ # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
452
+ attn_output = scaled_dot_product_attention(
453
+ q, k, v, attn_mask, dropout_p, is_causal
454
+ )
455
+
456
+ attn_output = (
457
+ attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
458
+ )
459
+
460
+ attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
461
+ attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
462
+ if not is_batched:
463
+ # squeeze the output if input was unbatched
464
+ attn_output = attn_output.squeeze(1)
465
+ return attn_output, None
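The `cache` argument is a plain dict shared across decoding steps and across the attention layers. Based on how it is read above, its expected layout looks roughly like the sketch below (the field names come from the code; the sizes are illustrative):

num_layers = 24                # illustrative; one KV slot per attention layer
cache = {
    "all_stage": num_layers,   # how many layers rotate through this cache
    "stage": 0,                # index of the slot used by the current call
    "first_infer": 1,          # 1 on the first step (store K/V), 0 afterwards (concatenate)
    "k": [None] * num_layers,  # per-layer key cache, time-major tensors
    "v": [None] * num_layers,  # per-layer value cache
}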
GPT_SoVITS/AR/modules/patched_mha_with_cache_onnx.py ADDED
@@ -0,0 +1,92 @@
1
+ from torch.nn.functional import *
2
+ from torch.nn.functional import (
3
+ _mha_shape_check,
4
+ _canonical_mask,
5
+ _none_or_dtype,
6
+ _in_projection_packed,
7
+ )
8
+
9
+ def multi_head_attention_forward_patched(
10
+ query,
11
+ key,
12
+ value,
13
+ embed_dim_to_check: int,
14
+ num_heads: int,
15
+ in_proj_weight,
16
+ in_proj_bias: Optional[Tensor],
17
+ bias_k: Optional[Tensor],
18
+ bias_v: Optional[Tensor],
19
+ add_zero_attn: bool,
20
+ dropout_p: float,
21
+ out_proj_weight: Tensor,
22
+ out_proj_bias: Optional[Tensor],
23
+ training: bool = True,
24
+ key_padding_mask: Optional[Tensor] = None,
25
+ need_weights: bool = True,
26
+ attn_mask: Optional[Tensor] = None,
27
+ use_separate_proj_weight: bool = False,
28
+ q_proj_weight: Optional[Tensor] = None,
29
+ k_proj_weight: Optional[Tensor] = None,
30
+ v_proj_weight: Optional[Tensor] = None,
31
+ static_k: Optional[Tensor] = None,
32
+ static_v: Optional[Tensor] = None,
33
+ average_attn_weights: bool = True,
34
+ is_causal: bool = False,
35
+ cache=None,
36
+ ) -> Tuple[Tensor, Optional[Tensor]]:
37
+
38
+ # set up shape vars
39
+ _, _, embed_dim = query.shape
40
+ attn_mask = _canonical_mask(
41
+ mask=attn_mask,
42
+ mask_name="attn_mask",
43
+ other_type=None,
44
+ other_name="",
45
+ target_type=query.dtype,
46
+ check_other=False,
47
+ )
48
+ head_dim = embed_dim // num_heads
49
+
50
+ proj_qkv = linear(query, in_proj_weight, in_proj_bias)
51
+ proj_qkv = proj_qkv.unflatten(-1, (3, query.size(-1))).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
52
+ q, k, v = proj_qkv[0], proj_qkv[1], proj_qkv[2]
53
+
54
+ if cache["first_infer"] == 1:
55
+ cache["k"][cache["stage"]] = k
56
+ cache["v"][cache["stage"]] = v
57
+ else:
58
+ cache["k"][cache["stage"]] = torch.cat([cache["k"][cache["stage"]][:-1], k], 0)
59
+ cache["v"][cache["stage"]] = torch.cat([cache["v"][cache["stage"]][:-1], v], 0)
60
+ k = cache["k"][cache["stage"]]
61
+ v = cache["v"][cache["stage"]]
62
+ cache["stage"] = (cache["stage"] + 1) % cache["all_stage"]
63
+
64
+ attn_mask = _canonical_mask(
65
+ mask=attn_mask,
66
+ mask_name="attn_mask",
67
+ other_type=None,
68
+ other_name="",
69
+ target_type=q.dtype,
70
+ check_other=False,
71
+ )
72
+ attn_mask = attn_mask.unsqueeze(0)
73
+
74
+ q = q.view(-1, num_heads, head_dim).transpose(0, 1)
75
+ k = k.view(-1, num_heads, head_dim).transpose(0, 1)
76
+ v = v.view(-1, num_heads, head_dim).transpose(0, 1)
77
+
78
+ dropout_p = 0.0
79
+ attn_mask = attn_mask.unsqueeze(0)
80
+ q = q.view(num_heads, -1, head_dim).unsqueeze(0)
81
+ k = k.view(num_heads, -1, head_dim).unsqueeze(0)
82
+ v = v.view(num_heads, -1, head_dim).unsqueeze(0)
83
+ attn_output = scaled_dot_product_attention(
84
+ q, k, v, attn_mask, dropout_p, is_causal
85
+ )
86
+ attn_output = (
87
+ attn_output.permute(2, 0, 1, 3).contiguous().view(-1, embed_dim)
88
+ )
89
+ attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
90
+ attn_output = attn_output.view(-1, 1, attn_output.size(1))
91
+
92
+ return attn_output
GPT_SoVITS/AR/modules/scaling.py ADDED
@@ -0,0 +1,335 @@
1
+ # Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
2
+ #
3
+ # See ../../../../LICENSE for clarification regarding multiple authors
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import logging
17
+ import math
18
+ import random
19
+ from typing import Optional
20
+ from typing import Tuple
21
+ from typing import Union
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ from torch import Tensor
26
+
27
+
28
+ class DoubleSwishFunction(torch.autograd.Function):
29
+ """
30
+ double_swish(x) = x * torch.sigmoid(x-1)
31
+ This is a definition, originally motivated by its close numerical
32
+ similarity to swish(swish(x)), where swish(x) = x * sigmoid(x).
33
+
34
+ Memory-efficient derivative computation:
35
+ double_swish(x) = x * s, where s(x) = torch.sigmoid(x-1)
36
+ double_swish'(x) = d/dx double_swish(x) = x * s'(x) + x' * s(x) = x * s'(x) + s(x).
37
+ Now, s'(x) = s(x) * (1-s(x)).
38
+ double_swish'(x) = x * s'(x) + s(x).
39
+ = x * s(x) * (1-s(x)) + s(x).
40
+ = double_swish(x) * (1-s(x)) + s(x)
41
+ ... so we just need to remember s(x) but not x itself.
42
+ """
43
+
44
+ @staticmethod
45
+ def forward(ctx, x: Tensor) -> Tensor:
46
+ requires_grad = x.requires_grad
47
+ x_dtype = x.dtype
48
+ if x.dtype == torch.float16:
49
+ x = x.to(torch.float32)
50
+
51
+ s = torch.sigmoid(x - 1.0)
52
+ y = x * s
53
+
54
+ if requires_grad:
55
+ deriv = y * (1 - s) + s
56
+ # notes on derivative of x * sigmoid(x - 1):
57
+ # https://www.wolframalpha.com/input?i=d%2Fdx+%28x+*+sigmoid%28x-1%29%29
58
+ # min \simeq -0.043638. Take floor as -0.043637 so it's a lower bound
59
+ # max \simeq 1.1990. Take ceil to be 1.2 so it's an upper bound.
60
+ # the combination of "+ torch.rand_like(deriv)" and casting to torch.uint8 (which
61
+ # floors), should be expectation-preserving.
62
+ floor = -0.043637
63
+ ceil = 1.2
64
+ d_scaled = (deriv - floor) * (255.0 / (ceil - floor)) + torch.rand_like(
65
+ deriv
66
+ )
67
+ if __name__ == "__main__":
68
+ # for self-testing only.
69
+ assert d_scaled.min() >= 0.0
70
+ assert d_scaled.max() < 256.0
71
+ d_int = d_scaled.to(torch.uint8)
72
+ ctx.save_for_backward(d_int)
73
+ if x_dtype == torch.float16 or torch.is_autocast_enabled():  # use the dtype recorded before the float32 upcast
74
+ y = y.to(torch.float16)
75
+ return y
76
+
77
+ @staticmethod
78
+ def backward(ctx, y_grad: Tensor) -> Tensor:
79
+ (d,) = ctx.saved_tensors
80
+ # the same constants as used in forward pass.
81
+ floor = -0.043637
82
+ ceil = 1.2
83
+ d = d * ((ceil - floor) / 255.0) + floor
84
+ return y_grad * d
85
+
86
+
87
+ class DoubleSwish(torch.nn.Module):
88
+ def forward(self, x: Tensor) -> Tensor:
89
+ """Return double-swish activation function which is an approximation to Swish(Swish(x)),
90
+ that we approximate closely with x * sigmoid(x-1).
91
+ """
92
+ if torch.jit.is_scripting() or torch.jit.is_tracing():
93
+ return x * torch.sigmoid(x - 1.0)
94
+ return DoubleSwishFunction.apply(x)
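A quick numerical check of the identity the docstring states, double_swish(x) = x * sigmoid(x - 1) (illustrative only; the backward pass uses the uint8-quantized derivative saved in forward(), so gradients match the true derivative only in expectation):

import torch

x = torch.linspace(-4.0, 4.0, steps=9, requires_grad=True)
y = DoubleSwish()(x)
assert torch.allclose(y, x * torch.sigmoid(x - 1.0))   # the forward value is exact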
95
+
96
+
97
+ class ActivationBalancerFunction(torch.autograd.Function):
98
+ @staticmethod
99
+ def forward(
100
+ ctx,
101
+ x: Tensor,
102
+ scale_factor: Tensor,
103
+ sign_factor: Optional[Tensor],
104
+ channel_dim: int,
105
+ ) -> Tensor:
106
+ if channel_dim < 0:
107
+ channel_dim += x.ndim
108
+ ctx.channel_dim = channel_dim
109
+ xgt0 = x > 0
110
+ if sign_factor is None:
111
+ ctx.save_for_backward(xgt0, scale_factor)
112
+ else:
113
+ ctx.save_for_backward(xgt0, scale_factor, sign_factor)
114
+ return x
115
+
116
+ @staticmethod
117
+ def backward(ctx, x_grad: Tensor) -> Tuple[Tensor, None, None, None]:
118
+ if len(ctx.saved_tensors) == 3:
119
+ xgt0, scale_factor, sign_factor = ctx.saved_tensors
120
+ for _ in range(ctx.channel_dim, x_grad.ndim - 1):
121
+ scale_factor = scale_factor.unsqueeze(-1)
122
+ sign_factor = sign_factor.unsqueeze(-1)
123
+ factor = sign_factor + scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
124
+ else:
125
+ xgt0, scale_factor = ctx.saved_tensors
126
+ for _ in range(ctx.channel_dim, x_grad.ndim - 1):
127
+ scale_factor = scale_factor.unsqueeze(-1)
128
+ factor = scale_factor * (xgt0.to(x_grad.dtype) - 0.5)
129
+ neg_delta_grad = x_grad.abs() * factor
130
+ return (
131
+ x_grad - neg_delta_grad,
132
+ None,
133
+ None,
134
+ None,
135
+ )
136
+
137
+
138
+ def _compute_scale_factor(
139
+ x: Tensor,
140
+ channel_dim: int,
141
+ min_abs: float,
142
+ max_abs: float,
143
+ gain_factor: float,
144
+ max_factor: float,
145
+ ) -> Tensor:
146
+ if channel_dim < 0:
147
+ channel_dim += x.ndim
148
+ sum_dims = [d for d in range(x.ndim) if d != channel_dim]
149
+ x_abs_mean = torch.mean(x.abs(), dim=sum_dims).to(torch.float32)
150
+
151
+ if min_abs == 0.0:
152
+ below_threshold = 0.0
153
+ else:
154
+ # below_threshold is 0 if x_abs_mean > min_abs; it can be at most max_factor if
155
+ # x_abs_mean << min_abs.
156
+ below_threshold = ((min_abs - x_abs_mean) * (gain_factor / min_abs)).clamp(
157
+ min=0, max=max_factor
158
+ )
159
+
160
+ above_threshold = ((x_abs_mean - max_abs) * (gain_factor / max_abs)).clamp(
161
+ min=0, max=max_factor
162
+ )
163
+
164
+ return below_threshold - above_threshold
165
+
166
+
167
+ def _compute_sign_factor(
168
+ x: Tensor,
169
+ channel_dim: int,
170
+ min_positive: float,
171
+ max_positive: float,
172
+ gain_factor: float,
173
+ max_factor: float,
174
+ ) -> Tensor:
175
+ if channel_dim < 0:
176
+ channel_dim += x.ndim
177
+ sum_dims = [d for d in range(x.ndim) if d != channel_dim]
178
+ proportion_positive = torch.mean((x > 0).to(torch.float32), dim=sum_dims)
179
+ if min_positive == 0.0:
180
+ factor1 = 0.0
181
+ else:
182
+ # 0 if proportion_positive >= min_positive, else can be
183
+ # as large as max_factor.
184
+ factor1 = (
185
+ (min_positive - proportion_positive) * (gain_factor / min_positive)
186
+ ).clamp_(min=0, max=max_factor)
187
+
188
+ if max_positive == 1.0:
189
+ factor2 = 0.0
190
+ else:
191
+ # 0 if self.proportion_positive <= max_positive, else can be
192
+ # as large as -max_factor.
193
+ factor2 = (
194
+ (proportion_positive - max_positive) * (gain_factor / (1.0 - max_positive))
195
+ ).clamp_(min=0, max=max_factor)
196
+ sign_factor = factor1 - factor2
197
+ # require min_positive != 0 or max_positive != 1:
198
+ assert not isinstance(sign_factor, float)
199
+ return sign_factor
200
+
201
+
202
+ class ActivationBalancer(torch.nn.Module):
203
+ """
204
+ Modifies the backpropped derivatives of a function to try to encourage, for
205
+ each channel, that it is positive at least a proportion `threshold` of the
206
+ time. It does this by multiplying negative derivative values by up to
207
+ (1+max_factor), and positive derivative values by up to (1-max_factor),
208
+ interpolated from 1 at the threshold to those extremal values when none
209
+ of the inputs are positive.
210
+
211
+ Args:
212
+ num_channels: the number of channels
213
+ channel_dim: the dimension/axis corresponding to the channel, e.g.
214
+ -1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
215
+ min_positive: the minimum, per channel, of the proportion of the time
216
+ that (x > 0), below which we start to modify the derivatives.
217
+ max_positive: the maximum, per channel, of the proportion of the time
218
+ that (x > 0), above which we start to modify the derivatives.
219
+ max_factor: the maximum factor by which we modify the derivatives for
220
+ either the sign constraint or the magnitude constraint;
221
+ e.g. with max_factor=0.02, the derivatives would be multiplied by
222
+ values in the range [0.98..1.02].
223
+ sign_gain_factor: determines the 'gain' with which we increase the
224
+ change in gradient once the constraints on min_positive and max_positive
225
+ are violated.
226
+ scale_gain_factor: determines the 'gain' with which we increase the
227
+ change in gradient once the constraints on min_abs and max_abs
228
+ are violated.
229
+ min_abs: the minimum average-absolute-value difference from the mean
230
+ value per channel, which we allow, before we start to modify
231
+ the derivatives to prevent this.
232
+ max_abs: the maximum average-absolute-value difference from the mean
233
+ value per channel, which we allow, before we start to modify
234
+ the derivatives to prevent this.
235
+ min_prob: determines the minimum probability with which we modify the
236
+ gradients for the {min,max}_positive and {min,max}_abs constraints,
237
+ on each forward(). This is done randomly to prevent all layers
238
+ from doing it at the same time. Early in training we may use
239
+ higher probabilities than this; it will decay to this value.
240
+ """
241
+
242
+ def __init__(
243
+ self,
244
+ num_channels: int,
245
+ channel_dim: int,
246
+ min_positive: float = 0.05,
247
+ max_positive: float = 0.95,
248
+ max_factor: float = 0.04,
249
+ sign_gain_factor: float = 0.01,
250
+ scale_gain_factor: float = 0.02,
251
+ min_abs: float = 0.2,
252
+ max_abs: float = 100.0,
253
+ min_prob: float = 0.1,
254
+ ):
255
+ super(ActivationBalancer, self).__init__()
256
+ self.num_channels = num_channels
257
+ self.channel_dim = channel_dim
258
+ self.min_positive = min_positive
259
+ self.max_positive = max_positive
260
+ self.max_factor = max_factor
261
+ self.min_abs = min_abs
262
+ self.max_abs = max_abs
263
+ self.min_prob = min_prob
264
+ self.sign_gain_factor = sign_gain_factor
265
+ self.scale_gain_factor = scale_gain_factor
266
+
267
+ # count measures how many times the forward() function has been called.
268
+ # We occasionally sync this to a tensor called `count`, that exists to
269
+ # make sure it is synced to disk when we load and save the model.
270
+ self.cpu_count = 0
271
+ self.register_buffer("count", torch.tensor(0, dtype=torch.int64))
272
+
273
+ def forward(self, x: Tensor) -> Tensor:
274
+ if torch.jit.is_scripting() or not x.requires_grad or torch.jit.is_tracing():
275
+ return x  # `_no_op` is not defined in this file; returning the input unchanged has the same effect
276
+
277
+ count = self.cpu_count
278
+ self.cpu_count += 1
279
+
280
+ if random.random() < 0.01:
281
+ # Occasionally sync self.cpu_count with self.count.
282
+ # count affects the decay of 'prob'. don't do this on every iter,
283
+ # because syncing with the GPU is slow.
284
+ self.cpu_count = max(self.cpu_count, self.count.item())
285
+ self.count.fill_(self.cpu_count)
286
+
287
+ # the prob of doing some work exponentially decreases from 0.5 till it hits
288
+ # a floor at min_prob (==0.1, by default)
289
+ prob = max(self.min_prob, 0.5 ** (1 + (count / 4000.0)))
290
+
291
+ if random.random() < prob:
292
+ sign_gain_factor = 0.5
293
+ if self.min_positive != 0.0 or self.max_positive != 1.0:
294
+ sign_factor = _compute_sign_factor(
295
+ x,
296
+ self.channel_dim,
297
+ self.min_positive,
298
+ self.max_positive,
299
+ gain_factor=self.sign_gain_factor / prob,
300
+ max_factor=self.max_factor,
301
+ )
302
+ else:
303
+ sign_factor = None
304
+
305
+ scale_factor = _compute_scale_factor(
306
+ x.detach(),
307
+ self.channel_dim,
308
+ min_abs=self.min_abs,
309
+ max_abs=self.max_abs,
310
+ gain_factor=self.scale_gain_factor / prob,
311
+ max_factor=self.max_factor,
312
+ )
313
+ return ActivationBalancerFunction.apply(
314
+ x,
315
+ scale_factor,
316
+ sign_factor,
317
+ self.channel_dim,
318
+ )
319
+ else:
320
+ return _no_op(x)
321
+
322
+
323
+ def BalancedDoubleSwish(
324
+ d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25
325
+ ) -> nn.Sequential:
326
+ """
327
+ ActivationBalancer -> DoubleSwish
328
+ """
329
+ balancer = ActivationBalancer(
330
+ d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob
331
+ )
332
+ return nn.Sequential(
333
+ balancer,
334
+ DoubleSwish(),
335
+ )
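
A minimal usage sketch (not part of this upload): BalancedDoubleSwish can be dropped into a feed-forward stack as its activation. It assumes the repo root is on the Python path so that `AR.modules.scaling` is importable, matching the import used by transformer.py below.

    import torch
    from torch import nn
    from AR.modules.scaling import BalancedDoubleSwish

    d_model = 512
    ffn = nn.Sequential(
        nn.Linear(d_model, 2048),
        BalancedDoubleSwish(2048),        # ActivationBalancer + DoubleSwish over the 2048 channels
        nn.Linear(2048, d_model),
    )
    x = torch.randn(4, 100, d_model, requires_grad=True)   # (batch, time, channels)
    y = ffn(x)
    y.sum().backward()   # the balancer only rescales gradients when x requires grad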
GPT_SoVITS/AR/modules/transformer.py ADDED
@@ -0,0 +1,378 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
2
+ import copy
3
+ import numbers
4
+ from functools import partial
5
+ from typing import Any
6
+ from typing import Callable
7
+ from typing import List
8
+ from typing import Optional
9
+ from typing import Tuple
10
+ from typing import Union
11
+
12
+ import torch
13
+ from AR.modules.activation import MultiheadAttention
14
+ from AR.modules.scaling import BalancedDoubleSwish
15
+ from torch import nn
16
+ from torch import Tensor
17
+ from torch.nn import functional as F
18
+
19
+ _shape_t = Union[int, List[int], torch.Size]
20
+
21
+
22
+ class LayerNorm(nn.Module):
23
+ __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
24
+ normalized_shape: Tuple[int, ...]
25
+ eps: float
26
+ elementwise_affine: bool
27
+
28
+ def __init__(
29
+ self,
30
+ normalized_shape: _shape_t,
31
+ eps: float = 1e-5,
32
+ elementwise_affine: bool = True,
33
+ device=None,
34
+ dtype=None,
35
+ ) -> None:
36
+ factory_kwargs = {"device": device, "dtype": dtype}
37
+ super(LayerNorm, self).__init__()
38
+ if isinstance(normalized_shape, numbers.Integral):
39
+ # mypy error: incompatible types in assignment
40
+ normalized_shape = (normalized_shape,) # type: ignore[assignment]
41
+ self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
42
+ self.eps = eps
43
+ self.elementwise_affine = elementwise_affine
44
+ if self.elementwise_affine:
45
+ self.weight = nn.Parameter(
46
+ torch.empty(self.normalized_shape, **factory_kwargs)
47
+ )
48
+ self.bias = nn.Parameter(
49
+ torch.empty(self.normalized_shape, **factory_kwargs)
50
+ )
51
+ else:
52
+ self.register_parameter("weight", None)
53
+ self.register_parameter("bias", None)
54
+
55
+ self.reset_parameters()
56
+
57
+ def reset_parameters(self) -> None:
58
+ if self.elementwise_affine:
59
+ nn.init.ones_(self.weight)
60
+ nn.init.zeros_(self.bias)
61
+
62
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
63
+ if isinstance(input, tuple):
64
+ input, embedding = input
65
+ return (
66
+ F.layer_norm(
67
+ input,
68
+ self.normalized_shape,
69
+ self.weight,
70
+ self.bias,
71
+ self.eps,
72
+ ),
73
+ embedding,
74
+ )
75
+
76
+ assert embedding is None
77
+ return F.layer_norm(
78
+ input, self.normalized_shape, self.weight, self.bias, self.eps
79
+ )
80
+
81
+ def extra_repr(self) -> str:
82
+ return (
83
+ "{normalized_shape}, eps={eps}, "
84
+ "elementwise_affine={elementwise_affine}".format(**self.__dict__)
85
+ )
86
+
87
+
88
+ class IdentityNorm(nn.Module):
89
+ def __init__(
90
+ self,
91
+ d_model: int,
92
+ eps: float = 1e-5,
93
+ device=None,
94
+ dtype=None,
95
+ ) -> None:
96
+ super(IdentityNorm, self).__init__()
97
+
98
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
99
+ if isinstance(input, tuple):
100
+ return input
101
+
102
+ assert embedding is None
103
+ return input
104
+
105
+
106
+ class TransformerEncoder(nn.Module):
107
+ r"""TransformerEncoder is a stack of N encoder layers. Users can build the
108
+ BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
109
+
110
+ Args:
111
+ encoder_layer: an instance of the TransformerEncoderLayer() class (required).
112
+ num_layers: the number of sub-encoder-layers in the encoder (required).
113
+ norm: the layer normalization component (optional).
114
+ enable_nested_tensor: if True, input will automatically convert to nested tensor
115
+ (and convert back on output). This will improve the overall performance of
116
+ TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
117
+
118
+ Examples::
119
+ >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
120
+ >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
121
+ >>> src = torch.rand(10, 32, 512)
122
+ >>> out = transformer_encoder(src)
123
+ """
124
+ __constants__ = ["norm"]
125
+
126
+ def __init__(self, encoder_layer, num_layers, norm=None):
127
+ super(TransformerEncoder, self).__init__()
128
+ self.layers = _get_clones(encoder_layer, num_layers)
129
+ self.num_layers = num_layers
130
+ self.norm = norm
131
+
132
+ def forward(
133
+ self,
134
+ src: Tensor,
135
+ mask: Optional[Tensor] = None,
136
+ src_key_padding_mask: Optional[Tensor] = None,
137
+ return_layer_states: bool = False,
138
+ cache=None,
139
+ ) -> Tensor:
140
+ r"""Pass the input through the encoder layers in turn.
141
+
142
+ Args:
143
+ src: the sequence to the encoder (required).
144
+ mask: the mask for the src sequence (optional).
145
+ src_key_padding_mask: the mask for the src keys per batch (optional).
146
+ return_layer_states: return layers' state (optional).
147
+
148
+ Shape:
149
+ see the docs in Transformer class.
150
+ """
151
+ if return_layer_states:
152
+ layer_states = [] # layers' output
153
+ output = src
154
+ for mod in self.layers:
155
+ output = mod(
156
+ output,
157
+ src_mask=mask,
158
+ src_key_padding_mask=src_key_padding_mask,
159
+ cache=cache,
160
+ )
161
+ layer_states.append(output[0])
162
+
163
+ if self.norm is not None:
164
+ output = self.norm(output)
165
+
166
+ return layer_states, output
167
+
168
+ output = src
169
+ for mod in self.layers:
170
+ output = mod(
171
+ output,
172
+ src_mask=mask,
173
+ src_key_padding_mask=src_key_padding_mask,
174
+ cache=cache,
175
+ )
176
+
177
+ if self.norm is not None:
178
+ output = self.norm(output)
179
+
180
+ return output
181
+
182
+
183
+ class TransformerEncoderLayer(nn.Module):
184
+ __constants__ = ["batch_first", "norm_first"]
185
+
186
+ def __init__(
187
+ self,
188
+ d_model: int,
189
+ nhead: int,
190
+ dim_feedforward: int = 2048,
191
+ dropout: float = 0.1,
192
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
193
+ batch_first: bool = False,
194
+ norm_first: bool = False,
195
+ device=None,
196
+ dtype=None,
197
+ linear1_self_attention_cls: nn.Module = nn.Linear,
198
+ linear2_self_attention_cls: nn.Module = nn.Linear,
199
+ linear1_feedforward_cls: nn.Module = nn.Linear,
200
+ linear2_feedforward_cls: nn.Module = nn.Linear,
201
+ layer_norm_cls: nn.Module = LayerNorm,
202
+ layer_norm_eps: float = 1e-5,
203
+ adaptive_layer_norm=False,
204
+ ) -> None:
205
+ factory_kwargs = {"device": device, "dtype": dtype}
206
+ super(TransformerEncoderLayer, self).__init__()
207
+ # print(233333333333,d_model,nhead)
208
+ # import os
209
+ # os._exit(2333333)
210
+ self.self_attn = MultiheadAttention(
211
+ d_model, # 512 16
212
+ nhead,
213
+ dropout=dropout,
214
+ batch_first=batch_first,
215
+ linear1_cls=linear1_self_attention_cls,
216
+ linear2_cls=linear2_self_attention_cls,
217
+ **factory_kwargs,
218
+ )
219
+
220
+ # Implementation of Feedforward model
221
+ self.linear1 = linear1_feedforward_cls(
222
+ d_model, dim_feedforward, **factory_kwargs
223
+ )
224
+ self.dropout = nn.Dropout(dropout)
225
+ self.linear2 = linear2_feedforward_cls(
226
+ dim_feedforward, d_model, **factory_kwargs
227
+ )
228
+
229
+ self.norm_first = norm_first
230
+ self.dropout1 = nn.Dropout(dropout)
231
+ self.dropout2 = nn.Dropout(dropout)
232
+
233
+ # Legacy string support for activation function.
234
+ if isinstance(activation, str):
235
+ activation = _get_activation_fn(activation)
236
+ elif isinstance(activation, partial):
237
+ activation = activation(d_model)
238
+ elif activation == BalancedDoubleSwish:
239
+ activation = BalancedDoubleSwish(d_model)
240
+
241
+ # # We can't test self.activation in forward() in TorchScript,
242
+ # # so stash some information about it instead.
243
+ # if activation is F.relu or isinstance(activation, torch.nn.ReLU):
244
+ # self.activation_relu_or_gelu = 1
245
+ # elif activation is F.gelu or isinstance(activation, torch.nn.GELU):
246
+ # self.activation_relu_or_gelu = 2
247
+ # else:
248
+ # self.activation_relu_or_gelu = 0
249
+ self.activation = activation
250
+
251
+ norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
252
+ if layer_norm_cls == IdentityNorm:
253
+ norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
254
+ else:
255
+ norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
256
+
257
+ if adaptive_layer_norm:
258
+ self.norm1 = AdaptiveLayerNorm(d_model, norm1)
259
+ self.norm2 = AdaptiveLayerNorm(d_model, norm2)
260
+ else:
261
+ self.norm1 = norm1
262
+ self.norm2 = norm2
263
+
264
+ def __setstate__(self, state):
265
+ super(TransformerEncoderLayer, self).__setstate__(state)
266
+ if not hasattr(self, "activation"):
267
+ self.activation = F.relu
268
+
269
+ def forward(
270
+ self,
271
+ src: Tensor,
272
+ src_mask: Optional[Tensor] = None,
273
+ src_key_padding_mask: Optional[Tensor] = None,
274
+ cache=None,
275
+ ) -> Tensor:
276
+ r"""Pass the input through the encoder layer.
277
+
278
+ Args:
279
+ src: the sequence to the encoder layer (required).
280
+ src_mask: the mask for the src sequence (optional).
281
+ src_key_padding_mask: the mask for the src keys per batch (optional).
282
+
283
+ Shape:
284
+ see the docs in Transformer class.
285
+ """
286
+ x, stage_embedding = src, None
287
+ is_src_tuple = False
288
+ if isinstance(src, tuple):
289
+ x, stage_embedding = src
290
+ is_src_tuple = True
291
+
292
+ if src_key_padding_mask is not None:
293
+ _skpm_dtype = src_key_padding_mask.dtype
294
+ if _skpm_dtype != torch.bool and not torch.is_floating_point(
295
+ src_key_padding_mask
296
+ ):
297
+ raise AssertionError(
298
+ "only bool and floating types of key_padding_mask are supported"
299
+ )
300
+
301
+ if self.norm_first:
302
+ x = x + self._sa_block(
303
+ self.norm1(x, stage_embedding),
304
+ src_mask,
305
+ src_key_padding_mask,
306
+ cache=cache,
307
+ )
308
+ x = x + self._ff_block(self.norm2(x, stage_embedding))
309
+ else:
310
+ x = self.norm1(
311
+ x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
312
+ stage_embedding,
313
+ )
314
+ x = self.norm2(x + self._ff_block(x), stage_embedding)
315
+
316
+ if is_src_tuple:
317
+ return (x, stage_embedding)
318
+ return x
319
+
320
+ # self-attention block
321
+ def _sa_block(
322
+ self,
323
+ x: Tensor,
324
+ attn_mask: Optional[Tensor],
325
+ key_padding_mask: Optional[Tensor],
326
+ cache=None,
327
+ ) -> Tensor:
328
+ # print(x.shape,attn_mask.shape,key_padding_mask)
329
+ # torch.Size([1, 188, 512]) torch.Size([188, 188]) None
330
+ # import os
331
+ # os._exit(23333)
332
+ x = self.self_attn(
333
+ x,
334
+ x,
335
+ x,
336
+ attn_mask=attn_mask,
337
+ key_padding_mask=key_padding_mask,
338
+ need_weights=False,
339
+ cache=cache,
340
+ )[0]
341
+ return self.dropout1(x)
342
+
343
+ # feed forward block
344
+ def _ff_block(self, x: Tensor) -> Tensor:
345
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
346
+ return self.dropout2(x)
347
+
348
+
349
+ class AdaptiveLayerNorm(nn.Module):
350
+ r"""Adaptive Layer Normalization"""
351
+
352
+ def __init__(self, d_model, norm) -> None:
353
+ super(AdaptiveLayerNorm, self).__init__()
354
+ self.project_layer = nn.Linear(d_model, 2 * d_model)
355
+ self.norm = norm
356
+ self.d_model = d_model
357
+ self.eps = self.norm.eps
358
+
359
+ def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
360
+ if isinstance(input, tuple):
361
+ input, embedding = input
362
+ weight, bias = torch.split(
363
+ self.project_layer(embedding),
364
+ split_size_or_sections=self.d_model,
365
+ dim=-1,
366
+ )
367
+ return (weight * self.norm(input) + bias, embedding)
368
+
369
+ weight, bias = torch.split(
370
+ self.project_layer(embedding),
371
+ split_size_or_sections=self.d_model,
372
+ dim=-1,
373
+ )
374
+ return weight * self.norm(input) + bias
375
+
376
+
377
+ def _get_clones(module, N):
378
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
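
A hedged sketch of how these classes fit together, exercising the `activation == BalancedDoubleSwish` branch of the layer constructor (which then calls BalancedDoubleSwish(d_model)); it assumes the repo root is on the Python path.

    import torch
    from AR.modules.scaling import BalancedDoubleSwish
    from AR.modules.transformer import LayerNorm, TransformerEncoder, TransformerEncoderLayer

    layer = TransformerEncoderLayer(
        d_model=512,
        nhead=16,
        dim_feedforward=2048,
        batch_first=True,
        norm_first=True,
        activation=BalancedDoubleSwish,   # resolved to BalancedDoubleSwish(512) in __init__
    )
    encoder = TransformerEncoder(layer, num_layers=6, norm=LayerNorm(512))
    src = torch.rand(2, 100, 512)         # (batch, time, d_model) since batch_first=True
    out = encoder(src)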
GPT_SoVITS/AR/modules/transformer_onnx.py ADDED
@@ -0,0 +1,292 @@
1
+ # modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py
2
+ import copy
3
+ import numbers
4
+ from functools import partial
5
+ from typing import Any
6
+ from typing import Callable
7
+ from typing import List
8
+ from typing import Optional
9
+ from typing import Tuple
10
+ from typing import Union
11
+
12
+ import torch
13
+ from AR.modules.activation_onnx import MultiheadAttention
14
+ from AR.modules.scaling import BalancedDoubleSwish
15
+ from torch import nn
16
+ from torch import Tensor
17
+ from torch.nn import functional as F
18
+
19
+ _shape_t = Union[int, List[int], torch.Size]
20
+
21
+
22
+ class LayerNorm(nn.Module):
23
+ __constants__ = ["normalized_shape", "eps", "elementwise_affine"]
24
+ normalized_shape: Tuple[int, ...]
25
+ eps: float
26
+ elementwise_affine: bool
27
+
28
+ def __init__(
29
+ self,
30
+ normalized_shape: _shape_t,
31
+ eps: float = 1e-5,
32
+ elementwise_affine: bool = True,
33
+ device=None,
34
+ dtype=None,
35
+ ) -> None:
36
+ factory_kwargs = {"device": device, "dtype": dtype}
37
+ super(LayerNorm, self).__init__()
38
+ if isinstance(normalized_shape, numbers.Integral):
39
+ # mypy error: incompatible types in assignment
40
+ normalized_shape = (normalized_shape,) # type: ignore[assignment]
41
+ self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type]
42
+ self.eps = eps
43
+ self.elementwise_affine = elementwise_affine
44
+ if self.elementwise_affine:
45
+ self.weight = nn.Parameter(
46
+ torch.empty(self.normalized_shape, **factory_kwargs)
47
+ )
48
+ self.bias = nn.Parameter(
49
+ torch.empty(self.normalized_shape, **factory_kwargs)
50
+ )
51
+ else:
52
+ self.register_parameter("weight", None)
53
+ self.register_parameter("bias", None)
54
+
55
+ self.reset_parameters()
56
+
57
+ def reset_parameters(self) -> None:
58
+ if self.elementwise_affine:
59
+ nn.init.ones_(self.weight)
60
+ nn.init.zeros_(self.bias)
61
+
62
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
63
+ if isinstance(input, tuple):
64
+ input, embedding = input
65
+ return (
66
+ F.layer_norm(
67
+ input,
68
+ self.normalized_shape,
69
+ self.weight,
70
+ self.bias,
71
+ self.eps,
72
+ ),
73
+ embedding,
74
+ )
75
+
76
+ assert embedding is None
77
+ return F.layer_norm(
78
+ input, self.normalized_shape, self.weight, self.bias, self.eps
79
+ )
80
+
81
+ def extra_repr(self) -> str:
82
+ return (
83
+ "{normalized_shape}, eps={eps}, "
84
+ "elementwise_affine={elementwise_affine}".format(**self.__dict__)
85
+ )
86
+
87
+
88
+ class IdentityNorm(nn.Module):
89
+ def __init__(
90
+ self,
91
+ d_model: int,
92
+ eps: float = 1e-5,
93
+ device=None,
94
+ dtype=None,
95
+ ) -> None:
96
+ super(IdentityNorm, self).__init__()
97
+
98
+ def forward(self, input: Tensor, embedding: Any = None) -> Tensor:
99
+ if isinstance(input, tuple):
100
+ return input
101
+
102
+ assert embedding is None
103
+ return input
104
+
105
+
106
+ class TransformerEncoder(nn.Module):
107
+ r"""TransformerEncoder is a stack of N encoder layers. Users can build the
108
+ BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters.
109
+
110
+ Args:
111
+ encoder_layer: an instance of the TransformerEncoderLayer() class (required).
112
+ num_layers: the number of sub-encoder-layers in the encoder (required).
113
+ norm: the layer normalization component (optional).
114
+ enable_nested_tensor: if True, input will automatically convert to nested tensor
115
+ (and convert back on output). This will improve the overall performance of
116
+ TransformerEncoder when padding rate is high. Default: ``True`` (enabled).
117
+
118
+ Examples::
119
+ >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
120
+ >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6)
121
+ >>> src = torch.rand(10, 32, 512)
122
+ >>> out = transformer_encoder(src)
123
+ """
124
+ __constants__ = ["norm"]
125
+
126
+ def __init__(self, encoder_layer, num_layers, norm=None):
127
+ super(TransformerEncoder, self).__init__()
128
+ self.layers = _get_clones(encoder_layer, num_layers)
129
+ self.num_layers = num_layers
130
+ self.norm = norm
131
+
132
+ def forward(
133
+ self,
134
+ src: Tensor,
135
+ mask: Optional[Tensor] = None,
136
+ src_key_padding_mask: Optional[Tensor] = None,
137
+ return_layer_states: bool = False,
138
+ cache=None,
139
+ ) -> Tensor:
140
+ output = src
141
+ for mod in self.layers:
142
+ output = mod(
143
+ output,
144
+ src_mask=mask,
145
+ src_key_padding_mask=src_key_padding_mask,
146
+ cache=cache,
147
+ )
148
+
149
+ if self.norm is not None:
150
+ output = self.norm(output)
151
+
152
+ return output
153
+
154
+
155
+ class TransformerEncoderLayer(nn.Module):
156
+ __constants__ = ["batch_first", "norm_first"]
157
+ def __init__(
158
+ self,
159
+ d_model: int,
160
+ nhead: int,
161
+ dim_feedforward: int = 2048,
162
+ dropout: float = 0.1,
163
+ activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
164
+ batch_first: bool = False,
165
+ norm_first: bool = False,
166
+ device=None,
167
+ dtype=None,
168
+ linear1_self_attention_cls: nn.Module = nn.Linear,
169
+ linear2_self_attention_cls: nn.Module = nn.Linear,
170
+ linear1_feedforward_cls: nn.Module = nn.Linear,
171
+ linear2_feedforward_cls: nn.Module = nn.Linear,
172
+ layer_norm_cls: nn.Module = LayerNorm,
173
+ layer_norm_eps: float = 1e-5,
174
+ adaptive_layer_norm=False,
175
+ ) -> None:
176
+ factory_kwargs = {"device": device, "dtype": dtype}
177
+ super(TransformerEncoderLayer, self).__init__()
178
+ self.self_attn = MultiheadAttention(
179
+ d_model, # 512 16
180
+ nhead,
181
+ dropout=dropout,
182
+ batch_first=batch_first,
183
+ linear1_cls=linear1_self_attention_cls,
184
+ linear2_cls=linear2_self_attention_cls,
185
+ **factory_kwargs,
186
+ )
187
+ self.linear1 = linear1_feedforward_cls(
188
+ d_model, dim_feedforward, **factory_kwargs
189
+ )
190
+ self.dropout = nn.Dropout(dropout)
191
+ self.linear2 = linear2_feedforward_cls(
192
+ dim_feedforward, d_model, **factory_kwargs
193
+ )
194
+ self.norm_first = norm_first
195
+ self.dropout1 = nn.Dropout(dropout)
196
+ self.dropout2 = nn.Dropout(dropout)
197
+ if isinstance(activation, str):
198
+ activation = _get_activation_fn(activation)
199
+ elif isinstance(activation, partial):
200
+ activation = activation(d_model)
201
+ elif activation == BalancedDoubleSwish:
202
+ activation = BalancedDoubleSwish(d_model)
203
+ self.activation = activation
204
+
205
+ norm1 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
206
+ if layer_norm_cls == IdentityNorm:
207
+ norm2 = BalancedBasicNorm(d_model, eps=layer_norm_eps, **factory_kwargs)
208
+ else:
209
+ norm2 = layer_norm_cls(d_model, eps=layer_norm_eps, **factory_kwargs)
210
+
211
+ if adaptive_layer_norm:
212
+ self.norm1 = AdaptiveLayerNorm(d_model, norm1)
213
+ self.norm2 = AdaptiveLayerNorm(d_model, norm2)
214
+ else:
215
+ self.norm1 = norm1
216
+ self.norm2 = norm2
217
+
218
+ def __setstate__(self, state):
219
+ super(TransformerEncoderLayer, self).__setstate__(state)
220
+ if not hasattr(self, "activation"):
221
+ self.activation = F.relu
222
+
223
+ def forward(
224
+ self,
225
+ src: Tensor,
226
+ src_mask: Optional[Tensor] = None,
227
+ src_key_padding_mask: Optional[Tensor] = None,
228
+ cache=None,
229
+ ) -> Tensor:
230
+ x = src
231
+ stage_embedding = None
232
+ x = self.norm1(
233
+ x + self._sa_block(x, src_mask, src_key_padding_mask, cache=cache),
234
+ stage_embedding,
235
+ )
236
+ x = self.norm2(x + self._ff_block(x), stage_embedding)
237
+
238
+ return x
239
+
240
+ def _sa_block(
241
+ self,
242
+ x: Tensor,
243
+ attn_mask: Optional[Tensor],
244
+ key_padding_mask: Optional[Tensor],
245
+ cache=None,
246
+ ) -> Tensor:
247
+ x = self.self_attn(
248
+ x,
249
+ x,
250
+ x,
251
+ attn_mask=attn_mask,
252
+ key_padding_mask=key_padding_mask,
253
+ need_weights=False,
254
+ cache=cache,
255
+ )
256
+ return self.dropout1(x)
257
+
258
+ def _ff_block(self, x: Tensor) -> Tensor:
259
+ x = self.linear2(self.dropout(self.activation(self.linear1(x))))
260
+ return self.dropout2(x)
261
+
262
+
263
+ class AdaptiveLayerNorm(nn.Module):
264
+ r"""Adaptive Layer Normalization"""
265
+
266
+ def __init__(self, d_model, norm) -> None:
267
+ super(AdaptiveLayerNorm, self).__init__()
268
+ self.project_layer = nn.Linear(d_model, 2 * d_model)
269
+ self.norm = norm
270
+ self.d_model = d_model
271
+ self.eps = self.norm.eps
272
+
273
+ def forward(self, input: Tensor, embedding: Tensor = None) -> Tensor:
274
+ if isinstance(input, tuple):
275
+ input, embedding = input
276
+ weight, bias = torch.split(
277
+ self.project_layer(embedding),
278
+ split_size_or_sections=self.d_model,
279
+ dim=-1,
280
+ )
281
+ return (weight * self.norm(input) + bias, embedding)
282
+
283
+ weight, bias = torch.split(
284
+ self.project_layer(embedding),
285
+ split_size_or_sections=self.d_model,
286
+ dim=-1,
287
+ )
288
+ return weight * self.norm(input) + bias
289
+
290
+
291
+ def _get_clones(module, N):
292
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
GPT_SoVITS/AR/text_processing/__init__.py ADDED
File without changes
GPT_SoVITS/AR/text_processing/phonemizer.py ADDED
@@ -0,0 +1,79 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/phonemizer.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ import itertools
4
+ import re
5
+ from typing import Dict
6
+ from typing import List
7
+
8
+ import regex
9
+ from gruut import sentences
10
+ from gruut.const import Sentence
11
+ from gruut.const import Word
12
+ from AR.text_processing.symbols import SYMBOL_TO_ID
13
+
14
+
15
+ class GruutPhonemizer:
16
+ def __init__(self, language: str):
17
+ self._phonemizer = sentences
18
+ self.lang = language
19
+ self.symbol_to_id = SYMBOL_TO_ID
20
+ self._special_cases_dict: Dict[str, str] = {
21
+ r"\.\.\.": "... ",
22
+ ";": "; ",
23
+ ":": ": ",
24
+ ",": ", ",
25
+ r"\.": ". ",
26
+ "!": "! ",
27
+ r"\?": "? ",
28
+ "—": "—",
29
+ "…": "… ",
30
+ "«": "«",
31
+ "»": "»",
32
+ }
33
+ self._punctuation_regexp: str = (
34
+ rf"([{''.join(self._special_cases_dict.keys())}])"
35
+ )
36
+
37
+ def _normalize_punctuation(self, text: str) -> str:
38
+ text = regex.sub(rf"\pZ+{self._punctuation_regexp}", r"\1", text)
39
+ text = regex.sub(rf"{self._punctuation_regexp}(\pL)", r"\1 \2", text)
40
+ text = regex.sub(r"\pZ+", r" ", text)
41
+ return text.strip()
42
+
43
+ def _convert_punctuation(self, word: Word) -> str:
44
+ if not word.phonemes:
45
+ return ""
46
+ if word.phonemes[0] in ["‖", "|"]:
47
+ return word.text.strip()
48
+
49
+ phonemes = "".join(word.phonemes)
50
+ # remove modifier characters ˈˌː with regex
51
+ phonemes = re.sub(r"[ˈˌː͡]", "", phonemes)
52
+ return phonemes.strip()
53
+
54
+ def phonemize(self, text: str, espeak: bool = False) -> str:
55
+ text_to_phonemize: str = self._normalize_punctuation(text)
56
+ sents: List[Sentence] = [
57
+ sent
58
+ for sent in self._phonemizer(text_to_phonemize, lang="en-us", espeak=espeak)
59
+ ]
60
+ words: List[str] = [
61
+ self._convert_punctuation(word) for word in itertools.chain(*sents)
62
+ ]
63
+ return " ".join(words)
64
+
65
+ def transform(self, phonemes):
66
+ # convert phonemes to ids
67
+ # dictionary is in symbols.py
68
+ return [self.symbol_to_id[p] for p in phonemes if p in self.symbol_to_id.keys()]
69
+
70
+
71
+ if __name__ == "__main__":
72
+ phonemizer = GruutPhonemizer("en-us")
73
+ # text -> IPA
74
+ phonemes = phonemizer.phonemize("Hello, wor-ld ?")
75
+ print("phonemes:", phonemes)
76
+ print("len(phonemes):", len(phonemes))
77
+ phoneme_ids = phonemizer.transform(phonemes)
78
+ print("phoneme_ids:", phoneme_ids)
79
+ print("len(phoneme_ids):", len(phoneme_ids))
GPT_SoVITS/AR/text_processing/symbols.py ADDED
@@ -0,0 +1,10 @@
1
+ # modified from https://github.com/yangdongchao/SoundStorm/blob/master/soundstorm/s1/AR/text_processing/symbols.py
2
+ # reference: https://github.com/lifeiteng/vall-e
3
+ PAD = "_"
4
+ PUNCTUATION = ';:,.!?¡¿—…"«»“” '
5
+ LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
6
+ IPA_LETTERS = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
7
+ SYMBOLS = [PAD] + list(PUNCTUATION) + list(LETTERS) + list(IPA_LETTERS)
8
+ SPACE_ID = SYMBOLS.index(" ")
9
+ SYMBOL_TO_ID = {s: i for i, s in enumerate(SYMBOLS)}
10
+ ID_TO_SYMBOL = {i: s for i, s in enumerate(SYMBOLS)}
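
For illustration only, a tiny hedged example of the phoneme/id tables above (assuming the repo is importable):

    from AR.text_processing.symbols import ID_TO_SYMBOL, PAD, SYMBOL_TO_ID

    print(SYMBOL_TO_ID[PAD])                                         # 0: PAD is the first symbol
    ids = [SYMBOL_TO_ID[c] for c in "həˈloʊ" if c in SYMBOL_TO_ID]   # IPA string -> integer ids
    print([ID_TO_SYMBOL[i] for i in ids])                            # maps back to the kept symbols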
GPT_SoVITS/AR/utils/__init__.py ADDED
@@ -0,0 +1,37 @@
1
+ import re
2
+
3
+
4
+ def str2bool(s):
5
+ return s.lower() == 'true'
6
+
7
+
8
+ def get_newest_ckpt(string_list):
9
+ # Regex pattern that captures the epoch and step numbers in a checkpoint filename
10
+ pattern = r'epoch=(\d+)-step=(\d+)\.ckpt'
11
+
12
+ # Extract the numbers from each filename and collect (epoch, step, name) tuples
13
+ extracted_info = []
14
+ for string in string_list:
15
+ match = re.match(pattern, string)
16
+ if match:
17
+ epoch = int(match.group(1))
18
+ step = int(match.group(2))
19
+ extracted_info.append((epoch, step, string))
20
+ # Sort by epoch, then by step, in descending order
21
+ sorted_info = sorted(
22
+ extracted_info, key=lambda x: (x[0], x[1]), reverse=True)
23
+ # Take the filename of the newest checkpoint
24
+ newest_ckpt = sorted_info[0][2]
25
+ return newest_ckpt
26
+
27
+
28
+ # Return the first line of the file if it exists and is non-empty, otherwise False
29
+ def check_txt_file(file_path):
30
+ try:
31
+ with open(file_path, 'r') as file:
32
+ text = file.readline().strip()
33
+ assert text.strip() != ''
34
+ return text
35
+ except Exception:
36
+ return False
37
+ return False
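
A quick sketch of what get_newest_ckpt does, using hypothetical filenames (they are illustrative, not files from this upload):

    from AR.utils import get_newest_ckpt

    names = ["epoch=1-step=500.ckpt", "epoch=3-step=900.ckpt", "epoch=3-step=1200.ckpt"]
    print(get_newest_ckpt(names))   # "epoch=3-step=1200.ckpt": highest epoch, then highest step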
GPT_SoVITS/AR/utils/initialize.py ADDED
@@ -0,0 +1,38 @@
1
+ #!/usr/bin/env python3
2
+ """Initialize modules for espnet2 neural networks."""
3
+ import torch
4
+ from typeguard import check_argument_types
5
+
6
+
7
+ def initialize(model: torch.nn.Module, init: str):
8
+ """Initialize weights of a neural network module.
9
+
10
+ Parameters are initialized using the given method or distribution.
11
+
12
+ Custom initialization routines can be implemented into submodules
13
+ as function `espnet_initialization_fn` within the custom module.
14
+
15
+ Args:
16
+ model: Target.
17
+ init: Method of initialization.
18
+ """
19
+ assert check_argument_types()
20
+ print("init with", init)
21
+
22
+ # weight init
23
+ for p in model.parameters():
24
+ if p.dim() > 1:
25
+ if init == "xavier_uniform":
26
+ torch.nn.init.xavier_uniform_(p.data)
27
+ elif init == "xavier_normal":
28
+ torch.nn.init.xavier_normal_(p.data)
29
+ elif init == "kaiming_uniform":
30
+ torch.nn.init.kaiming_uniform_(p.data, nonlinearity="relu")
31
+ elif init == "kaiming_normal":
32
+ torch.nn.init.kaiming_normal_(p.data, nonlinearity="relu")
33
+ else:
34
+ raise ValueError("Unknown initialization: " + init)
35
+ # bias init
36
+ for name, p in model.named_parameters():
37
+ if ".bias" in name and p.dim() == 1:
38
+ p.data.zero_()
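
A minimal hedged usage sketch for initialize (any torch.nn.Module works; the model below is a placeholder):

    import torch
    from AR.utils.initialize import initialize

    model = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.ReLU(), torch.nn.Linear(16, 8))
    initialize(model, "xavier_uniform")   # weights with dim > 1 are re-drawn, biases are zeroed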
GPT_SoVITS/AR/utils/io.py ADDED
@@ -0,0 +1,34 @@
1
+ import sys
2
+
3
+ import torch
4
+ import yaml
5
+
6
+
7
+ def load_yaml_config(path):
8
+ with open(path) as f:
9
+ config = yaml.full_load(f)
10
+ return config
11
+
12
+
13
+ def save_config_to_yaml(config, path):
14
+ assert path.endswith(".yaml")
15
+ with open(path, "w") as f:
16
+ f.write(yaml.dump(config))
17
+ f.close()
18
+
19
+
20
+ def write_args(args, path):
21
+ args_dict = dict(
22
+ (name, getattr(args, name)) for name in dir(args) if not name.startswith("_")
23
+ )
24
+ with open(path, "a") as args_file:
25
+ args_file.write("==> torch version: {}\n".format(torch.__version__))
26
+ args_file.write(
27
+ "==> cudnn version: {}\n".format(torch.backends.cudnn.version())
28
+ )
29
+ args_file.write("==> Cmd:\n")
30
+ args_file.write(str(sys.argv))
31
+ args_file.write("\n==> args:\n")
32
+ for k, v in sorted(args_dict.items()):
33
+ args_file.write(" %s: %s\n" % (str(k), str(v)))
34
+ args_file.close()
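
load_yaml_config is how the YAML configs below are read; a short sketch against one of the configs shipped in this upload:

    from AR.utils.io import load_yaml_config

    config = load_yaml_config("GPT_SoVITS/configs/s1longer.yaml")
    print(config["model"]["hidden_dim"], config["train"]["batch_size"])   # 512 8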
GPT_SoVITS/configs/s1.yaml ADDED
@@ -0,0 +1,31 @@
1
+ train:
2
+ seed: 1234
3
+ epochs: 300
4
+ batch_size: 8
5
+ gradient_accumulation: 4
6
+ save_every_n_epoch: 1
7
+ precision: 16
8
+ gradient_clip: 1.0
9
+ optimizer:
10
+ lr: 0.01
11
+ lr_init: 0.00001
12
+ lr_end: 0.0001
13
+ warmup_steps: 2000
14
+ decay_steps: 40000
15
+ data:
16
+ max_eval_sample: 8
17
+ max_sec: 54
18
+ num_workers: 1
19
+ pad_val: 1024 # same with EOS in model
20
+ model:
21
+ vocab_size: 1025
22
+ phoneme_vocab_size: 512
23
+ embedding_dim: 512
24
+ hidden_dim: 512
25
+ head: 16
26
+ linear_units: 2048
27
+ n_layer: 12
28
+ dropout: 0
29
+ EOS: 1024
30
+ inference:
31
+ top_k: 5
GPT_SoVITS/configs/s1big.yaml ADDED
@@ -0,0 +1,31 @@
1
+ train:
2
+ seed: 1234
3
+ epochs: 300
4
+ batch_size: 8
5
+ gradient_accumulation: 4
6
+ save_every_n_epoch: 1
7
+ precision: 16-mixed
8
+ gradient_clip: 1.0
9
+ optimizer:
10
+ lr: 0.01
11
+ lr_init: 0.00001
12
+ lr_end: 0.0001
13
+ warmup_steps: 2000
14
+ decay_steps: 40000
15
+ data:
16
+ max_eval_sample: 8
17
+ max_sec: 54
18
+ num_workers: 1
19
+ pad_val: 1024 # same with EOS in model
20
+ model:
21
+ vocab_size: 1025
22
+ phoneme_vocab_size: 512
23
+ embedding_dim: 1024
24
+ hidden_dim: 1024
25
+ head: 16
26
+ linear_units: 2048
27
+ n_layer: 16
28
+ dropout: 0
29
+ EOS: 1024
30
+ inference:
31
+ top_k: 5
GPT_SoVITS/configs/s1big2.yaml ADDED
@@ -0,0 +1,31 @@
1
+ train:
2
+ seed: 1234
3
+ epochs: 300
4
+ batch_size: 12
5
+ gradient_accumulation: 4
6
+ save_every_n_epoch: 1
7
+ precision: 16-mixed
8
+ gradient_clip: 1.0
9
+ optimizer:
10
+ lr: 0.01
11
+ lr_init: 0.00001
12
+ lr_end: 0.0001
13
+ warmup_steps: 2000
14
+ decay_steps: 40000
15
+ data:
16
+ max_eval_sample: 8
17
+ max_sec: 54
18
+ num_workers: 1
19
+ pad_val: 1024 # same with EOS in model
20
+ model:
21
+ vocab_size: 1025
22
+ phoneme_vocab_size: 512
23
+ embedding_dim: 1024
24
+ hidden_dim: 1024
25
+ head: 16
26
+ linear_units: 2048
27
+ n_layer: 6
28
+ dropout: 0
29
+ EOS: 1024
30
+ inference:
31
+ top_k: 5
GPT_SoVITS/configs/s1longer.yaml ADDED
@@ -0,0 +1,31 @@
1
+ train:
2
+ seed: 1234
3
+ epochs: 20
4
+ batch_size: 8
5
+ save_every_n_epoch: 1
6
+ precision: 16-mixed
7
+ gradient_clip: 1.0
8
+ optimizer:
9
+ lr: 0.01
10
+ lr_init: 0.00001
11
+ lr_end: 0.0001
12
+ warmup_steps: 2000
13
+ decay_steps: 40000
14
+ data:
15
+ max_eval_sample: 8
16
+ max_sec: 54
17
+ num_workers: 4
18
+ pad_val: 1024 # same with EOS in model
19
+ model:
20
+ vocab_size: 1025
21
+ phoneme_vocab_size: 512
22
+ embedding_dim: 512
23
+ hidden_dim: 512
24
+ head: 16
25
+ linear_units: 2048
26
+ n_layer: 24
27
+ dropout: 0
28
+ EOS: 1024
29
+ random_bert: 0
30
+ inference:
31
+ top_k: 5
GPT_SoVITS/configs/s1mq.yaml ADDED
@@ -0,0 +1,77 @@
1
+ train:
2
+ seed: 1234
3
+ epochs: 100
4
+ batch_size: 6
5
+ gradient_accumulation: 4
6
+ save_every_n_epoch: 1
7
+ precision: 32
8
+ gradient_clip: 1.0
9
+ optimizer:
10
+ lr: 0.01
11
+ lr_init: 0.00001
12
+ lr_end: 0.0001
13
+ warmup_steps: 2000
14
+ decay_steps: 40000
15
+ data:
16
+ max_eval_sample: 8
17
+ max_sec: 40
18
+ num_workers: 1
19
+ pad_val: 1024 # same with EOS in model
20
+ model:
21
+ saving_path: "ckpt/"
22
+ resume_checkpoint: null
23
+ vocoder_config_path: "quantizer/new_ckpt/config.json"
24
+ vocoder_ckpt_path: "quantizer/new_ckpt/g_00600000"
25
+ datadir: "/home/liweiche/GigaSpeech/wavs"
26
+ metapath: "/home/liweiche/GigaSpeech/train2.json"
27
+ val_metapath: "/home/liweiche/GigaSpeech/dev2.json"
28
+ sampledir: "logs/"
29
+ pretrained_path: null
30
+ lr: 0.0001
31
+ batch_size: 200.0
32
+ train_bucket_size: 8192
33
+ training_step: 800000
34
+ optim_flat_percent: 0.0
35
+ warmup_step: 50
36
+ adam_beta1: 0.9
37
+ adam_beta2: 0.98
38
+ ffd_size: 3072
39
+ hidden_size: 768
40
+ enc_nlayers: 6
41
+ dec_nlayers: 6
42
+ nheads: 12
43
+ ar_layer: 4
44
+ ar_ffd_size: 1024
45
+ ar_hidden_size: 256
46
+ ar_nheads: 4
47
+ aligner_softmax_temp: 1.0
48
+ layer_norm_eps: 0.00001
49
+ speaker_embed_dropout: 0.05
50
+ label_smoothing: 0.0
51
+ val_check_interval: 5000
52
+ check_val_every_n_epoch: 1
53
+ precision: "fp16"
54
+ nworkers: 16
55
+ distributed: true
56
+ accelerator: "ddp"
57
+ version: null
58
+ accumulate_grad_batches: 1
59
+ use_repetition_token: true
60
+ use_repetition_gating: false
61
+ repetition_penalty: 1.0
62
+ sampling_temperature: 1.0
63
+ top_k: -1
64
+ min_top_k: 3
65
+ top_p: 0.8
66
+ sample_num: 4
67
+ length_penalty_max_length: 15000
68
+ length_penalty_max_prob: 0.95
69
+ max_input_length: 2048
70
+ max_output_length: 2000
71
+ sample_rate: 16000
72
+ n_codes: 1024
73
+ n_cluster_groups: 1
74
+ phone_context_window: 4
75
+ phoneset_size: 1000
76
+ inference:
77
+ top_k: 5
GPT_SoVITS/configs/s2.json ADDED
@@ -0,0 +1,90 @@
1
+ {
2
+ "train": {
3
+ "log_interval": 100,
4
+ "eval_interval": 500,
5
+ "seed": 1234,
6
+ "epochs": 100,
7
+ "learning_rate": 0.0001,
8
+ "betas": [
9
+ 0.8,
10
+ 0.99
11
+ ],
12
+ "eps": 1e-09,
13
+ "batch_size": 32,
14
+ "fp16_run": true,
15
+ "lr_decay": 0.999875,
16
+ "segment_size": 20480,
17
+ "init_lr_ratio": 1,
18
+ "warmup_epochs": 0,
19
+ "c_mel": 45,
20
+ "c_kl": 1.0,
21
+ "text_low_lr_rate": 0.4
22
+ },
23
+ "data": {
24
+ "max_wav_value": 32768.0,
25
+ "sampling_rate": 32000,
26
+ "filter_length": 2048,
27
+ "hop_length": 640,
28
+ "win_length": 2048,
29
+ "n_mel_channels": 128,
30
+ "mel_fmin": 0.0,
31
+ "mel_fmax": null,
32
+ "add_blank": true,
33
+ "n_speakers": 300,
34
+ "cleaned_text": true
35
+ },
36
+ "model": {
37
+ "inter_channels": 192,
38
+ "hidden_channels": 192,
39
+ "filter_channels": 768,
40
+ "n_heads": 2,
41
+ "n_layers": 6,
42
+ "kernel_size": 3,
43
+ "p_dropout": 0.1,
44
+ "resblock": "1",
45
+ "resblock_kernel_sizes": [
46
+ 3,
47
+ 7,
48
+ 11
49
+ ],
50
+ "resblock_dilation_sizes": [
51
+ [
52
+ 1,
53
+ 3,
54
+ 5
55
+ ],
56
+ [
57
+ 1,
58
+ 3,
59
+ 5
60
+ ],
61
+ [
62
+ 1,
63
+ 3,
64
+ 5
65
+ ]
66
+ ],
67
+ "upsample_rates": [
68
+ 10,
69
+ 8,
70
+ 2,
71
+ 2,
72
+ 2
73
+ ],
74
+ "upsample_initial_channel": 512,
75
+ "upsample_kernel_sizes": [
76
+ 16,
77
+ 16,
78
+ 8,
79
+ 2,
80
+ 2
81
+ ],
82
+ "n_layers_q": 3,
83
+ "use_spectral_norm": false,
84
+ "gin_channels": 512,
85
+ "semantic_frame_rate": "25hz",
86
+ "freeze_quantizer": true
87
+ },
88
+ "s2_ckpt_dir": "logs/s2/big2k1",
89
+ "content_module": "cnhubert"
90
+ }
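
A few numbers implied by the data and model sections above, worked out as a sanity check (the arithmetic, not the config values, is the only addition here):

    sampling_rate, hop_length, segment_size = 32000, 640, 20480

    print(sampling_rate / hop_length)    # 50.0 mel frames per second
    print(segment_size / sampling_rate)  # 0.64 s of audio per training segment
    print(segment_size // hop_length)    # 32 mel frames per segment
    print(10 * 8 * 2 * 2 * 2)            # 640: the upsample_rates product matches hop_length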
GPT_SoVITS/configs/train.yaml ADDED
@@ -0,0 +1,32 @@
1
+ gpu:
2
+ n_card: 1
3
+ n_process_per_card: 2
4
+ io:
5
+ text_path: D:\RVC1006\GPT-SoVITS\GPT_SoVITS
6
+ save_every_n_epoch: 1
7
+ precision: 16-mixed
8
+ gradient_clip: 1.0
9
+ optimizer:
10
+ lr: 0.01
11
+ lr_init: 0.00001
12
+ lr_end: 0.0001
13
+ warmup_steps: 2000
14
+ decay_steps: 40000
15
+ data:
16
+ max_eval_sample: 8
17
+ max_sec: 54
18
+ num_workers: 1
19
+ pad_val: 1024 # same with EOS in model
20
+ model:
21
+ vocab_size: 1025
22
+ phoneme_vocab_size: 512
23
+ embedding_dim: 512
24
+ hidden_dim: 512
25
+ head: 16
26
+ linear_units: 2048
27
+ n_layer: 24
28
+ dropout: 0
29
+ EOS: 1024
30
+ random_bert: 0
31
+ inference:
32
+ top_k: 5
GPT_SoVITS/feature_extractor/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ from . import cnhubert, whisper_enc
2
+
3
+ content_module_map = {
4
+ 'cnhubert': cnhubert,
5
+ 'whisper': whisper_enc
6
+ }
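
The map above is keyed by the content_module field of s2.json ("cnhubert" in this upload). A hedged sketch of resolving it; the checkpoint path is an assumption, not something fixed by this file:

    from GPT_SoVITS.feature_extractor import cnhubert, content_module_map

    cnhubert.cnhubert_base_path = "pretrained_models/chinese-hubert-base"  # assumed local path
    ssl_module = content_module_map["cnhubert"]   # the cnhubert module defined below
    ssl_model = ssl_module.get_model()            # CNHubert, returned in eval mode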
GPT_SoVITS/feature_extractor/cnhubert.py ADDED
@@ -0,0 +1,104 @@
1
+ import time
2
+
3
+ import librosa
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import soundfile as sf
7
+ import logging
8
+
9
+ logging.getLogger("numba").setLevel(logging.WARNING)
10
+
11
+ from transformers import (
12
+ Wav2Vec2FeatureExtractor,
13
+ HubertModel,
14
+ )
15
+
16
+ import utils
17
+ import torch.nn as nn
18
+
19
+ cnhubert_base_path = None
20
+
21
+
22
+ class CNHubert(nn.Module):
23
+ def __init__(self):
24
+ super().__init__()
25
+ self.model = HubertModel.from_pretrained(cnhubert_base_path)
26
+ self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
27
+ cnhubert_base_path
28
+ )
29
+
30
+ def forward(self, x):
31
+ input_values = self.feature_extractor(
32
+ x, return_tensors="pt", sampling_rate=16000
33
+ ).input_values.to(x.device)
34
+ feats = self.model(input_values)["last_hidden_state"]
35
+ return feats
36
+
37
+
38
+ # class CNHubertLarge(nn.Module):
39
+ # def __init__(self):
40
+ # super().__init__()
41
+ # self.model = HubertModel.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large")
42
+ # self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-hubert-large")
43
+ # def forward(self, x):
44
+ # input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
45
+ # feats = self.model(input_values)["last_hidden_state"]
46
+ # return feats
47
+ #
48
+ # class CVec(nn.Module):
49
+ # def __init__(self):
50
+ # super().__init__()
51
+ # self.model = HubertModel.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base")
52
+ # self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/vc-webui-big/hubert_base")
53
+ # def forward(self, x):
54
+ # input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
55
+ # feats = self.model(input_values)["last_hidden_state"]
56
+ # return feats
57
+ #
58
+ # class cnw2v2base(nn.Module):
59
+ # def __init__(self):
60
+ # super().__init__()
61
+ # self.model = Wav2Vec2Model.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base")
62
+ # self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("/data/docker/liujing04/gpt-vits/chinese-wav2vec2-base")
63
+ # def forward(self, x):
64
+ # input_values = self.feature_extractor(x, return_tensors="pt", sampling_rate=16000).input_values.to(x.device)
65
+ # feats = self.model(input_values)["last_hidden_state"]
66
+ # return feats
67
+
68
+
69
+ def get_model():
70
+ model = CNHubert()
71
+ model.eval()
72
+ return model
73
+
74
+
75
+ # def get_large_model():
76
+ # model = CNHubertLarge()
77
+ # model.eval()
78
+ # return model
79
+ #
80
+ # def get_model_cvec():
81
+ # model = CVec()
82
+ # model.eval()
83
+ # return model
84
+ #
85
+ # def get_model_cnw2v2base():
86
+ # model = cnw2v2base()
87
+ # model.eval()
88
+ # return model
89
+
90
+
91
+ def get_content(hmodel, wav_16k_tensor):
92
+ with torch.no_grad():
93
+ feats = hmodel(wav_16k_tensor)
94
+ return feats.transpose(1, 2)
95
+
96
+
97
+ if __name__ == "__main__":
98
+ model = get_model()
99
+ src_path = "/Users/Shared/原音频2.wav"
100
+ wav_16k_tensor = utils.load_wav_to_torch_and_resample(src_path, 16000)
101
+ model = model
102
+ wav_16k_tensor = wav_16k_tensor
103
+ feats = get_content(model, wav_16k_tensor)
104
+ print(feats.shape)
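
A hedged sketch of feature extraction with CNHubert, using random audio in place of a real 16 kHz waveform; cnhubert_base_path must point at a local chinese-hubert-base checkpoint before get_model() is called (the path below is an assumption):

    import torch
    from GPT_SoVITS.feature_extractor import cnhubert

    cnhubert.cnhubert_base_path = "pretrained_models/chinese-hubert-base"  # assumed path
    model = cnhubert.get_model()
    wav_16k = torch.randn(16000)                  # 1 s of placeholder audio at 16 kHz
    feats = cnhubert.get_content(model, wav_16k)
    print(feats.shape)                            # about (1, 768, 49): channels-first after transpose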
GPT_SoVITS/feature_extractor/whisper_enc.py ADDED
@@ -0,0 +1,25 @@
1
+ import torch
2
+
3
+
4
+ def get_model():
5
+ import whisper
6
+
7
+ model = whisper.load_model("small", device="cpu")
8
+
9
+ return model.encoder
10
+
11
+
12
+ def get_content(model=None, wav_16k_tensor=None):
13
+ from whisper import log_mel_spectrogram, pad_or_trim
14
+
15
+ dev = next(model.parameters()).device
16
+ mel = log_mel_spectrogram(wav_16k_tensor).to(dev)[:, :3000]
17
+ # if torch.cuda.is_available():
18
+ # mel = mel.to(torch.float16)
19
+ feature_len = mel.shape[-1] // 2
20
+ assert mel.shape[-1] < 3000, "Input audio is too long; only clips shorter than 30 seconds are supported"
21
+ with torch.no_grad():
22
+ feature = model(pad_or_trim(mel, 3000).unsqueeze(0))[
23
+ :1, :feature_len, :
24
+ ].transpose(1, 2)
25
+ return feature
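
A parallel hedged sketch for the whisper path (requires the openai-whisper package; random audio stands in for real speech, and inputs must stay under 30 s):

    import torch
    from GPT_SoVITS.feature_extractor import whisper_enc

    encoder = whisper_enc.get_model()      # the encoder of whisper "small", loaded on CPU
    wav_16k = torch.randn(16000 * 5)       # 5 s placeholder waveform at 16 kHz
    feature = whisper_enc.get_content(encoder, wav_16k)
    print(feature.shape)                   # (1, 768, 250) for "small": half the mel frame count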
GPT_SoVITS/inference_gui.py ADDED
@@ -0,0 +1,340 @@
1
+ import os  # needed for os.path.join in synthesize()
+ import sys
2
+ from PyQt5.QtCore import QEvent
3
+ from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QLineEdit, QPushButton, QTextEdit
4
+ from PyQt5.QtWidgets import QGridLayout, QVBoxLayout, QWidget, QFileDialog, QStatusBar, QComboBox
5
+ import soundfile as sf
6
+
7
+ from tools.i18n.i18n import I18nAuto
8
+ i18n = I18nAuto()
9
+
10
+ from GPT_SoVITS.inference_webui import change_gpt_weights, change_sovits_weights, get_tts_wav
11
+
12
+
13
+ class GPTSoVITSGUI(QMainWindow):
14
+ def __init__(self):
15
+ super().__init__()
16
+
17
+ self.init_ui()
18
+
19
+ def init_ui(self):
20
+ self.setWindowTitle('GPT-SoVITS GUI')
21
+ self.setGeometry(800, 450, 950, 850)
22
+
23
+ self.setStyleSheet("""
24
+ QWidget {
25
+ background-color: #a3d3b1;
26
+ }
27
+
28
+ QTabWidget::pane {
29
+ background-color: #a3d3b1;
30
+ }
31
+
32
+ QTabWidget::tab-bar {
33
+ alignment: left;
34
+ }
35
+
36
+ QTabBar::tab {
37
+ background: #8da4bf;
38
+ color: #ffffff;
39
+ padding: 8px;
40
+ }
41
+
42
+ QTabBar::tab:selected {
43
+ background: #2a3f54;
44
+ }
45
+
46
+ QLabel {
47
+ color: #000000;
48
+ }
49
+
50
+ QPushButton {
51
+ background-color: #4CAF50;
52
+ color: white;
53
+ padding: 8px;
54
+ border: 1px solid #4CAF50;
55
+ border-radius: 4px;
56
+ }
57
+
58
+ QPushButton:hover {
59
+ background-color: #45a049;
60
+ border: 1px solid #45a049;
61
+ box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.1);
62
+ }
63
+ """)
64
+
65
+ license_text = (
66
+ "本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. "
67
+ "如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录LICENSE.")
68
+ license_label = QLabel(license_text)
69
+ license_label.setWordWrap(True)
70
+
71
+ self.GPT_model_label = QLabel("选择GPT模型:")
72
+ self.GPT_model_input = QLineEdit()
73
+ self.GPT_model_input.setPlaceholderText("拖拽或选择文件")
74
+ self.GPT_model_input.setReadOnly(True)
75
+ self.GPT_model_button = QPushButton("选择GPT模型文件")
76
+ self.GPT_model_button.clicked.connect(self.select_GPT_model)
77
+
78
+ self.SoVITS_model_label = QLabel("选择SoVITS模型:")
79
+ self.SoVITS_model_input = QLineEdit()
80
+ self.SoVITS_model_input.setPlaceholderText("拖拽或选择文件")
81
+ self.SoVITS_model_input.setReadOnly(True)
82
+ self.SoVITS_model_button = QPushButton("选择SoVITS模型文件")
83
+ self.SoVITS_model_button.clicked.connect(self.select_SoVITS_model)
84
+
85
+ self.ref_audio_label = QLabel("上传参考音频:")
86
+ self.ref_audio_input = QLineEdit()
87
+ self.ref_audio_input.setPlaceholderText("拖拽或选择文件")
88
+ self.ref_audio_input.setReadOnly(True)
89
+ self.ref_audio_button = QPushButton("选择音频文件")
90
+ self.ref_audio_button.clicked.connect(self.select_ref_audio)
91
+
92
+ self.ref_text_label = QLabel("参考音频文本:")
93
+ self.ref_text_input = QLineEdit()
94
+ self.ref_text_input.setPlaceholderText("拖拽或选择文件")
95
+ self.ref_text_input.setReadOnly(True)
96
+ self.ref_text_button = QPushButton("上传文本")
97
+ self.ref_text_button.clicked.connect(self.upload_ref_text)
98
+
99
+ self.language_label = QLabel("参考音频语言:")
100
+ self.language_combobox = QComboBox()
101
+ self.language_combobox.addItems(["中文", "英文", "日文"])
102
+
103
+ self.target_text_label = QLabel("合成目标文本:")
104
+ self.target_text_input = QLineEdit()
105
+ self.target_text_input.setPlaceholderText("拖拽或选择文件")
106
+ self.target_text_input.setReadOnly(True)
107
+ self.target_text_button = QPushButton("上传文本")
108
+ self.target_text_button.clicked.connect(self.upload_target_text)
109
+
110
+ self.language_label_02 = QLabel("合成音频语言:")
111
+ self.language_combobox_02 = QComboBox()
112
+ self.language_combobox_02.addItems(["中文", "英文", "日文"])
113
+
114
+ self.output_label = QLabel("输出音频路径:")
115
+ self.output_input = QLineEdit()
116
+ self.output_input.setPlaceholderText("拖拽或选择文件")
117
+ self.output_input.setReadOnly(True)
118
+ self.output_button = QPushButton("选择文件夹")
119
+ self.output_button.clicked.connect(self.select_output_path)
120
+
121
+ self.output_text = QTextEdit()
122
+ self.output_text.setReadOnly(True)
123
+
124
+ self.add_drag_drop_events([
125
+ self.GPT_model_input,
126
+ self.SoVITS_model_input,
127
+ self.ref_audio_input,
128
+ self.ref_text_input,
129
+ self.target_text_input,
130
+ self.output_input,
131
+ ])
132
+
133
+ self.synthesize_button = QPushButton("合成")
134
+ self.synthesize_button.clicked.connect(self.synthesize)
135
+
136
+ self.clear_output_button = QPushButton("清空输出")
137
+ self.clear_output_button.clicked.connect(self.clear_output)
138
+
139
+ self.status_bar = QStatusBar()
140
+
141
+ main_layout = QVBoxLayout()
142
+
143
+ input_layout = QGridLayout()
144
+ input_layout.setSpacing(10)
145
+
146
+ self.setLayout(input_layout)
147
+
148
+ input_layout.addWidget(license_label, 0, 0, 1, 3)
149
+
150
+ input_layout.addWidget(self.GPT_model_label, 1, 0)
151
+ input_layout.addWidget(self.GPT_model_input, 2, 0, 1, 2)
152
+ input_layout.addWidget(self.GPT_model_button, 2, 2)
153
+
154
+ input_layout.addWidget(self.SoVITS_model_label, 3, 0)
155
+ input_layout.addWidget(self.SoVITS_model_input, 4, 0, 1, 2)
156
+ input_layout.addWidget(self.SoVITS_model_button, 4, 2)
157
+
158
+ input_layout.addWidget(self.ref_audio_label, 5, 0)
159
+ input_layout.addWidget(self.ref_audio_input, 6, 0, 1, 2)
160
+ input_layout.addWidget(self.ref_audio_button, 6, 2)
161
+
162
+ input_layout.addWidget(self.language_label, 7, 0)
163
+ input_layout.addWidget(self.language_combobox, 8, 0, 1, 1)
164
+ input_layout.addWidget(self.ref_text_label, 9, 0)
165
+ input_layout.addWidget(self.ref_text_input, 10, 0, 1, 2)
166
+ input_layout.addWidget(self.ref_text_button, 10, 2)
167
+
168
+ input_layout.addWidget(self.language_label_02, 11, 0)
169
+ input_layout.addWidget(self.language_combobox_02, 12, 0, 1, 1)
170
+ input_layout.addWidget(self.target_text_label, 13, 0)
171
+ input_layout.addWidget(self.target_text_input, 14, 0, 1, 2)
172
+ input_layout.addWidget(self.target_text_button, 14, 2)
173
+
174
+ input_layout.addWidget(self.output_label, 15, 0)
175
+ input_layout.addWidget(self.output_input, 16, 0, 1, 2)
176
+ input_layout.addWidget(self.output_button, 16, 2)
177
+
178
+ main_layout.addLayout(input_layout)
179
+
180
+ output_layout = QVBoxLayout()
181
+ output_layout.addWidget(self.output_text)
182
+ main_layout.addLayout(output_layout)
183
+
184
+ main_layout.addWidget(self.synthesize_button)
185
+
186
+ main_layout.addWidget(self.clear_output_button)
187
+
188
+ main_layout.addWidget(self.status_bar)
189
+
190
+ self.central_widget = QWidget()
191
+ self.central_widget.setLayout(main_layout)
192
+ self.setCentralWidget(self.central_widget)
193
+
194
+ def dragEnterEvent(self, event):
195
+ if event.mimeData().hasUrls():
196
+ event.acceptProposedAction()
197
+
198
+ def dropEvent(self, event):
199
+ if event.mimeData().hasUrls():
200
+ file_paths = [url.toLocalFile() for url in event.mimeData().urls()]
201
+
202
+ if len(file_paths) == 1:
203
+ self.update_ref_audio(file_paths[0])
204
+ self.update_input_paths(self.ref_audio_input, file_paths[0])
205
+ else:
206
+ self.update_ref_audio(", ".join(file_paths))
207
+
208
+ def add_drag_drop_events(self, widgets):
209
+ for widget in widgets:
210
+ widget.setAcceptDrops(True)
211
+ widget.installEventFilter(self)
212
+
213
+ def eventFilter(self, obj, event):
214
+ if event.type() == QEvent.DragEnter:
215
+ mime_data = event.mimeData()
216
+ if mime_data.hasUrls():
217
+ event.acceptProposedAction()
218
+
219
+ elif event.type() == QEvent.Drop:
220
+ mime_data = event.mimeData()
221
+ if mime_data.hasUrls():
222
+ file_paths = [url.toLocalFile() for url in mime_data.urls()]
223
+ if len(file_paths) == 1:
224
+ self.update_input_paths(obj, file_paths[0])
225
+ else:
226
+ self.update_input_paths(obj, ", ".join(file_paths))
227
+ event.acceptProposedAction()
228
+
229
+ return super().eventFilter(obj, event)
230
+
231
+ def select_GPT_model(self):
232
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择GPT模型文件", "", "GPT Files (*.ckpt)")
233
+ if file_path:
234
+ self.GPT_model_input.setText(file_path)
235
+
236
+ def select_SoVITS_model(self):
237
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择SoVITS模型文件", "", "SoVITS Files (*.pth)")
238
+ if file_path:
239
+ self.SoVITS_model_input.setText(file_path)
240
+
241
+ def select_ref_audio(self):
242
+ options = QFileDialog.Options()
243
+ options |= QFileDialog.DontUseNativeDialog
244
+ options |= QFileDialog.ShowDirsOnly
245
+
246
+ file_dialog = QFileDialog()
247
+ file_dialog.setOptions(options)
248
+
249
+ file_dialog.setFileMode(QFileDialog.AnyFile)
250
+ file_dialog.setNameFilter("Audio Files (*.wav *.mp3)")
251
+
252
+ if file_dialog.exec_():
253
+ file_paths = file_dialog.selectedFiles()
254
+
255
+ if len(file_paths) == 1:
256
+ self.update_ref_audio(file_paths[0])
257
+ self.update_input_paths(self.ref_audio_input, file_paths[0])
258
+ else:
259
+ self.update_ref_audio(", ".join(file_paths))
260
+
261
+ def upload_ref_text(self):
262
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
263
+ if file_path:
264
+ with open(file_path, 'r', encoding='utf-8') as file:
265
+ content = file.read()
266
+ self.ref_text_input.setText(content)
267
+ self.update_input_paths(self.ref_text_input, file_path)
268
+
269
+ def upload_target_text(self):
270
+ file_path, _ = QFileDialog.getOpenFileName(self, "选择文本文件", "", "Text Files (*.txt)")
271
+ if file_path:
272
+ with open(file_path, 'r', encoding='utf-8') as file:
273
+ content = file.read()
274
+ self.target_text_input.setText(content)
275
+ self.update_input_paths(self.target_text_input, file_path)
276
+
277
+ def select_output_path(self):
278
+ options = QFileDialog.Options()
279
+ options |= QFileDialog.DontUseNativeDialog
280
+ options |= QFileDialog.ShowDirsOnly
281
+
282
+ folder_dialog = QFileDialog()
283
+ folder_dialog.setOptions(options)
284
+ folder_dialog.setFileMode(QFileDialog.Directory)
285
+
286
+ if folder_dialog.exec_():
287
+ folder_path = folder_dialog.selectedFiles()[0]
288
+ self.output_input.setText(folder_path)
289
+
290
+ def update_ref_audio(self, file_path):
291
+ self.ref_audio_input.setText(file_path)
292
+
293
+ def update_input_paths(self, input_box, file_path):
294
+ input_box.setText(file_path)
295
+
296
+ def clear_output(self):
297
+ self.output_text.clear()
298
+
299
+ def synthesize(self):
300
+ GPT_model_path = self.GPT_model_input.text()
301
+ SoVITS_model_path = self.SoVITS_model_input.text()
302
+ ref_audio_path = self.ref_audio_input.text()
303
+ language_combobox = self.language_combobox.currentText()
304
+ language_combobox = i18n(language_combobox)
305
+ ref_text = self.ref_text_input.text()
306
+ language_combobox_02 = self.language_combobox_02.currentText()
307
+ language_combobox_02 = i18n(language_combobox_02)
308
+ target_text = self.target_text_input.text()
309
+ output_path = self.output_input.text()
310
+
311
+ change_gpt_weights(gpt_path=GPT_model_path)
312
+ change_sovits_weights(sovits_path=SoVITS_model_path)
313
+
314
+ synthesis_result = get_tts_wav(ref_wav_path=ref_audio_path,
315
+ prompt_text=ref_text,
316
+ prompt_language=language_combobox,
317
+ text=target_text,
318
+ text_language=language_combobox_02)
319
+
320
+ result_list = list(synthesis_result)
321
+
322
+ if result_list:
323
+ last_sampling_rate, last_audio_data = result_list[-1]
324
+ output_wav_path = os.path.join(output_path, "output.wav")
325
+ sf.write(output_wav_path, last_audio_data, last_sampling_rate)
326
+
327
+ result = "Audio saved to " + output_wav_path
328
+
329
+ self.status_bar.showMessage("合成完成!输出路径:" + output_wav_path, 5000)
330
+ self.output_text.append("处理结果:\n" + result)
331
+
332
+ def main():
333
+ app = QApplication(sys.argv)
334
+ mainWin = GPTSoVITSGUI()
335
+ mainWin.show()
336
+ sys.exit(app.exec_())
337
+
338
+
339
+ if __name__ == '__main__':
340
+ main()
GPT_SoVITS/inference_webui.py ADDED
@@ -0,0 +1,690 @@
1
+ '''
2
+ 按中英混合识别
3
+ 按日英混合识别
4
+ 多语种启动切分识别语种
5
+ 全部按中文识别
6
+ 全部按英文识别
7
+ 全部按日文识别
8
+ '''
9
+
10
+ # OpenVoice
11
+
12
+ import os
13
+ import torch
14
+ from openvoice import se_extractor
15
+ from openvoice.api import BaseSpeakerTTS, ToneColorConverter
16
+
17
+ if torch.cuda.is_available():
18
+ device = "cuda"
19
+ else:
20
+ device = "cpu"
21
+
22
+ ckpt_base = 'checkpoints/base_speakers/EN'
23
+ ckpt_converter = 'checkpoints/converter'
24
+ base_speaker_tts = BaseSpeakerTTS(f'{ckpt_base}/config.json', device=device)
25
+ base_speaker_tts.load_ckpt(f'{ckpt_base}/checkpoint.pth')
26
+
27
+ tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
28
+ tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')
29
+
30
+ #source_se = torch.load(f'{ckpt_base}/en_default_se.pth').to(device)
31
+ #source_se_style = torch.load(f'{ckpt_base}/en_style_se.pth').to(device)
32
+
33
+ def vc_en(audio_ref, style_mode):
34
+ text = "We have always tried to be at the intersection of technology and liberal arts, to be able to get the best of both, to make extremely advanced products from a technology point of view."
35
+ if style_mode=="default":
36
+ source_se = torch.load(f'{ckpt_base}/en_default_se.pth').to(device)
37
+ reference_speaker = audio_ref
38
+ target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir='processed', vad=True)
39
+ save_path = "output.wav"
40
+
41
+ # Run the base speaker tts
42
+ src_path = "tmp.wav"
43
+ base_speaker_tts.tts(text, src_path, speaker='default', language='English', speed=1.0)
44
+
45
+ # Run the tone color converter
46
+ encode_message = "@MyShell"
47
+ tone_color_converter.convert(
48
+ audio_src_path=src_path,
49
+ src_se=source_se,
50
+ tgt_se=target_se,
51
+ output_path=save_path,
52
+ message=encode_message)
53
+
54
+ else:
55
+ source_se = torch.load(f'{ckpt_base}/en_style_se.pth').to(device)
56
+ reference_speaker = audio_ref
57
+ target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, target_dir='processed', vad=True)
58
+
59
+ save_path = "output.wav"
60
+
61
+ # Run the base speaker tts
62
+ src_path = "tmp.wav"
63
+ base_speaker_tts.tts(text, src_path, speaker=style_mode, language='English', speed=1.0)
64
+
65
+ # Run the tone color converter
66
+ encode_message = "@MyShell"
67
+ tone_color_converter.convert(
68
+ audio_src_path=src_path,
69
+ src_se=source_se,
70
+ tgt_se=target_se,
71
+ output_path=save_path,
72
+ message=encode_message)
73
+
74
+ return "output.wav"
75
+
76
+ # End
77
+
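A minimal sketch of exercising the vc_en helper above on its own, assuming the OpenVoice checkpoints under checkpoints/ are present and my_voice_sample.wav is a placeholder for a real reference recording:

    # Sketch only: produce a style-matched English reference clip from a placeholder sample.
    styled_ref = vc_en("my_voice_sample.wav", "cheerful")
    print("Styled reference audio written to", styled_ref)  # -> output.wav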
78
+ import re, logging
79
+ import LangSegment
80
+ logging.getLogger("markdown_it").setLevel(logging.ERROR)
81
+ logging.getLogger("urllib3").setLevel(logging.ERROR)
82
+ logging.getLogger("httpcore").setLevel(logging.ERROR)
83
+ logging.getLogger("httpx").setLevel(logging.ERROR)
84
+ logging.getLogger("asyncio").setLevel(logging.ERROR)
85
+ logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
86
+ logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)
87
+ import pdb
88
+
89
+ if os.path.exists("./gweight.txt"):
90
+ with open("./gweight.txt", 'r', encoding="utf-8") as file:
91
+ gweight_data = file.read()
92
+ gpt_path = os.environ.get(
93
+ "gpt_path", gweight_data)
94
+ else:
95
+ gpt_path = os.environ.get(
96
+ "gpt_path", "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt")
97
+
98
+ if os.path.exists("./sweight.txt"):
99
+ with open("./sweight.txt", 'r', encoding="utf-8") as file:
100
+ sweight_data = file.read()
101
+ sovits_path = os.environ.get("sovits_path", sweight_data)
102
+ else:
103
+ sovits_path = os.environ.get("sovits_path", "GPT_SoVITS/pretrained_models/s2G488k.pth")
104
+ # gpt_path = os.environ.get(
105
+ # "gpt_path", "pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
106
+ # )
107
+ # sovits_path = os.environ.get("sovits_path", "pretrained_models/s2G488k.pth")
108
+ cnhubert_base_path = os.environ.get(
109
+ "cnhubert_base_path", "GPT_SoVITS/pretrained_models/chinese-hubert-base"
110
+ )
111
+ bert_path = os.environ.get(
112
+ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
113
+ )
114
+ infer_ttswebui = os.environ.get("infer_ttswebui", 9872)
115
+ infer_ttswebui = int(infer_ttswebui)
116
+ is_share = os.environ.get("is_share", "False")
117
+ is_share = eval(is_share)
118
+ if "_CUDA_VISIBLE_DEVICES" in os.environ:
119
+ os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]
120
+ is_half = eval(os.environ.get("is_half", "True")) and torch.cuda.is_available()
121
+ import gradio as gr
122
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
123
+ import numpy as np
124
+ import librosa
125
+ from feature_extractor import cnhubert
126
+
127
+ cnhubert.cnhubert_base_path = cnhubert_base_path
128
+
129
+ from module.models import SynthesizerTrn
130
+ from AR.models.t2s_lightning_module import Text2SemanticLightningModule
131
+ from text import cleaned_text_to_sequence
132
+ from text.cleaner import clean_text
133
+ from time import time as ttime
134
+ from module.mel_processing import spectrogram_torch
135
+ from my_utils import load_audio
136
+ from tools.i18n.i18n import I18nAuto
137
+
138
+ i18n = I18nAuto()
139
+
140
+ # os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1' # 确保直接启动推理UI时也能够设置。
141
+
142
+ tokenizer = AutoTokenizer.from_pretrained(bert_path)
143
+ bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
144
+ if is_half == True:
145
+ bert_model = bert_model.half().to(device)
146
+ else:
147
+ bert_model = bert_model.to(device)
148
+
149
+
150
+ def get_bert_feature(text, word2ph):
151
+ with torch.no_grad():
152
+ inputs = tokenizer(text, return_tensors="pt")
153
+ for i in inputs:
154
+ inputs[i] = inputs[i].to(device)
155
+ res = bert_model(**inputs, output_hidden_states=True)
156
+ res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
157
+ assert len(word2ph) == len(text)
158
+ phone_level_feature = []
159
+ for i in range(len(word2ph)):
160
+ repeat_feature = res[i].repeat(word2ph[i], 1)
161
+ phone_level_feature.append(repeat_feature)
162
+ phone_level_feature = torch.cat(phone_level_feature, dim=0)
163
+ return phone_level_feature.T
164
+
165
+
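get_bert_feature above repeats each character-level BERT vector word2ph[i] times so the feature length matches the phoneme sequence; a toy shape check with made-up numbers (no real model output involved):

    # Illustrative only: 3 characters mapping to [2, 1, 3] phonemes -> 6 phone-level vectors.
    import torch
    res = torch.zeros(3, 1024)      # stand-in for the sliced BERT hidden states
    word2ph = [2, 1, 3]
    phone_level = torch.cat([res[i].repeat(word2ph[i], 1) for i in range(len(word2ph))], dim=0)
    print(phone_level.T.shape)      # torch.Size([1024, 6])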
166
+ class DictToAttrRecursive(dict):
167
+ def __init__(self, input_dict):
168
+ super().__init__(input_dict)
169
+ for key, value in input_dict.items():
170
+ if isinstance(value, dict):
171
+ value = DictToAttrRecursive(value)
172
+ self[key] = value
173
+ setattr(self, key, value)
174
+
175
+ def __getattr__(self, item):
176
+ try:
177
+ return self[item]
178
+ except KeyError:
179
+ raise AttributeError(f"Attribute {item} not found")
180
+
181
+ def __setattr__(self, key, value):
182
+ if isinstance(value, dict):
183
+ value = DictToAttrRecursive(value)
184
+ super(DictToAttrRecursive, self).__setitem__(key, value)
185
+ super().__setattr__(key, value)
186
+
187
+ def __delattr__(self, item):
188
+ try:
189
+ del self[item]
190
+ except KeyError:
191
+ raise AttributeError(f"Attribute {item} not found")
192
+
193
+
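DictToAttrRecursive only exists so the nested hparams dict stored in the SoVITS checkpoint can be read with attribute access; a toy illustration:

    # Toy example of the attribute-style access used below (hps.data.sampling_rate, etc.).
    cfg = DictToAttrRecursive({"data": {"sampling_rate": 32000}})
    print(cfg.data.sampling_rate)        # 32000
    print(cfg["data"]["sampling_rate"])  # plain dict access still works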
194
+ ssl_model = cnhubert.get_model()
195
+ if is_half == True:
196
+ ssl_model = ssl_model.half().to(device)
197
+ else:
198
+ ssl_model = ssl_model.to(device)
199
+
200
+
201
+ def change_sovits_weights(sovits_path):
202
+ global vq_model, hps
203
+ dict_s2 = torch.load(sovits_path, map_location="cpu")
204
+ hps = dict_s2["config"]
205
+ hps = DictToAttrRecursive(hps)
206
+ hps.model.semantic_frame_rate = "25hz"
207
+ vq_model = SynthesizerTrn(
208
+ hps.data.filter_length // 2 + 1,
209
+ hps.train.segment_size // hps.data.hop_length,
210
+ n_speakers=hps.data.n_speakers,
211
+ **hps.model
212
+ )
213
+ if ("pretrained" not in sovits_path):
214
+ del vq_model.enc_q
215
+ if is_half == True:
216
+ vq_model = vq_model.half().to(device)
217
+ else:
218
+ vq_model = vq_model.to(device)
219
+ vq_model.eval()
220
+ print(vq_model.load_state_dict(dict_s2["weight"], strict=False))
221
+ with open("./sweight.txt", "w", encoding="utf-8") as f:
222
+ f.write(sovits_path)
223
+
224
+
225
+ change_sovits_weights(sovits_path)
226
+
227
+
228
+ def change_gpt_weights(gpt_path):
229
+ global hz, max_sec, t2s_model, config
230
+ hz = 50
231
+ dict_s1 = torch.load(gpt_path, map_location="cpu")
232
+ config = dict_s1["config"]
233
+ max_sec = config["data"]["max_sec"]
234
+ t2s_model = Text2SemanticLightningModule(config, "****", is_train=False)
235
+ t2s_model.load_state_dict(dict_s1["weight"])
236
+ if is_half == True:
237
+ t2s_model = t2s_model.half()
238
+ t2s_model = t2s_model.to(device)
239
+ t2s_model.eval()
240
+ total = sum([param.nelement() for param in t2s_model.parameters()])
241
+ print("Number of parameters: %.2fM" % (total / 1e6))
242
+ with open("./gweight.txt", "w", encoding="utf-8") as f: f.write(gpt_path)
243
+
244
+
245
+ change_gpt_weights(gpt_path)
246
+
247
+
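The same two loaders are what the model dropdowns call at runtime, so switching to fine-tuned weights is just a pair of calls; the file names below are placeholders for whatever actually lives in SoVITS_weights/ and GPT_weights/:

    # Hypothetical hot-swap to fine-tuned checkpoints (paths are examples only).
    change_sovits_weights("SoVITS_weights/my_speaker_e8_s200.pth")
    change_gpt_weights("GPT_weights/my_speaker-e15.ckpt")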
248
+ def get_spepc(hps, filename):
249
+ audio = load_audio(filename, int(hps.data.sampling_rate))
250
+ audio = torch.FloatTensor(audio)
251
+ audio_norm = audio
252
+ audio_norm = audio_norm.unsqueeze(0)
253
+ spec = spectrogram_torch(
254
+ audio_norm,
255
+ hps.data.filter_length,
256
+ hps.data.sampling_rate,
257
+ hps.data.hop_length,
258
+ hps.data.win_length,
259
+ center=False,
260
+ )
261
+ return spec
262
+
263
+
264
+ dict_language = {
265
+ i18n("中文"): "all_zh",#全部按中文识别
266
+ i18n("英文"): "en",#全部按英文识别#######不变
267
+ i18n("日文"): "all_ja",#全部按日文识别
268
+ i18n("中英混合"): "zh",#按中英混合识别####不变
269
+ i18n("日英混合"): "ja",#按日英混合识别####不变
270
+ i18n("多语种混合"): "auto",#多语种启动切分识别语种
271
+ }
272
+
273
+
274
+ def clean_text_inf(text, language):
275
+ phones, word2ph, norm_text = clean_text(text, language)
276
+ phones = cleaned_text_to_sequence(phones)
277
+ return phones, word2ph, norm_text
278
+
279
+ dtype=torch.float16 if is_half == True else torch.float32
280
+ def get_bert_inf(phones, word2ph, norm_text, language):
281
+ language=language.replace("all_","")
282
+ if language == "zh":
283
+ bert = get_bert_feature(norm_text, word2ph).to(device)#.to(dtype)
284
+ else:
285
+ bert = torch.zeros(
286
+ (1024, len(phones)),
287
+ dtype=torch.float16 if is_half == True else torch.float32,
288
+ ).to(device)
289
+
290
+ return bert
291
+
292
+
293
+ splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…", }
294
+
295
+
296
+ def get_first(text):
297
+ pattern = "[" + "".join(re.escape(sep) for sep in splits) + "]"
298
+ text = re.split(pattern, text)[0].strip()
299
+ return text
300
+
301
+
302
+ def get_phones_and_bert(text,language):
303
+ if language in {"en","all_zh","all_ja"}:
304
+ language = language.replace("all_","")
305
+ if language == "en":
306
+ LangSegment.setfilters(["en"])
307
+ formattext = " ".join(tmp["text"] for tmp in LangSegment.getTexts(text))
308
+ else:
309
+ # 因无法区别中日文汉字,以用户输入为准
310
+ formattext = text
311
+ while " " in formattext:
312
+ formattext = formattext.replace(" ", " ")
313
+ phones, word2ph, norm_text = clean_text_inf(formattext, language)
314
+ if language == "zh":
315
+ bert = get_bert_feature(norm_text, word2ph).to(device)
316
+ else:
317
+ bert = torch.zeros(
318
+ (1024, len(phones)),
319
+ dtype=torch.float16 if is_half == True else torch.float32,
320
+ ).to(device)
321
+ elif language in {"zh", "ja","auto"}:
322
+ textlist=[]
323
+ langlist=[]
324
+ LangSegment.setfilters(["zh","ja","en","ko"])
325
+ if language == "auto":
326
+ for tmp in LangSegment.getTexts(text):
327
+ if tmp["lang"] == "ko":
328
+ langlist.append("zh")
329
+ textlist.append(tmp["text"])
330
+ else:
331
+ langlist.append(tmp["lang"])
332
+ textlist.append(tmp["text"])
333
+ else:
334
+ for tmp in LangSegment.getTexts(text):
335
+ if tmp["lang"] == "en":
336
+ langlist.append(tmp["lang"])
337
+ else:
338
+ # 因无法区别中日文汉字,以用户输入为准
339
+ langlist.append(language)
340
+ textlist.append(tmp["text"])
341
+ print(textlist)
342
+ print(langlist)
343
+ phones_list = []
344
+ bert_list = []
345
+ norm_text_list = []
346
+ for i in range(len(textlist)):
347
+ lang = langlist[i]
348
+ phones, word2ph, norm_text = clean_text_inf(textlist[i], lang)
349
+ bert = get_bert_inf(phones, word2ph, norm_text, lang)
350
+ phones_list.append(phones)
351
+ norm_text_list.append(norm_text)
352
+ bert_list.append(bert)
353
+ bert = torch.cat(bert_list, dim=1)
354
+ phones = sum(phones_list, [])
355
+ norm_text = ''.join(norm_text_list)
356
+
357
+ return phones,bert.to(dtype),norm_text
358
+
359
+
360
+ def merge_short_text_in_array(texts, threshold):
361
+ if (len(texts)) < 2:
362
+ return texts
363
+ result = []
364
+ text = ""
365
+ for ele in texts:
366
+ text += ele
367
+ if len(text) >= threshold:
368
+ result.append(text)
369
+ text = ""
370
+ if (len(text) > 0):
371
+ if len(result) == 0:
372
+ result.append(text)
373
+ else:
374
+ result[len(result) - 1] += text
375
+ return result
376
+
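merge_short_text_in_array greedily concatenates consecutive segments until each reaches the threshold and folds a short trailing remainder into the previous entry; two quick checks (pure string handling, no model needed):

    print(merge_short_text_in_array(["abcde", "fgh", "ij"], 5))  # ['abcde', 'fghij']
    print(merge_short_text_in_array(["abcde", "fg"], 5))         # ['abcdefg']  (short tail folded back)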
377
+ def get_tts_wav(ref_wav_path, prompt_text, prompt_language, text, text_language, how_to_cut=i18n("不切"), top_k=20, top_p=0.6, temperature=0.6, ref_free = False):
378
+ if prompt_text is None or len(prompt_text) == 0:
379
+ ref_free = True
380
+ t0 = ttime()
381
+ prompt_language = dict_language[prompt_language]
382
+ text_language = dict_language[text_language]
383
+ if not ref_free:
384
+ prompt_text = prompt_text.strip("\n")
385
+ if (prompt_text[-1] not in splits): prompt_text += "。" if prompt_language != "en" else "."
386
+ print(i18n("实际输入的参考文本:"), prompt_text)
387
+ text = text.strip("\n")
388
+ if (text[0] not in splits and len(get_first(text)) < 4): text = "。" + text if text_language != "en" else "." + text
389
+
390
+ print(i18n("实际输入的目标文本:"), text)
391
+ zero_wav = np.zeros(
392
+ int(hps.data.sampling_rate * 0.3),
393
+ dtype=np.float16 if is_half == True else np.float32,
394
+ )
395
+ with torch.no_grad():
396
+ wav16k, sr = librosa.load(ref_wav_path, sr=16000)
397
+ if (wav16k.shape[0] > 240000 or wav16k.shape[0] < 48000):
398
+ raise OSError(i18n("参考音频在3~15秒范围外,请更换!"))
399
+ wav16k = torch.from_numpy(wav16k)
400
+ zero_wav_torch = torch.from_numpy(zero_wav)
401
+ if is_half == True:
402
+ wav16k = wav16k.half().to(device)
403
+ zero_wav_torch = zero_wav_torch.half().to(device)
404
+ else:
405
+ wav16k = wav16k.to(device)
406
+ zero_wav_torch = zero_wav_torch.to(device)
407
+ wav16k = torch.cat([wav16k, zero_wav_torch])
408
+ ssl_content = ssl_model.model(wav16k.unsqueeze(0))[
409
+ "last_hidden_state"
410
+ ].transpose(
411
+ 1, 2
412
+ ) # .float()
413
+ codes = vq_model.extract_latent(ssl_content)
414
+
415
+ prompt_semantic = codes[0, 0]
416
+ t1 = ttime()
417
+
418
+ if (how_to_cut == i18n("凑四句一切")):
419
+ text = cut1(text)
420
+ elif (how_to_cut == i18n("凑50字一切")):
421
+ text = cut2(text)
422
+ elif (how_to_cut == i18n("按中文句号。切")):
423
+ text = cut3(text)
424
+ elif (how_to_cut == i18n("按英文句号.切")):
425
+ text = cut4(text)
426
+ elif (how_to_cut == i18n("按标点符号切")):
427
+ text = cut5(text)
428
+ while "\n\n" in text:
429
+ text = text.replace("\n\n", "\n")
430
+ print(i18n("实际输入的目标文本(切句后):"), text)
431
+ texts = text.split("\n")
432
+ texts = merge_short_text_in_array(texts, 5)
433
+ audio_opt = []
434
+ if not ref_free:
435
+ phones1,bert1,norm_text1=get_phones_and_bert(prompt_text, prompt_language)
436
+
437
+ for text in texts:
438
+ # 解决输入目标文本的空行导致报错的问题
439
+ if (len(text.strip()) == 0):
440
+ continue
441
+ if (text[-1] not in splits): text += "。" if text_language != "en" else "."
442
+ print(i18n("实际输入的目标文本(每句):"), text)
443
+ phones2,bert2,norm_text2=get_phones_and_bert(text, text_language)
444
+ print(i18n("前端处理后的文本(每句):"), norm_text2)
445
+ if not ref_free:
446
+ bert = torch.cat([bert1, bert2], 1)
447
+ all_phoneme_ids = torch.LongTensor(phones1+phones2).to(device).unsqueeze(0)
448
+ else:
449
+ bert = bert2
450
+ all_phoneme_ids = torch.LongTensor(phones2).to(device).unsqueeze(0)
451
+
452
+ bert = bert.to(device).unsqueeze(0)
453
+ all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
454
+ prompt = prompt_semantic.unsqueeze(0).to(device)
455
+ t2 = ttime()
456
+ with torch.no_grad():
457
+ # pred_semantic = t2s_model.model.infer(
458
+ pred_semantic, idx = t2s_model.model.infer_panel(
459
+ all_phoneme_ids,
460
+ all_phoneme_len,
461
+ None if ref_free else prompt,
462
+ bert,
463
+ # prompt_phone_len=ph_offset,
464
+ top_k=top_k,
465
+ top_p=top_p,
466
+ temperature=temperature,
467
+ early_stop_num=hz * max_sec,
468
+ )
469
+ t3 = ttime()
470
+ # print(pred_semantic.shape,idx)
471
+ pred_semantic = pred_semantic[:, -idx:].unsqueeze(
472
+ 0
473
+ ) # .unsqueeze(0)#mq要多unsqueeze一次
474
+ refer = get_spepc(hps, ref_wav_path) # .to(device)
475
+ if is_half == True:
476
+ refer = refer.half().to(device)
477
+ else:
478
+ refer = refer.to(device)
479
+ # audio = vq_model.decode(pred_semantic, all_phoneme_ids, refer).detach().cpu().numpy()[0, 0]
480
+ audio = (
481
+ vq_model.decode(
482
+ pred_semantic, torch.LongTensor(phones2).to(device).unsqueeze(0), refer
483
+ )
484
+ .detach()
485
+ .cpu()
486
+ .numpy()[0, 0]
487
+ ) ###试试重建不带上prompt部分
488
+ max_audio=np.abs(audio).max()#简单防止16bit爆音
489
+ if max_audio>1:audio/=max_audio
490
+ audio_opt.append(audio)
491
+ audio_opt.append(zero_wav)
492
+ t4 = ttime()
493
+ print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
494
+ yield hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(
495
+ np.int16
496
+ )
497
+
498
+
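get_tts_wav is a generator yielding (sampling_rate, int16_audio) once per pass, which is how both the Gradio UI and the PyQt GUI consume it; a hedged offline sketch, where ref.wav and the texts are placeholders:

    # Sketch: synthesize one utterance and save it (inputs are placeholders).
    import soundfile as sf
    for sr, audio in get_tts_wav(
            ref_wav_path="ref.wav",
            prompt_text="We have always tried to be at the intersection of technology and liberal arts.",
            prompt_language=i18n("英文"),
            text="今天天气很好。",
            text_language=i18n("中文"),
            how_to_cut=i18n("不切")):
        sf.write("demo_output.wav", audio, sr)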
499
+ def split(todo_text):
500
+ todo_text = todo_text.replace("……", "。").replace("——", ",")
501
+ if todo_text[-1] not in splits:
502
+ todo_text += "。"
503
+ i_split_head = i_split_tail = 0
504
+ len_text = len(todo_text)
505
+ todo_texts = []
506
+ while 1:
507
+ if i_split_head >= len_text:
508
+ break # 结尾一定有标点,所以直接跳出即可,最后一段在上次已加入
509
+ if todo_text[i_split_head] in splits:
510
+ i_split_head += 1
511
+ todo_texts.append(todo_text[i_split_tail:i_split_head])
512
+ i_split_tail = i_split_head
513
+ else:
514
+ i_split_head += 1
515
+ return todo_texts
516
+
517
+
518
+ def cut1(inp):
519
+ inp = inp.strip("\n")
520
+ inps = split(inp)
521
+ split_idx = list(range(0, len(inps), 4))
522
+ split_idx[-1] = None
523
+ if len(split_idx) > 1:
524
+ opts = []
525
+ for idx in range(len(split_idx) - 1):
526
+ opts.append("".join(inps[split_idx[idx]: split_idx[idx + 1]]))
527
+ else:
528
+ opts = [inp]
529
+ return "\n".join(opts)
530
+
531
+
532
+ def cut2(inp):
533
+ inp = inp.strip("\n")
534
+ inps = split(inp)
535
+ if len(inps) < 2:
536
+ return inp
537
+ opts = []
538
+ summ = 0
539
+ tmp_str = ""
540
+ for i in range(len(inps)):
541
+ summ += len(inps[i])
542
+ tmp_str += inps[i]
543
+ if summ > 50:
544
+ summ = 0
545
+ opts.append(tmp_str)
546
+ tmp_str = ""
547
+ if tmp_str != "":
548
+ opts.append(tmp_str)
549
+ # print(opts)
550
+ if len(opts) > 1 and len(opts[-1]) < 50: ##如果最后一个太短了,和前一个合一起
551
+ opts[-2] = opts[-2] + opts[-1]
552
+ opts = opts[:-1]
553
+ return "\n".join(opts)
554
+
555
+
556
+ def cut3(inp):
557
+ inp = inp.strip("\n")
558
+ return "\n".join(["%s" % item for item in inp.strip("。").split("。")])
559
+
560
+
561
+ def cut4(inp):
562
+ inp = inp.strip("\n")
563
+ return "\n".join(["%s" % item for item in inp.strip(".").split(".")])
564
+
565
+
566
+ # contributed by https://github.com/AI-Hobbyist/GPT-SoVITS/blob/main/GPT_SoVITS/inference_webui.py
567
+ def cut5(inp):
568
+ # if not re.search(r'[^\w\s]', inp[-1]):
569
+ # inp += '。'
570
+ inp = inp.strip("\n")
571
+ punds = r'[,.;?!、,。?!;:…]'
572
+ items = re.split(f'({punds})', inp)
573
+ mergeitems = ["".join(group) for group in zip(items[::2], items[1::2])]
574
+ # 在句子不存在符号或句尾无符号的时候保证文本完整
575
+ if len(items)%2 == 1:
576
+ mergeitems.append(items[-1])
577
+ opt = "\n".join(mergeitems)
578
+ return opt
579
+
580
+
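The cut1–cut5 helpers all return newline-separated chunks, which get_tts_wav later splits on "\n"; cut5, for instance, splits on any punctuation mark while keeping it attached to its sentence:

    # Quick check of the punctuation splitter (pure string manipulation):
    print(cut5("你好,世界。Hello world"))
    # 你好,
    # 世界。
    # Hello world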
581
+ def custom_sort_key(s):
582
+ # 使用正则表达式提取字符串中的数字部分和非数字部分
583
+ parts = re.split(r'(\d+)', s)
584
+ # 将数字部分转换为整数,非数字部分保持不变
585
+ parts = [int(part) if part.isdigit() else part for part in parts]
586
+ return parts
587
+
588
+
589
+ def change_choices():
590
+ SoVITS_names, GPT_names = get_weights_names()
591
+ return {"choices": sorted(SoVITS_names, key=custom_sort_key), "__type__": "update"}, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}
592
+
593
+
594
+ pretrained_sovits_name = "GPT_SoVITS/pretrained_models/s2G488k.pth"
595
+ pretrained_gpt_name = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
596
+ SoVITS_weight_root = "SoVITS_weights"
597
+ GPT_weight_root = "GPT_weights"
598
+ os.makedirs(SoVITS_weight_root, exist_ok=True)
599
+ os.makedirs(GPT_weight_root, exist_ok=True)
600
+
601
+
602
+ def get_weights_names():
603
+ SoVITS_names = [pretrained_sovits_name]
604
+ for name in os.listdir(SoVITS_weight_root):
605
+ if name.endswith(".pth"): SoVITS_names.append("%s/%s" % (SoVITS_weight_root, name))
606
+ GPT_names = [pretrained_gpt_name]
607
+ for name in os.listdir(GPT_weight_root):
608
+ if name.endswith(".ckpt"): GPT_names.append("%s/%s" % (GPT_weight_root, name))
609
+ return SoVITS_names, GPT_names
610
+
611
+
612
+ SoVITS_names, GPT_names = get_weights_names()
613
+
614
+ with gr.Blocks(title="GPT-SoVITS WebUI") as app:
615
+ gr.Markdown(
616
+ value=i18n("本软件以MIT协议开源, 作者不对软件具备任何控制力, 使用软件者、传播软件导出的声音者自负全责. <br>如不认可该条款, 则不能使用或引用软件包内任何代码和文件. 详见根目录<b>LICENSE</b>.")
617
+ )
618
+ with gr.Group():
619
+ gr.Markdown(value=i18n("模型切换"))
620
+ with gr.Row():
621
+ GPT_dropdown = gr.Dropdown(label=i18n("GPT模型列表"), choices=sorted(GPT_names, key=custom_sort_key), value=gpt_path, interactive=True)
622
+ SoVITS_dropdown = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=sorted(SoVITS_names, key=custom_sort_key), value=sovits_path, interactive=True)
623
+ refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
624
+ refresh_button.click(fn=change_choices, inputs=[], outputs=[SoVITS_dropdown, GPT_dropdown])
625
+ SoVITS_dropdown.change(change_sovits_weights, [SoVITS_dropdown], [])
626
+ GPT_dropdown.change(change_gpt_weights, [GPT_dropdown], [])
627
+ gr.Markdown(value=i18n("*请上传并填写参考信息"))
628
+ with gr.Row():
629
+ inp_training_audio = gr.Audio(label="请上传您完整的1分钟训练音频", type="filepath")
630
+ style_control = gr.Dropdown(label="请选择一种语音情感", info="🙂default😊friendly🤫whispering😄cheerful😱terrified😡angry😢sad", choices=["default", "friendly", "whispering", "cheerful", "terrified", "angry", "sad"], value="default")
631
+ btn_style = gr.Button("一键生成情感参考音频吧💕", variant="primary")
632
+ out_ref_audio = gr.Audio(label="为您生成的情感参考音频", type="filepath", interactive=False)
633
+ inp_ref = out_ref_audio
634
+ with gr.Column():
635
+ ref_text_free = gr.Checkbox(label=i18n("开启无参考文本模式。不填参考文本亦相当于开启。"), value=False, interactive=False, show_label=True)
636
+ gr.Markdown(i18n("使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。"))
637
+ prompt_text = gr.Textbox(label=i18n("参考音频的文本"), interactive=False, value="We have always tried to be at the intersection of technology and liberal arts, to be able to get the best of both, to make extremely advanced products from a technology point of view.")
638
+ prompt_language = gr.Dropdown(
639
+ label=i18n("参考音频的语种"), choices=[i18n("中文"), i18n("英文"), i18n("日文"), i18n("中英混合"), i18n("日英混合"), i18n("多语种混合")], value=i18n("英文"), interactive=False
640
+ )
641
+ gr.Markdown(value=i18n("*请填写需要合成的目标文本和语种模式"))
642
+ with gr.Row():
643
+ text = gr.Textbox(label=i18n("需要合成的文本"), value="")
644
+ text_language = gr.Dropdown(
645
+ label=i18n("需要合成的语种"), choices=[i18n("中文"), i18n("英文"), i18n("日文"), i18n("中英混合"), i18n("日英混合"), i18n("多语种混合")], value=i18n("中文")
646
+ )
647
+ how_to_cut = gr.Radio(
648
+ label=i18n("怎么切"),
649
+ choices=[i18n("不切"), i18n("凑四句一切"), i18n("凑50字一切"), i18n("按中文句号。切"), i18n("按英文句号.切"), i18n("按标点符号切"), ],
650
+ value=i18n("凑四句一切"),
651
+ interactive=True,
652
+ )
653
+ with gr.Row():
654
+ gr.Markdown(value=i18n("gpt采样参数(无参考文本时不要太低):"))
655
+ top_k = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("top_k"),value=5,interactive=True)
656
+ top_p = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("top_p"),value=1,interactive=True)
657
+ temperature = gr.Slider(minimum=0,maximum=1,step=0.05,label=i18n("temperature"),value=1,interactive=True)
658
+ inference_button = gr.Button(i18n("合成语音"), variant="primary")
659
+ output = gr.Audio(label=i18n("输出的语音"))
660
+
661
+ inference_button.click(
662
+ get_tts_wav,
663
+ [inp_ref, prompt_text, prompt_language, text, text_language, how_to_cut, top_k, top_p, temperature, ref_text_free],
664
+ [output],
665
+ )
666
+
667
+ gr.Markdown(value=i18n("文本切分工具。太长的文本合成出来效果不一定好,所以太长建议先切。合成会根据文本的换行分开合成再拼起来。"))
668
+ with gr.Row():
669
+ text_inp = gr.Textbox(label=i18n("需要合成的切分前文本"), value="")
670
+ button1 = gr.Button(i18n("凑四句一切"), variant="primary")
671
+ button2 = gr.Button(i18n("凑50字一切"), variant="primary")
672
+ button3 = gr.Button(i18n("按中文句号。切"), variant="primary")
673
+ button4 = gr.Button(i18n("按英文句号.切"), variant="primary")
674
+ button5 = gr.Button(i18n("按标点符号切"), variant="primary")
675
+ text_opt = gr.Textbox(label=i18n("切分后文本"), value="")
676
+ button1.click(cut1, [text_inp], [text_opt])
677
+ button2.click(cut2, [text_inp], [text_opt])
678
+ button3.click(cut3, [text_inp], [text_opt])
679
+ button4.click(cut4, [text_inp], [text_opt])
680
+ button5.click(cut5, [text_inp], [text_opt])
681
+ btn_style.click(vc_en, [inp_training_audio, style_control], [out_ref_audio])
682
+ gr.Markdown(value=i18n("后续将支持转音素、手工修改音素、语音合成分步执行。"))
683
+
684
+ app.queue(concurrency_count=511, max_size=1022).launch(
685
+ server_name="0.0.0.0",
686
+ inbrowser=True,
687
+ share=True,
688
+ server_port=infer_ttswebui,
689
+ quiet=True,
690
+ )
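The port, precision and default weight paths above are read from environment variables near the top of the file, so they can be overridden without editing the script; a sketch (the checkpoint names are placeholders):

    # Illustrative only: set these before the module is imported/run.
    import os
    os.environ["infer_ttswebui"] = "9880"                        # serve on port 9880 instead of 9872
    os.environ["is_half"] = "False"                              # force fp32 on GPUs without fp16 support
    os.environ["gpt_path"] = "GPT_weights/my_speaker-e15.ckpt"   # hypothetical fine-tuned GPT weights
    os.environ["sovits_path"] = "SoVITS_weights/my_speaker_e8_s200.pth"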